diff --git a/.github/workflows/cherry-pick.yml b/.github/workflows/cherry-pick.yml new file mode 100644 index 000000000..0137d84b5 --- /dev/null +++ b/.github/workflows/cherry-pick.yml @@ -0,0 +1,36 @@ +name: Cherry Pick + +on: + issue_comment: + types: [created] + +jobs: + cherry-pick: + runs-on: ubuntu-latest + if: | + startsWith(github.event.comment.body, '/cherry-pick') + steps: + - name: Check out repository + uses: actions/checkout@v2 + + - name: Set up Git + run: | + git config --global user.name "${{ github.actor }}" + git config --global user.email "${{ github.actor }}@users.noreply.github.com" + - name: Extract target branch + id: extract + run: echo "::set-output name=branch::$(echo "${{ github.event.comment.body }}" | cut -d' ' -f2)" + + - name: Cherry-pick the PR + env: + GH_PAT: ${{ secrets.GH_PAT }} + run: | + TARGET_BRANCH=${{ steps.extract.outputs.branch }} + git fetch origin ${{ github.event.pull_request.head.ref }} + git checkout $TARGET_BRANCH + git cherry-pick ${{ github.event.pull_request.head.sha }} || exit 0 + - name: Push changes + env: + GH_PAT: ${{ secrets.GH_PAT }} + run: | + git push https://${GH_PAT}@github.com/${{ github.repository }} $TARGET_BRANCH diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..6378f560a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,24 @@ +name: ci +on: + push: + branches: + - master + - main +permissions: + contents: write +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.x + - uses: actions/cache@v4 + with: + key: ${{ github.ref }} + path: .cache + - run: pip install mkdocs-material + - run: pip install mkdocs-swagger-ui-tag + - run: pip install "mkdocs-material[imaging]" + - run: mkdocs gh-deploy --force diff --git a/.github/workflows/preview.yml b/.github/workflows/preview.yml new file mode 100644 index 000000000..79428caae --- /dev/null +++ 
b/.github/workflows/preview.yml @@ -0,0 +1,105 @@ +# This is a basic workflow to help you get started with Actions + +name: ccutil-workflow + +# Controls when the workflow will run +on: + # Triggers the workflow on push or pull request events but only for the master branch + push: + branches: [ master ] + #pull_request: + # branches: [ master ] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + get: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + - uses: actions/checkout@v2 + with: + ref: master + path: source + + - uses: actions/checkout@v2 + with: + ref: gh-pages + path: dest + + - name: whereami + run: | + cd source + pwd + ls + ls ${{ github.workspace }}/ + cd ${{ github.workspace }}/ + pwd + ls + + + + - uses: addnab/docker-run-action@v3 + with: + image: quay.io/rhn_support_gmcgoldr/levccutil + options: -v ${{ github.workspace }}:/work + shell: bash + run: | + cd /work/source/release_notes + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/release_notes/build/tmp/en-US/html-single/* /work/dest/master/release_notes/ + cd /work/source/deploy_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/deploy_quay/build/tmp/en-US/html-single/* /work/dest/master/deploy_quay/ + cd /work/source/deploy_quay_on_openshift_op_tng + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/deploy_quay_on_openshift_op_tng/build/tmp/en-US/html-single/* /work/dest/master/deploy_quay_on_openshift_op_tng/ + cd /work/source/deploy_quay_ha + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/deploy_quay_ha/build/tmp/en-US/html-single/* /work/dest/master/deploy_quay_ha/ + cd 
/work/source/config_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/config_quay/build/tmp/en-US/html-single/* /work/dest/master/config_quay/ + cd /work/source/manage_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/manage_quay/build/tmp/en-US/html-single/* /work/dest/master/manage_quay/ + cd /work/source/upgrade_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/upgrade_quay/build/tmp/en-US/html-single/* /work/dest/master/upgrade_quay/ + cd /work/source/use_quay + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/use_quay/build/tmp/en-US/html-single/* /work/dest/master/use_quay/ + cd /work/source/api + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/api/build/tmp/en-US/html-single/* /work/dest/master/api/ + cd /work/source/architecture + ccutil compile --lang en_US --type asciidoc --main-file master.adoc + cp -Rf /work/source/architecture/build/tmp/en-US/html-single/* /work/dest/master/architecture/ + + - name: commit + run: | + cd dest + git status + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + #git diff + #git diff-index --quiet HEAD || (git commit -a -m 'DOCS Auto-update' --allow-empty && git push -f) + git add . 
+ git commit -m "update docs" + + + - name: Push changes + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: gh-pages + directory: dest + + + # Runs a single command using the runners shell + # docker run -ti --rm --privileged -v source:/source quay.io/rhn_support_gmcgoldr/levccutil "/bin/bash cd /source/deploy_quay; ls; ccutil compile --lang en_US --type asciidoc --main-file master.adoc; ls -al " + diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..acadae5bd --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +dist +build/ +.DS_Store +.idea/ \ No newline at end of file diff --git a/.gitleaks.toml b/.gitleaks.toml new file mode 100644 index 000000000..c8ba4d00a --- /dev/null +++ b/.gitleaks.toml @@ -0,0 +1,54 @@ +[allowlist] +description = "Allow bcrypt hashes used in SQL updates" + +regexes = [ + # Ignore bcrypt password hashes (e.g., $2b$12$...) + '''\$2b\$12\$[A-Za-z0-9./]{53}''' +] + +paths = [ + # Ignore all example certs + '''\/example.*\.pem$''', + + # Ignore anything with the word funkymonkey anywhere in the path (example values below) + '''ANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI''', + '''E6GJSHOZMFBVNHTHNB53''', + '''MCJ61D8KQBFS2DXM56S2''', + '''J5G7CCX5QCA8Q5XZLWGI7USJPSM4M5MQHJED46CF''', + '''IG58PX2REEY9O08IZFZE''', + '''2LWTWO89KH26P2CO4TWFM7PGCX4V4SUZES2CIZMR''', + '''6XBK7QY7ACSCN5XBM3GS''', + '''AVKBOUXTFO3MXBBK5UJD5QCQRN2FWL3O0XPZZT78''', + '''SANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI''', + '''WB4FUG4PP2278KK579EN4NDP150CPYOG6DN42MP6JF8IAJ4PON4RC7DIOH5UEFBP''', + '''MXFE7NSOWPN33O7UC3THY0BN03DW940CMWTLRBE2EPTI8JPX0B0CWIIDGTI4YTJ6''', + '''IJWZ8TIY301KPFOW3WEUJEVZ3JR11CY1''', + '''9Q36xF54YEOLjetayC0NBaIKgcFFmIHsS3xTZDLzZSrhTBkxUc9FDwUKfnxLWhco6oBJV1NDBjoBcDGmsZMYPt1dSA4yWpPe/JKY9pnDcsw=''', + '''MXZ9DATUWRD8WCMT8AZIPYE0IEZHJJ1B8P8ZEIXC0W552DUMMTNJJH02HFGXTOVG''', + '''CWLBVAODE61IXNDJ40GERFOZPB3ARZDRCP4X70ID1NB28AI0OOJBTR9S4M0ACYMD''', + 
'''BILZ6YTVAZAKOGMD9270OKN3SOD9KPB7OLKEJQOJE38NBBRUJTIH7T5859DJL31Q''', + '''QBFYWIWZOS1I0P0R9N1JRNP1UZAOPUIR3EB4ASPZKK9IA1SFC12LTEF7OJHB05Z8''', + '''E6GJSHOZMFBVNHTHNB53''', + '''postgresql://:test123@172.24.10.50/quay''', + '''postgresql://:test123@172.24.10.50/example-restore-registry-quay-database''', + '''quayadmin''', + '''DB_URI: postgresql://restore-registry-quay-database:zLTm315muk6rz7mL4aFuLQ2Q8rAk-dB4kPHQ2WMvdyqhaZywf20503wCZfv2Ml1f15LUsDN2-0m71gnI@restore-registry-quay-database:5432/restore-registry-quay-database +''', + '''DB_URI: postgresql://quay360-quay-database:0vrsIUYdhCnF8r-jwz7zR6gck6kcLLQhJ11u0dx1lz8YBk185P5NnqIBwtY22JArYLi3opdKJH2-w4aM@quay360-quay-database:5432/quay360-quay-database +''', + '''XyThQKm6lMWh4O7dKdmRwMUHB9ktxPPVSRIePOY2''', + '''VvoFhVFp8BqcOgQ9LczE''', + '''DB_URI: postgresql://restore-registry-quay-database:zLTm315muk6rz7mL4aFuLQ2Q8rAk-dB4kPHQ2WMvdyqhaZywf20503wCZfv2Ml1f15LUsDN2-0m71gnI@restore-registry-quay-database:5432/restore-registry-quay-database +''', + '''postgresql://quayuser:quaypass@quay-server:5432/quay''', + '''4b1c5663-88c6-47ac-b4a8-bb594660f08b''', + '''postgresql://example-registry-quay-database:OyC4zGhJMbi3yUzW1aIgOLQNW18r14nAcuJfbsjtrAXUVInj2JgwLskQPOutPCXMtlKr1UPTsIPqOEjV@example-registry-quay-database:5432/example-registry-quay-database''', + '''postgresql://restore-registry-quay-database:zLTm315muk6rz7mL4aFuLQ2Q8rAk-dB4kPHQ2WMvdyqhaZywf20503wCZfv2Ml1f15LUsDN2-0m71gnI@restore-registry-quay-database:5432/restore-registry-quay-database''', + '''postgresql://example-restore-registry-quay-database:onHl1LDsspZh4hoOL5wW1Of7GV0Kmtp2@example-restore-registry-quay-database:5432/example-restore-registry-quay-database''', + '''postgresql://example-restore-registry-quay-database:onHl1LDsspZh4hoOL5wW1Of7GV0Kmtp2@example-restore-registry-quay-database:5432/example-restore-registry-quay-database''', + '''zsk/j4zEOkQq+W0BQJdSufP+IackV8WICXB5zvdF''', + '''1H36Izzc90cUNVHaiaUX''', + 
'''iO1b3RUt4KKgjSimCROSPN3cEMn4TqSgsPyniMBR''', + '''EH67NB3Y6PTBED8H0HC6UVHGGGA3ODSE''', + '''fn37AZAUQH0PTsU+vlO9lS0QxPW9A/boXL4ovZjIFtlUPrBz9i4j9UDOqMjuxQ/0HTfy38goKEpG8zYXVeQh3lOFzuOjSvKic2Vq7xdtQsU=''', +] diff --git a/.s2i/bin/assemble b/.s2i/bin/assemble new file mode 100644 index 000000000..8d4cf5677 --- /dev/null +++ b/.s2i/bin/assemble @@ -0,0 +1,25 @@ +#!/bin/bash -e +# The assemble script builds the application artifacts from source and +# places them into appropriate directories inside the image. + +echo "---> Updating bundler gem..." +gem install bundler + +shopt -s dotglob +echo "---> Installing application source ..." +mv /tmp/src/* ./ + +# Fix source directory permissions +fix-permissions ./ + +bundle install --path ./bundle --binstubs + +# Fixes incompatible character encodings: US-ASCII and UTF-8 error +export LANG="en_US.UTF-8" + +echo "---> Building webpage from source ..." +exec $HOME/build_docs + +# Fix source directory permissions +echo "---> Fixing permissions ..." +fix-permissions ./ diff --git a/.s2i/httpd-cfg/01-default.conf b/.s2i/httpd-cfg/01-default.conf new file mode 100644 index 000000000..30ef69957 --- /dev/null +++ b/.s2i/httpd-cfg/01-default.conf @@ -0,0 +1,221 @@ +# ---------------------------------------------------------------------- +# Better website experience for IE users +# ---------------------------------------------------------------------- + +# Force the latest IE version, in various cases when it may fall back to IE7 mode +# github.com/rails/rails/commit/123eb25#commitcomment-118920 +# Use ChromeFrame if it's installed for a better experience for the poor IE folk + + + Header set X-UA-Compatible "IE=Edge,chrome=1" + # mod_headers can't match by content-type, but we don't want to send this header on *everything*... 
+ + Header unset X-UA-Compatible + + + +# ---------------------------------------------------------------------- +# Proper MIME type for all files +# ---------------------------------------------------------------------- + +# JavaScript +# Normalize to standard type (it's sniffed in IE anyways) +# tools.ietf.org/html/rfc4329#section-7.2 +AddType application/javascript js jsonp +AddType application/json json + +# Audio +AddType audio/ogg oga ogg +AddType audio/mp4 m4a f4a f4b + +# Video +AddType video/ogg ogv +AddType video/mp4 mp4 m4v f4v f4p +AddType video/webm webm +AddType video/x-flv flv + +# SVG +# Required for svg webfonts on iPad +# twitter.com/FontSquirrel/status/14855840545 +AddType image/svg+xml svg svgz +AddEncoding gzip svgz + +# Webfonts +AddType application/vnd.ms-fontobject eot +AddType application/x-font-ttf ttf ttc +AddType font/opentype otf +AddType application/x-font-woff woff +AddType application/font-woff2 woff2 + +# Assorted types +AddType image/x-icon ico +AddType image/webp webp +AddType text/cache-manifest appcache manifest +AddType text/x-component htc +AddType application/xml rss atom xml rdf +AddType application/x-chrome-extension crx +AddType application/x-opera-extension oex +AddType application/x-xpinstall xpi +AddType application/octet-stream safariextz +AddType application/x-web-app-manifest+json webapp +AddType text/x-vcard vcf +AddType application/x-shockwave-flash swf +AddType text/vtt vtt + + +# ---------------------------------------------------------------------- +# Gzip compression +# ---------------------------------------------------------------------- + + + # Force deflate for mangled headers developer.yahoo.com/blogs/ydn/posts/2010/12/pushing-beyond-gzipping/ + + + SetEnvIfNoCase ^(Accept-EncodXng|X-cept-Encoding|X{15}|~{15}|-{15})$ ^((gzip|deflate)\s*,?\s*)+|[X~-]{4,13}$ HAVE_Accept-Encoding + RequestHeader append Accept-Encoding "gzip,deflate" env=HAVE_Accept-Encoding + + + + # Compress all output labeled with one of 
the following MIME-types + + AddOutputFilterByType DEFLATE application/atom+xml \ + application/javascript \ + application/json \ + application/rss+xml \ + application/vnd.ms-fontobject \ + application/x-font-ttf \ + application/xhtml+xml \ + application/xml \ + font/opentype \ + image/svg+xml \ + image/x-icon \ + text/css \ + text/html \ + text/plain \ + text/x-component \ + text/xml + + +# ---------------------------------------------------------------------- +# Start rewrite engine +# ---------------------------------------------------------------------- + +# Turning on the rewrite engine is necessary for the following rules and +# features. FollowSymLinks must be enabled for this to work. + +# Some cloud hosting services require RewriteBase to be set: goo.gl/HOcPN +# If using the h5bp in a subdirectory, use `RewriteBase /foo` instead where +# 'foo' is your directory. + +# If your web host doesn't allow the FollowSymlinks option, you may need to +# comment it out and use `Options +SymLinksOfOwnerMatch`, but be aware of the +# performance impact: http://goo.gl/Mluzd + + + + + Options -Indexes +FollowSymLinks + AllowOverride All + Order Allow,Deny + Allow from All + + RewriteEngine On + RewriteBase / + + # Rules have NE added to the end in order to preserve either explicit or implicit # anchor tags + RewriteRule ^$ /welcome.html [R=301,NE] + + + + +# ---------------------------------------------------------------------- +# Specify cache retention policy +# ---------------------------------------------------------------------- + + + ExpiresActive On + ExpiresDefault "access plus 1 seconds" + ExpiresByType image/x-icon "access plus 2592000 seconds" + ExpiresByType image/jpeg "access plus 2592000 seconds" + ExpiresByType image/png "access plus 2592000 seconds" + ExpiresByType image/gif "access plus 2592000 seconds" + ExpiresByType text/css "access plus 604800 seconds" + ExpiresByType text/javascript "access plus 216000 seconds" + ExpiresByType 
application/x-javascript "access plus 216000 seconds" + + + + + Header set Cache-Control "max-age=2692000, public" + + + Header set Cache-Control "max-age=2692000, public" + + + Header set Cache-Control "max-age=216000, private" + + Header unset ETag + Header unset Last-Modified + + +# ---------------------------------------------------------------------- +# Prevent 404 errors for non-existing redirected folders +# ---------------------------------------------------------------------- + +# without -MultiViews, Apache will give a 404 for a rewrite if a folder of the +# same name does not exist. +# webmasterworld.com/apache/3808792.htm + +Options -MultiViews + +# ---------------------------------------------------------------------- +# Custom 404 page +# ---------------------------------------------------------------------- + +# You can add custom pages to handle 500 or 403 pretty easily, if you like. +# If you are hosting your site in subdirectory, adjust this accordingly +# e.g. ErrorDocument 404 /subdir/404.html +# ErrorDocument 404 /404.html + + +# ---------------------------------------------------------------------- +# UTF-8 encoding +# ---------------------------------------------------------------------- + +# Use UTF-8 encoding for anything served text/plain or text/html +AddDefaultCharset utf-8 + +# Force UTF-8 for a number of file formats +AddCharset utf-8 .atom .css .js .json .rss .vtt .xml + + +# ---------------------------------------------------------------------- +# A little more security +# ---------------------------------------------------------------------- + +# "-Indexes" will have Apache block users from browsing folders without a +# default document Usually you should leave this activated, because you +# shouldn't allow everybody to surf through every folder on your server (which +# includes rather private places like CMS system folders). 
+ + Options -Indexes + + +# Block access to "hidden" directories or files whose names begin with a +# period. This includes directories used by version control systems such as +# Subversion or Git. + + RewriteCond %{SCRIPT_FILENAME} -d [OR] + RewriteCond %{SCRIPT_FILENAME} -f + RewriteRule "(^|/)\." - [F] + + +# Block access to backup and source files. These files may be left by some +# text/html editors and pose a great security danger, when anyone can access +# them. + + Order allow,deny + Deny from all + Satisfy All + diff --git a/.vale.ini b/.vale.ini new file mode 100644 index 000000000..0d97147d2 --- /dev/null +++ b/.vale.ini @@ -0,0 +1,8 @@ +StylesPath = styles + +MinAlertLevel = suggestion + +Packages = RedHat + +[*] +BasedOnStyles = RedHat \ No newline at end of file diff --git a/.vale/styles/Vocab/OpenShiftDocs/accept.txt b/.vale/styles/Vocab/OpenShiftDocs/accept.txt new file mode 100644 index 000000000..d79ca1ba7 --- /dev/null +++ b/.vale/styles/Vocab/OpenShiftDocs/accept.txt @@ -0,0 +1,11 @@ +# Regex terms added to accept.txt are ignored by the Vale linter and override RedHat Vale rules. +# Add terms that have a corresponding incorrectly capitalized form to reject.txt. + +[Pp]assthrough +Assisted Installer +Control Plane Machine Set Operator +custom resource +custom resources +MetalLB +Operator +Operators \ No newline at end of file diff --git a/.vale/styles/Vocab/OpenShiftDocs/reject.txt b/.vale/styles/Vocab/OpenShiftDocs/reject.txt new file mode 100644 index 000000000..5a9d2b4c2 --- /dev/null +++ b/.vale/styles/Vocab/OpenShiftDocs/reject.txt @@ -0,0 +1,15 @@ +# Regex terms added to reject.txt are highlighted as errors by the Vale linter and override RedHat Vale rules. +# Add terms that have a corresponding correctly capitalized form to accept.txt. + +[Dd]eployment [Cc]onfigs? +[Dd]eployment [Cc]onfigurations? +[Oo]peratorize +[Ss]ingle [Nn]ode OpenShift +[Tt]hree [Nn]ode OpenShift +AI +configuration maps? +MachineSets +machinesets? +minions? 
+operators? +SNO \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..a20e22283 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,12 @@ +{ + "spellright.language": [], + "spellright.documentTypes": [ + "markdown", + "latex", + "plaintext" + ], + "cSpell.words": [ + "OIDC", + "productname" + ] +} \ No newline at end of file diff --git a/Gemfile b/Gemfile index c811fa6cc..da8b4c9ba 100644 --- a/Gemfile +++ b/Gemfile @@ -1,3 +1,3 @@ source 'https://rubygems.org' -gem 'asciidoctor', '~>1.5.6.2' +gem 'asciidoctor' diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..a8d675477 --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ + +build: + @podman run -it --rm --name asciidoctor --detach -v $(CURDIR):/documents/:z asciidoctor/docker-asciidoctor + @-podman exec -it asciidoctor bash -c "source build_docs" + @podman kill asciidoctor + +view: + xdg-open file://$(CURDIR)/dist/welcome.html + +all: build view + diff --git a/README b/README deleted file mode 100644 index ed116240f..000000000 --- a/README +++ /dev/null @@ -1,33 +0,0 @@ -Documentation for the {productname} product - -These are some really basic guidelines to get started. - -Structure of this repo: -* Books go in a top level folder. For example: manage_quay. -* Each book folder has a symlink to the top level modules folder. -* A book's TOC is defined in the master.adoc file contained within the book's folder. -* master.adoc contains includes to modules (chapters) which are created in the top level modules folder. -* You will also need to define a docinfo.xml in the book's folder to contain basic information about a book. - -To get started: - -(this assumes you have been granted read/write access to the GitLab repo. If not, contact the owner of this repo). - -1. git clone https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation.git -2. cd quay-documentation -3. -4. 
git push origin master - -Once merge to master is done and you want to stage: - -1. git fetch origin -2. git rebase origin/master -3. git checkout stage -4. git rebase origin/stage -5. git cherry-pick - -In the last step, you are cherry picking the commit id of your work in the master. - -More instructions to follow once we have a full fledged product. - - diff --git a/README.adoc b/README.adoc new file mode 100644 index 000000000..fb6db7746 --- /dev/null +++ b/README.adoc @@ -0,0 +1,228 @@ += Contributing to Red Hat Quay documentation +:downstream: + +== Repository structure + +The Red Hat Quay repository is structured as follows: + +* Books go into a top-level directory. For example, `repo_dir/manage_quay` or `repo_dir/release_notes`. +* Each book directory has a symlink to the top-level `repo_dir/modules` directory. +* A book's _table of contents_, or ToC, is defined in the `master.adoc` that is contained within the book's directory. Each directory has its own `master.adoc` file. +* The `master.adoc` file contains `include` statements to _modules_, which act as chapters and subchapters. These are created in the top-level `modules/` directory. +* The `docinfo.xml` in the book's directory contains basic information about the book, such as the product name, the product version, and the organization name. + +== Setting up your repository for contribution + +ifdef::downstream[] + +. For _downstream_ contribution, which is the official Red Hat Quay documentation found on the Red Hat portal, you must obtain _Developer_, _Maintainer_, or _Owner_ permissions for the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository]. ++ +To obtain the necessary permissions, contact a Maintainer or Owner from the Gitlab project members https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/-/project_members[list]. Default to contacting Steven Smith. + +endif::downstream[] + +. 
Fork the https://github.com/quay/quay-docs[upstream repository] by clicking the *Fork* button. + +. Clone your fork of the repository to your computer: ++ +---- +$ git clone git@github.com:/quay-docs.git +---- ++ +Substitute `` with your GitHub user name. + +. Navigate to the cloned repository: ++ +---- +$ cd quay-docs +---- + +. Add the `upstream` remote: ++ +---- +$ git remote add upstream git@github.com:quay/quay-docs.git +---- + +ifdef::downstream[] + +. Add the `downstream` remote: ++ +---- +$ git remote add downstream git@gitlab.cee.redhat.com:red-hat-quay-documentation/quay-documentation.git +---- + +endif::downstream[] + +[id="how-do-i-make-a-contribution"] +== How do I make a contribution? + +To contribute to Red Hat Quay documentation, you must create a new feature branch based off of the `master` branch. + +. Checkout the `master` branch if you have not already: ++ +---- +$ git checkout master +---- + +. Create a new feature branch based off the `master` branch: ++ +---- +$ git checkout -b master +---- ++ +Substitute `` with a name that reflects the contribution you intend to make. + +. Edit the files and commit them using `git add` and `git commit`. Make your commit in present tense, highlighting the change that you have made. + +. Push your commits to your fork of the upstream repository: ++ +---- +$ git push origin +---- + +. Create a pull request from `/` to `quay/master`. For that, either: ++ +-- +.. Visit the link from the output of the previous step. The link is there after the first push only. + +.. Navigate to https://github.com//quay-docs. Use the interface to create the pull request +-- ++ +As you create the pull request, tag one of the repository collaborators and ask them to review the pull request. The default contact should be Steven Smith. + +. Work with the reviewer to finish your pull request. After the suggested changes have been made, the reviewer will merge the pull request. + +. 
After your pull request is merged into the `master` branch, your updates will become live in the https://docs.projectquay.io[Project Quay documentation]. Eventually, those changes will end up on the portal. + +== How do I make a contribution to the downstream documentation? + +Like upstream documentation, downstream documentation primarily resides in the `master` branch of the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository]. For most users, the only necessary step is to create a feature branch from the `master` branch. + +To make a contribution to upstream documentation, follow the instructions at <<how-do-i-make-a-contribution>>. Be sure to work with the documentation lead for Red Hat Quay to get the content reviewed, merged, and published on the downstream portal. + +=== How Red Hat Quay downstream documentation is branched + +After you have created and merged a pull request, relevant branches are then reset to match the `master` branch. For example, if the current version of Red Hat Quay is 3.10, then the relevant 3.10 branch (`redhat-3.10`) is reset to match the `master` branch. This ensures that the most recent content changes are up to date in the most recent version branch. + +After the most recent branch is reset to match the `master` branch, the `3.0-stage` branch is then reset to match the most recent version branch (for example, `3.0-stage` is reset to match `redhat-3.10`). The reason for this is that the Red Hat Quay `3` version is copied directly from the most recent version of Red Hat Quay. + +[id="how-do-i-keep-my-local-master-up-to-date-with-remote-master"] +== How do I keep my local `master` up-to-date with remote `master`? + +As contributors push and merge pull requests to the `master` branch, you must keep your local `master` branch up to date. Prior to making any changes to the documentation, you should rebase your local `master` branch to match the most recent version of the remote `master` branch. + +. 
Check out the `master` branch: ++ +---- +$ git checkout master +---- + +. Fetch the commits that are in the upstream repository but not in your local repository: ++ +---- +$ git fetch upstream +---- + +. Apply the fetched commits to your local `master`: ++ +---- +$ git rebase upstream/master +---- + +Now, your local `master` branch is up to date. + +== How do I keep my feature branch up-to-date with the master branch? + +As new commits appear on the `master` branch, your existing feature branch does not automatically incorporate those commits. To prevent your feature branch and `master` from diverging, you need to manually update your feature branch to the `master` branch: + +. Bring your local `master` branch up-to-date with the remote `master` branch by following the instructions at <<how-do-i-keep-my-local-master-up-to-date-with-remote-master>>. + +. Switch to the feature branch that you want to update: ++ +---- +$ git checkout 
+---- + +. Apply the commits from the `master` branch to your ``: ++ +---- +$ git rebase upstream/master +---- ++ + +. Push the updated `` to your fork of the upstream repository. Since your local `` has been updated, it might be incompatible with the remote ``, so you need to use the `--force` option: ++ +[IMPORTANT] +==== +Never use the `--force` argument when pushing to `master`. +==== ++ +---- +$ git push --force origin 
+---- + +ifdef::downstream[] + +//// +[id="how-do-i-keep-the-downstream-repository-and-branch-up-to-date"] +== How do I keep the downstream repository and branch up-to-date? + +To bring the https://gitlab.cee.redhat.com/red-hat-quay-documentation/quay-documentation/[downstream repository] up-to-date with the upstream repository, you need to push the changes to the `3.0-master` branch of the downstream repository and merge `3.0-master` into `3.0-stage`, from which downstream documentation is published: + +. Update your local `3.0-master` branch. <> + +. Switch to the `3.0-master` branch: ++ +---- +$ git checkout 3.0-master +---- + +. 
Push `3.0-master` to the downstream repository: ++ +---- +$ git push downstream +---- + +. Switch to the `3.0-stage` branch: ++ +---- +$ git checkout 3.0-stage +---- + +. Merge `3.0-master` into `3.0-stage`: ++ +---- +$ git merge 3.0-master +---- + +. Push `3.0-stage` to the downstream repository: ++ +---- +$ git push downstream +---- + +endif::downstream[] +//// + +== How do I make content appear in upstream but not in downstream? + +You can make content appear only in the upstream by using the `ifdef::upstream` conditional around the content that you only want to appear upstream. For example: + +---- +\ifdef::upstream[] + +\endif::upstream[] +---- + +ifdef::downstream[] +== How do I make content appear in downstream but not in upstream? + +You can make content appear only in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/[downstream] by using the `ifdef::downstream` conditional around the content that you only want to appear downstream. For example: + +---- +\ifdef::downstream[] + +\endif::downstream[] +---- +endif::downstream[] + diff --git a/README.md b/README.md new file mode 100644 index 000000000..785921352 --- /dev/null +++ b/README.md @@ -0,0 +1,73 @@ +# Documentation for Project Quay + + +## Contributing + +Testing changes. Please ignore. + +These are some really basic guidelines to get started. + +Structure of this repo: +* Books go in a top level folder. For example: manage_quay. +* Each book folder has a symlink to the top level modules folder. +* A book's TOC is defined in the master.adoc file contained within the book's folder. +* master.adoc contains includes to modules (chapters) which are created in the top level modules folder. +* You will also need to define a docinfo.xml in the book's folder to contain basic information about a book. + +To get started: + +1. Fork this repository +2. git clone https://github.com/quay/quay-docs.git +3. cd quay-docs +4. git remote add git@github.com:/quay-docs.git +5. 
git fetch --all + +To contribute: + +1. git checkout master +2. git checkout -b +3. Edit files with changes +4. git commit -a -m "description of changes" +5. git push +6. Visit https://github.com/quay/quay-docs and create pull-request against master + + +Once merge to master is done and you want to stage: + +1. git fetch origin +2. git rebase origin/master +3. git checkout stage +4. git rebase origin/stage +5. git cherry-pick + +In the last step, you are cherry picking the commit id of your work in the master. + +More instructions to follow once we have a full fledged product. + +## Deploying to OpenShift + +### Adding Let's Encrypt operator in production + +```bash +$ oc create -fhttps://raw.githubusercontent.com/tnozicka/openshift-acme/master/deploy/letsencrypt-live/single-namespace/{role,serviceaccount,imagestream,deployment}.yaml +$ oc policy add-role-to-user openshift-acme --role-namespace="$(oc project --short)" -z openshift-acme +``` + +### Deploying application template + +#### Preview + +```bash +$ oc new-app deployment-template.yml \ + -p NAME=projectquay-docs-preview \ + -p SOURCE_REPOSITORY_URL=https://github.com//quay-docs.git \ + -p SOURCE_REPOSITORY_REF= +``` + +#### Production + +```bash +$ oc new-app deployment-template.yml \ + -p NAME=projectquay-docs-production \ + -p APPLICATION_DOMAIN=docs.projectquay.io +``` diff --git a/_redirects b/_redirects new file mode 100644 index 000000000..39ab53359 --- /dev/null +++ b/_redirects @@ -0,0 +1 @@ +/ /dist/welcome.html diff --git a/access_permissions_management/docinfo.xml b/access_permissions_management/docinfo.xml new file mode 100644 index 000000000..45f4c60ad --- /dev/null +++ b/access_permissions_management/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Managing access and permissions + + Managing access and permissions: Roles, Robots, and Repository Security + + + Red Hat OpenShift Documentation Team + + diff --git a/access_permissions_management/master.adoc 
b/access_permissions_management/master.adoc new file mode 100644 index 000000000..e74b99f15 --- /dev/null +++ b/access_permissions_management/master.adoc @@ -0,0 +1,50 @@ +include::modules/attributes.adoc[] + +:_content-type: ASSEMBLY +[id="access-permissions-management-quay"] += Managing access and permissions +:context: quay-security + +{productname} offers a comprehensive permissions model, which allows administrators the ability to control who can access, manage, and modify repositories at a granular level. The following sections show you how to manage user access, define team roles, set permissions for users and robot accounts, and define the visibility of a repository. These guides include instructions using both the {productname} UI and the API. + +The following topics are covered: + +* Role-based access controls +* Adjusting repository visibility +* Creating and managing robot accounts +* Clair vulnerability reporting + +//rbac + +include::modules/role-based-access-control-intro.adoc[leveloffset=+1] +include::modules/teams-overview.adoc[leveloffset=+2] +include::modules/set-team-role.adoc[leveloffset=+3] +include::modules/managing-team-members-repo-permissions-ui.adoc[leveloffset=+3] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+3] +include::modules/default-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/default-permissions-api.adoc[leveloffset=+2] +include::modules/allow-access-user-repo.adoc[leveloffset=+2] +include::modules/adjust-access-user-repo-api.adoc[leveloffset=+2] + +//Registry restriction +include::modules/registry-wide-access-management.adoc[leveloffset=+1] +include::modules/managing-restricted-users.adoc[leveloffset=+2] +include::modules/managing-superuser-full-access.adoc[leveloffset=+2] + +//private repo +include::modules/proc_use-quay-create-repo.adoc[leveloffset=+1] +include::modules/adjusting-repository-visibility-via-the-ui.adoc[leveloffset=+2] 
+include::modules/adjusting-repository-access-via-the-api.adoc[leveloffset=+2] + +//robot accounts +include::modules/robot-account-overview.adoc[leveloffset=+1] +include::modules/creating-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/creating-robot-account-api.adoc[leveloffset=+2] +include::modules/managing-robot-account-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/disabling-robot-account.adoc[leveloffset=+2] +include::modules/regenerating-robot-account-token-api.adoc[leveloffset=+2] +include::modules/deleting-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/deleting-robot-account-api.adoc[leveloffset=+2] + +//clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] diff --git a/deploy_quay_on_openshift/modules b/access_permissions_management/modules similarity index 100% rename from deploy_quay_on_openshift/modules rename to access_permissions_management/modules diff --git a/api-v2-public.yaml b/api-v2-public.yaml new file mode 100644 index 000000000..db3527a6c --- /dev/null +++ b/api-v2-public.yaml @@ -0,0 +1,521 @@ +openapi: 3.0.3 +info: + title: Quay API + version: 1.0.0 +servers: + - url: "{protocol}://{host}" + description: "Set your own Quay registry URL" + variables: + protocol: + default: "https" + enum: + - "http" + - "https" + host: + default: "quay-server.example.com" + description: "Enter your Quay registry hostname" + +security: + - BearerAuth: [] + +tags: + - name: "Application specific tokens" + description: "Manage application-specific tokens by using the API" + - name: Builds + description: API endpoints for managing Quay repository builds + - name: Discovery + description: API discovery information + - name: Error + description: Obtain error details by using the API +paths: + /api/v1/user/apptoken: + get: + tags: + - "Application specific tokens" + summary: List app-specific tokens + description: Retrieves a list of application-specific tokens for the user. 
+ operationId: listAppTokens + security: + - BearerAuth: [] + parameters: + - name: expiring + in: query + required: false + schema: + type: boolean + description: "If true, only returns those tokens expiring soon" + responses: + '200': + description: Successful invocation + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + post: + tags: + - "Application specific tokens" + summary: Create a new app-specific token + description: Creates a new application-specific token for the user. + operationId: createAppToken + security: + - BearerAuth: [] + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + title: + type: string + example: "MyAppToken" + responses: + '201': + description: Successful creation + content: + application/json: + schema: + type: object + properties: + token: + type: string + example: "abc123xyz" + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + + /api/v1/user/apptoken/{token_uuid}: + get: + tags: + - "Application specific tokens" + summary: Get details of a specific app token + description: Retrieves details for a specific application token. 
+ operationId: getAppToken + security: + - BearerAuth: [] + parameters: + - name: token_uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Successful invocation + content: + application/json: + schema: + type: object + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + delete: + tags: + - "Application specific tokens" + summary: Revoke a specific app token + description: Revokes a specific application token for the user. + operationId: revokeAppToken + security: + - BearerAuth: [] + parameters: + - name: token_uuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Deleted + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' +#build + /api/v1/repository/{repository}/build/{build_uuid}/status: + get: + tags: + - "Builds" + summary: Return the status for the builds specified by the build UUID + parameters: + - name: repository + in: path + required: true + schema: + type: string + description: The full path of the repository (e.g., namespace/name) + - name: build_uuid + in: path + required: true + schema: + type: string + description: The UUID of 
the build + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + /api/v1/repository/{repository}/build/{build_uuid}/logs: + get: + tags: + - "Builds" + summary: Return the build logs for the specified build UUID + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: build_uuid + in: path + required: true + schema: + type: string + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + /api/v1/repository/{repository}/build/{build_uuid}: + get: + tags: + - "Builds" + summary: Returns information about a build + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: build_uuid + in: path + required: true + schema: + type: string + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + delete: + tags: + - "Builds" + summary: Cancel a repository build + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: build_uuid + in: path + required: true + schema: + type: string + responses: + "204": + description: Deleted + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + + /api/v1/repository/{repository}/build/: + post: + tags: + - "Builds" + summary: Request a repository build and push + parameters: + - name: repository + in: path + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + properties: + file_id: + type: 
string + archive_url: + type: string + subdirectory: + type: string + dockerfile_path: + type: string + context: + type: string + pull_robot: + type: string + tags: + type: array + items: + type: string + responses: + "201": + description: Successful creation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + get: + tags: + - "Builds" + summary: Get the list of repository builds + parameters: + - name: repository + in: path + required: true + schema: + type: string + - name: since + in: query + schema: + type: integer + description: Returns all builds since the given Unix timestamp + - name: limit + in: query + schema: + type: integer + description: The maximum number of builds to return + responses: + "200": + description: Successful invocation + "400": + description: Bad Request + "401": + description: Session required + "403": + description: Unauthorized access + "404": + description: Not found + /api/v1/discovery: + get: + tags: + - "Discovery" + summary: List all available API endpoints + description: Returns a list of all API endpoints available in the Swagger API format. + operationId: getDiscovery + parameters: + - name: internal + in: query + description: Whether to include internal APIs. 
+ required: false + schema: + type: boolean + responses: + '200': + description: Successful invocation + content: + application/json: + schema: + type: array + items: + type: string + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + + /api/v1/error/{error_type}: + get: + tags: + - "Error" + summary: Get a detailed description of the error + description: Retrieves a detailed description of the specified error type. + operationId: getErrorDescription + parameters: + - name: error_type + in: path + description: The error code identifying the type of error. + required: true + schema: + type: string + responses: + '200': + description: Successful invocation + content: + application/json: + schema: + $ref: '#/components/schemas/ApiErrorDescription' + '400': + description: Bad Request + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '401': + description: Session required + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '403': + description: Unauthorized access + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + '404': + description: Not found + content: + application/json: + schema: + $ref: '#/components/schemas/ApiError' + +components: + securitySchemes: + BearerAuth: + type: http + scheme: bearer + schemas: + ApiError: + type: object + properties: + status: + type: integer + description: HTTP status code of the error. + detail: + type: string + description: A short message describing the error. 
+ ApiErrorDescription: + type: object + properties: + error: + type: string + description: The error code. + message: + type: string + description: A detailed description of the error. \ No newline at end of file diff --git a/deploy_quay_on_openshift/docinfo.xml b/api/docinfo.xml similarity index 62% rename from deploy_quay_on_openshift/docinfo.xml rename to api/docinfo.xml index bb66b233a..ef4e7fa4b 100644 --- a/deploy_quay_on_openshift/docinfo.xml +++ b/api/docinfo.xml @@ -1,8 +1,8 @@ {productname} -3 -Deploy {productname} on OpenShift +{producty} +{productname} API Guide - Deploy {productname} on an OpenShift Cluster + Use the {productname} API Red Hat OpenShift Documentation Team diff --git a/api/master.adoc b/api/master.adoc new file mode 100644 index 000000000..271e62b72 --- /dev/null +++ b/api/master.adoc @@ -0,0 +1,134 @@ +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] +[id="api"] += {productname} API guide +:context: use-api + +The {productname} application programming interface (API) provides a comprehensive, RESTful interface for managing and automating tasks within {productname}. Designed around the link:https://oauth.net/2/[_OAuth 2.0 protocol_], this API enables secure, fine-grained access to {productname} resources, and allows administrators and users to perform such actions as creating repositories, managing images, setting permissions, and more. + +{productname} follows Semantic Versioning (SemVer) principles, ensuring predictable API stability across releases, such as: + +* *Major releases*: Introduce new capabilities. Might include breaking changes to API compatibility. For example, the API of {productname} _2.0_ differs from {productname} _3.0_. + +* *Minor releases*: Add new functionality in a backward-compatible manner. For example, a _3.y_ release adds functionality to the version _3._ release. + +* *Patch releases*: Deliver bug fixes and improvements while preserving backward compatibility with minor releases, such as _3.y.z_. 
+ +The following guide describes the {productname} API in more detail, and provides details on the following topics: + +* OAuth 2 access tokens and how they compare to traditional API tokens and {productname}'s robot tokens +* Generating an OAuth 2 access token +* Best practices for token management +* OAuth 2 access token capabilities +* Using the {productname} API +* {productname} API configuration examples + +This guide is accompanied with a second guide, link:https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html/red_hat_quay_api_reference/index[{productname} API reference], that provides information about all `api/v1` endpoints and how to access those endpoints with example commands. + +//overview +include::modules/token-overview.adoc[leveloffset=+1] + +//creating oauth 2 access token +include::modules/oauth2-access-tokens.adoc[leveloffset=+1] +include::modules/creating-oauth-access-token.adoc[leveloffset=+2] +include::modules/reassigning-oauth-access-token.adoc[leveloffset=+2] +include::modules/deleting-oauth-access-token.adoc[leveloffset=+2] + +//robot account tokens +include::modules/robot-account-tokens.adoc[leveloffset=+1] +include::modules/regenerating-robot-account-token-ui.adoc[leveloffset=+2] +include::modules/regenerating-robot-account-token-api.adoc[leveloffset=+2] + +//oci referrers +include::modules/oci-referrers-oauth-access-token.adoc[leveloffset=+1] +include::modules/creating-v2-oauth-access-token.adoc[leveloffset=+2] + +//how to use the API +include::modules/enabling-using-the-api.adoc[leveloffset=+1] +include::modules/configuring-api-calls.adoc[leveloffset=+2] +include::modules/using-the-api.adoc[leveloffset=+2] +include::modules/accessing-swagger-ui.adoc[leveloffset=+2] +include::modules/automating-quay-using-the-api.adoc[leveloffset=+2] + + +//API examples +include::modules/quay-api-examples.adoc[leveloffset=+1] +//application +include::modules/creating-oauth-application-api.adoc[leveloffset=+2] +//discovery 
+include::modules/discovering-quay-api-endpoints.adoc[leveloffset=+2] +//error +include::modules/quay-error-details.adoc[leveloffset=+2] +//global message +include::modules/api-global-messages.adoc[leveloffset=+2] +//viewing logs (aggregate) +include::modules/viewing-usage-logs-api.adoc[leveloffset=+2] +//exporting logs +include::modules/use-quay-export-logs-api.adoc[leveloffset=+2] +//manifest label +include::modules/adding-managing-labels-api.adoc[leveloffset=+2] +//mirror +include::modules/mirror-quay-api.adoc[leveloffset=+2] +//quota +include::modules/quota-management-api.adoc[leveloffset=+2] +//quota (organization) +include::modules/quota-organization-management-api.adoc[leveloffset=+3] +// quota limits organization +include::modules/quota-limit-api.adoc[leveloffset=+3] +//quota (user limits and policies) +include::modules/quota-limit-user-api.adoc[leveloffset=+3] +//organization +include::modules/organization-management-api.adoc[leveloffset=+2] +//org creation +include::modules/org-create-api.adoc[leveloffset=+3] +include::modules/org-delete-api.adoc[leveloffset=+3] +//member management +include::modules/org-team-member-api.adoc[leveloffset=+3] +//application +include::modules/org-application-create-api.adoc[leveloffset=+3] +//proxy-cache +include::modules/org-proxy-cache-configuration-api.adoc[leveloffset=+3] +//permission +include::modules/repo-permission-api.adoc[leveloffset=+2] +include::modules/user-permissions-repo.adoc[leveloffset=+3] +include::modules/team-permissions-api.adoc[leveloffset=+3] +//policy +include::modules/repo-policy-api.adoc[leveloffset=+2] +include::modules/creating-org-policy-api.adoc[leveloffset=+3] +include::modules/creating-policy-api-current-user.adoc[leveloffset=+3] +include::modules/creating-repository-policy-api.adoc[leveloffset=+3] +include::modules/creating-policy-api-other-user.adoc[leveloffset=+3] +//repo +include::modules/repo-manage-api.adoc[leveloffset=+2] +include::modules/repo-creation-management.adoc[leveloffset=+3] 
+include::modules/creating-notifications-api.adoc[leveloffset=+3] +//robot account +include::modules/robot-account-manage-api.adoc[leveloffset=+2] +include::modules/creating-robot-account-api.adoc[leveloffset=+3] +include::modules/robot-account-permissions-api.adoc[leveloffset=+3] +include::modules/deleting-robot-account-api.adoc[leveloffset=+3] +//search +include::modules/search-api.adoc[leveloffset=+2] +//sec-scan +include::modules/security-scanning-api.adoc[leveloffset=+2] +//superuser +include::modules/superuser-manage-api.adoc[leveloffset=+2] +include::modules/creating-user-account-quay-api.adoc[leveloffset=+3] +include::modules/deleting-user-cli-api.adoc[leveloffset=+3] +include::modules/managing-organization-superuser-api.adoc[leveloffset=+3] +include::modules/listing-repos-superuser-api.adoc[leveloffset=+3] +include::modules/managing-organization-quota-superuser-api.adoc[leveloffset=+3] +include::modules/managing-user-quota-superuser-api.adoc[leveloffset=+3] +include::modules/retrieving-build-info-superuser-api.adoc[leveloffset=+3] +include::modules/managing-service-keys-api.adoc[leveloffset=+3] +//tag +include::modules/managing-tags-api.adoc[leveloffset=+2] +// team member management +include::modules/managing-teams-api.adoc[leveloffset=+2] +include::modules/managing-team-members-api.adoc[leveloffset=+3] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+3] +include::modules/deleting-team-within-organization-api.adoc[leveloffset=+3] +//build +include::modules/managing-builds-api.adoc[leveloffset=+2] +//user +include::modules/managing-user-options-api.adoc[leveloffset=+2] \ No newline at end of file diff --git a/api/modules b/api/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/api/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/api_reference/docinfo.xml b/api_reference/docinfo.xml new file mode 100644 index 000000000..267e8ebfc --- /dev/null +++ b/api_reference/docinfo.xml 
@@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} API reference + + {productname} API reference + + + Red Hat OpenShift Documentation Team + + diff --git a/api_reference/master.adoc b/api_reference/master.adoc new file mode 100644 index 000000000..5d35387f3 --- /dev/null +++ b/api_reference/master.adoc @@ -0,0 +1,280 @@ +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] +[id="api-reference"] += {productname} API reference +:context: use-api + +The {productname} application programming interface (API) provides a comprehensive, RESTful interface for managing and automating tasks within {productname}. Designed around the link:https://oauth.net/2/[_OAuth 2.0 protocol_], this API enables secure, fine-grained access to {productname} resources, and allows administrators and users to perform such actions as creating repositories, managing images, setting permissions, and more. + +{productname} follows Semantic Versioning (SemVer) principles, ensuring predictable API stability across releases, such as: + +* *Major releases*: Introduce new capabilities. Might include breaking changes to API compatibility. For example, the API of {productname} _2.0_ differs from {productname} _3.0_. + +* *Minor releases*: Add new functionality in a backward-compatible manner. For example, a _3.y_ release adds functionality to the version _3._ release. + +* *Patch releases*: Deliver bug fixes and improvements while preserving backward compatibility with minor releases, such as _3.y.z_. 
+ +The following guide describes the {productname} API in more detail, and provides details on the following topics: + +* API endpoint structure, including supported HTTP methods +* Request and response schemas for each endpoint +* Required and optional parameters +* Authentication and authorization requirements +* Common error codes and troubleshooting information + +For a more guided approach, including token overview, management strategies, understanding API endpoints, and more, refer to the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/red_hat_quay_api_guide/index[{productname} API guide]. + +include::modules/api-authorization.adoc[leveloffset=+1] + +//example procedures provided +include::modules/api-appspecifictokens.adoc[leveloffset=+1] +include::modules/api-appspecifictokens-createAppToken.adoc[leveloffset=+2] +include::modules/api-appspecifictokens-listAppTokens.adoc[leveloffset=+2] +include::modules/api-appspecifictokens-getAppToken.adoc[leveloffset=+2] +include::modules/api-appspecifictokens-revokeAppToken.adoc[leveloffset=+2] + +include::modules/api-build.adoc[leveloffset=+1] +include::modules/api-build-getRepoBuildStatus.adoc[leveloffset=+2] +include::modules/api-build-getRepoBuildLogs.adoc[leveloffset=+2] +include::modules/api-build-getRepoBuild.adoc[leveloffset=+2] +include::modules/api-build-cancelRepoBuild.adoc[leveloffset=+2] +include::modules/api-build-requestRepoBuild.adoc[leveloffset=+2] +include::modules/api-build-getRepoBuilds.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-discovery.adoc[leveloffset=+1] +include::modules/api-discovery-discovery.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-error.adoc[leveloffset=+1] +include::modules/api-error-getErrorDescription.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-globalmessages.adoc[leveloffset=+1] +include::modules/api-globalmessages-createGlobalMessage.adoc[leveloffset=+2] 
+include::modules/api-globalmessages-getGlobalMessages.adoc[leveloffset=+2] +include::modules/api-globalmessages-deleteGlobalMessage.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-logs.adoc[leveloffset=+1] +include::modules/api-logs-getAggregateUserLogs.adoc[leveloffset=+2] +include::modules/api-logs-exportUserLogs.adoc[leveloffset=+2] +include::modules/api-logs-listUserLogs.adoc[leveloffset=+2] +include::modules/api-logs-getAggregateOrgLogs.adoc[leveloffset=+2] +include::modules/api-logs-exportOrgLogs.adoc[leveloffset=+2] +include::modules/api-logs-listOrgLogs.adoc[leveloffset=+2] +include::modules/api-logs-getAggregateRepoLogs.adoc[leveloffset=+2] +include::modules/api-logs-exportRepoLogs.adoc[leveloffset=+2] +include::modules/api-logs-listRepoLogs.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-manifest.adoc[leveloffset=+1] +include::modules/api-manifest-getManifestLabel.adoc[leveloffset=+2] +include::modules/api-manifest-deleteManifestLabel.adoc[leveloffset=+2] +include::modules/api-manifest-addManifestLabel.adoc[leveloffset=+2] +include::modules/api-manifest-listManifestLabels.adoc[leveloffset=+2] +include::modules/api-manifest-getRepoManifest.adoc[leveloffset=+2] + +//example procedures provided +include::modules/api-mirror.adoc[leveloffset=+1] +include::modules/api-mirror-syncCancel.adoc[leveloffset=+2] +include::modules/api-mirror-syncNow.adoc[leveloffset=+2] +include::modules/api-mirror-getRepoMirrorConfig.adoc[leveloffset=+2] +include::modules/api-mirror-changeRepoMirrorConfig.adoc[leveloffset=+2] +include::modules/api-mirror-createRepoMirrorConfig.adoc[leveloffset=+2] + +//commands provided +include::modules/api-namespacequota.adoc[leveloffset=+1] +include::modules/api-namespacequota-listUserQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-getOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-changeOrganizationQuotaLimit.adoc[leveloffset=+2] 
+include::modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-createOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-listOrganizationQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-getUserQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-listUserQuotaLimit.adoc[leveloffset=+2] +include::modules/api-namespacequota-getOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-changeOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-deleteOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-createOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-listOrganizationQuota.adoc[leveloffset=+2] +include::modules/api-namespacequota-getUserQuota.adoc[leveloffset=+2] + +//done +include::modules/api-organization.adoc[leveloffset=+1] +include::modules/api-organization-createOrganization.adoc[leveloffset=+2] +include::modules/api-organization-validateProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationCollaborators.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-updateOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-deleteOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-createOrganizationApplication.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationApplications.adoc[leveloffset=+2] +include::modules/api-organization-getProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-deleteProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-createProxyCacheConfig.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationMember.adoc[leveloffset=+2] 
+include::modules/api-organization-removeOrganizationMember.adoc[leveloffset=+2] +include::modules/api-organization-getOrganizationMembers.adoc[leveloffset=+2] +include::modules/api-organization-getOrganization.adoc[leveloffset=+2] +include::modules/api-organization-changeOrganizationDetails.adoc[leveloffset=+2] +include::modules/api-organization-deleteAdminedOrganization.adoc[leveloffset=+2] +include::modules/api-organization-getApplicationInformation.adoc[leveloffset=+2] +//done +include::modules/api-permission.adoc[leveloffset=+1] +include::modules/api-permission-getUserTransitivePermission.adoc[leveloffset=+2] +include::modules/api-permission-getUserPermissions.adoc[leveloffset=+2] +include::modules/api-permission-changeUserPermissions.adoc[leveloffset=+2] +include::modules/api-permission-deleteUserPermissions.adoc[leveloffset=+2] +include::modules/api-permission-getTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-changeTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-deleteTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-listRepoTeamPermissions.adoc[leveloffset=+2] +include::modules/api-permission-listRepoUserPermissions.adoc[leveloffset=+2] +//done but might need example procs +include::modules/api-policy.adoc[leveloffset=+1] +include::modules/api-policy-createOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-listOrganizationAutoPrunePolicies.adoc[leveloffset=+2] +include::modules/api-policy-getOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-updateOrganizationAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-createRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-listRepositoryAutoPrunePolicies.adoc[leveloffset=+2] +include::modules/api-policy-getRepositoryAutoPrunePolicy.adoc[leveloffset=+2] 
+include::modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-updateRepositoryAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-createUserAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-listUserAutoPrunePolicies.adoc[leveloffset=+2] +include::modules/api-policy-getUserAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-deleteUserAutoPrunePolicy.adoc[leveloffset=+2] +include::modules/api-policy-updateUserAutoPrunePolicy.adoc[leveloffset=+2] + +//done +include::modules/api-prototype.adoc[leveloffset=+1] +include::modules/api-prototype-updateOrganizationPrototypePermission.adoc[leveloffset=+2] +include::modules/api-prototype-deleteOrganizationPrototypePermission.adoc[leveloffset=+2] +include::modules/api-prototype-createOrganizationPrototypePermission.adoc[leveloffset=+2] +include::modules/api-prototype-getOrganizationPrototypePermissions.adoc[leveloffset=+2] +//won't do +include::modules/api-referrers.adoc[leveloffset=+1] +include::modules/api-referrers-getReferrers.adoc[leveloffset=+2] + +//done +include::modules/api-repository.adoc[leveloffset=+1] +//do not edit +include::modules/api-repository-createRepo.adoc[leveloffset=+2] +include::modules/api-repository-listRepos.adoc[leveloffset=+2] +include::modules/api-repository-changeRepoVisibility.adoc[leveloffset=+2] +include::modules/api-repository-changeRepoState.adoc[leveloffset=+2] +include::modules/api-repository-getRepo.adoc[leveloffset=+2] +include::modules/api-repository-updateRepo.adoc[leveloffset=+2] +include::modules/api-repository-deleteRepository.adoc[leveloffset=+2] + +//done +include::modules/api-repositorynotification.adoc[leveloffset=+1] +include::modules/api-repositorynotification-testRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-getRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-deleteRepoNotification.adoc[leveloffset=+2] 
+include::modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc[leveloffset=+2] +include::modules/api-repositorynotification-createRepoNotification.adoc[leveloffset=+2] +include::modules/api-repositorynotification-listRepoNotifications.adoc[leveloffset=+2] +//done +include::modules/api-robot.adoc[leveloffset=+1] +include::modules/api-robot-getUserRobots.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobotPermissions.adoc[leveloffset=+2] +include::modules/api-robot-regenerateOrgRobotToken.adoc[leveloffset=+2] +include::modules/api-robot-getUserRobotPermissions.adoc[leveloffset=+2] +include::modules/api-robot-regenerateUserRobotToken.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobot.adoc[leveloffset=+2] +include::modules/api-robot-createOrgRobot.adoc[leveloffset=+2] +include::modules/api-robot-deleteOrgRobot.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobots.adoc[leveloffset=+2] +include::modules/api-robot-getUserRobot.adoc[leveloffset=+2] +include::modules/api-robot-createUserRobot.adoc[leveloffset=+2] +include::modules/api-robot-deleteUserRobot.adoc[leveloffset=+2] +include::modules/api-robot-getOrgRobotFederation.adoc[leveloffset=+2] +include::modules/api-robot-createOrgRobotFederation.adoc[leveloffset=+2] +//include::modules/api-robot-deleteOrgRobotFederation.adoc[leveloffset=+2] + +//done +include::modules/api-search.adoc[leveloffset=+1] +include::modules/api-search-conductRepoSearch.adoc[leveloffset=+2] +include::modules/api-search-conductSearch.adoc[leveloffset=+2] +include::modules/api-search-getMatchingEntities.adoc[leveloffset=+2] + +//done +include::modules/api-secscan.adoc[leveloffset=+1] +include::modules/api-secscan-getRepoManifestSecurity.adoc[leveloffset=+2] +//done +include::modules/api-superuser.adoc[leveloffset=+1] +include::modules/api-superuser-createInstallUser.adoc[leveloffset=+2] +include::modules/api-superuser-deleteInstallUser.adoc[leveloffset=+2] 
+include::modules/api-superuser-listAllUsers.adoc[leveloffset=+2] +include::modules/api-superuser-listAllLogs.adoc[leveloffset=+2] +include::modules/api-superuser-listAllOrganizations.adoc[leveloffset=+2] +include::modules/api-superuser-createServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-listServiceKeys.adoc[leveloffset=+2] +include::modules/api-superuser-changeUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-deleteUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-createUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-listUserQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-changeOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-createOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-listOrganizationQuotaSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-changeOrganization.adoc[leveloffset=+2] +include::modules/api-superuser-deleteOrganization.adoc[leveloffset=+2] +include::modules/api-superuser-approveServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-deleteServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-updateServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-getServiceKey.adoc[leveloffset=+2] +include::modules/api-superuser-getRepoBuildStatusSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-getRepoBuildSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-getRepoBuildLogsSuperUser.adoc[leveloffset=+2] +include::modules/api-superuser-getRegistrySize.adoc[leveloffset=+2] +include::modules/api-superuser-postRegistrySize.adoc[leveloffset=+2] + +//done +include::modules/api-tag.adoc[leveloffset=+1] +include::modules/api-tag-restoreTag.adoc[leveloffset=+2] +include::modules/api-tag-changeTag.adoc[leveloffset=+2] 
+include::modules/api-tag-deleteFullTag.adoc[leveloffset=+2] +include::modules/api-tag-listRepoTags.adoc[leveloffset=+2] +//done +include::modules/api-team.adoc[leveloffset=+1] +include::modules/api-team-getOrganizationTeamPermissions.adoc[leveloffset=+2] +include::modules/api-team-updateOrganizationTeamMember.adoc[leveloffset=+2] +include::modules/api-team-deleteOrganizationTeamMember.adoc[leveloffset=+2] +include::modules/api-team-getOrganizationTeamMembers.adoc[leveloffset=+2] +include::modules/api-team-inviteTeamMemberEmail.adoc[leveloffset=+2] +include::modules/api-team-deleteTeamMemberEmailInvite.adoc[leveloffset=+2] +include::modules/api-team-updateOrganizationTeam.adoc[leveloffset=+2] +include::modules/api-team-deleteOrganizationTeam.adoc[leveloffset=+2] +//done +include::modules/api-trigger.adoc[leveloffset=+1] +include::modules/api-trigger-activateBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-listTriggerRecentBuilds.adoc[leveloffset=+2] +include::modules/api-trigger-manuallyStartBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-getBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-updateBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-deleteBuildTrigger.adoc[leveloffset=+2] +include::modules/api-trigger-listBuildTriggers.adoc[leveloffset=+2] + +//done +include::modules/api-user.adoc[leveloffset=+1] +include::modules/api-user-createStar.adoc[leveloffset=+2] +include::modules/api-user-listStarredRepos.adoc[leveloffset=+2] +include::modules/api-user-getLoggedInUser.adoc[leveloffset=+2] +include::modules/api-user-deleteStar.adoc[leveloffset=+2] +include::modules/api-user-getUserInformation.adoc[leveloffset=+2] + +include::modules/api-definitions.adoc[leveloffset=+1] + +//// +// do not remove +[id="api-config-examples"] +== API configuration examples + +include::modules/external-registry-config-api-example.adoc[leveloffset=+2] +include::modules/root-rule-config-api-example.adoc[leveloffset=+2] +//// \ 
No newline at end of file diff --git a/api_reference/modules b/api_reference/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/api_reference/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/architecture/docinfo.xml b/architecture/docinfo.xml new file mode 100644 index 000000000..d3e342081 --- /dev/null +++ b/architecture/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} Architecture + + {productname} Architecture + + + Red Hat OpenShift Documentation Team + + diff --git a/architecture/images b/architecture/images new file mode 120000 index 000000000..5e6757319 --- /dev/null +++ b/architecture/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/architecture/master.adoc b/architecture/master.adoc new file mode 100644 index 000000000..946267ed2 --- /dev/null +++ b/architecture/master.adoc @@ -0,0 +1,73 @@ +include::modules/attributes.adoc[] + +[id="quay-architecture"] += {productname} architecture + +include::modules/arch-intro.adoc[leveloffset=+1] +include::modules/arch-intro-scalability.adoc[leveloffset=+2] +include::modules/arch-intro-content-distribution.adoc[leveloffset=+2] +include::modules/arch-intro-build-automation.adoc[leveloffset=+2] +include::modules/build-enhanced-arch.adoc[leveloffset=+2] +include::modules/arch-intro-integration.adoc[leveloffset=+2] +include::modules/arch-intro-security.adoc[leveloffset=+2] +include::modules/arch-intro-recent-features.adoc[leveloffset=+2] + + +include::modules/arch-prereqs.adoc[leveloffset=+1] +include::modules/core-prereqs-storage.adoc[leveloffset=+2] +include::modules/core-prereqs-db.adoc[leveloffset=+2] +include::modules/core-prereqs-redis.adoc[leveloffset=+2] + +include::modules/core-infrastructure.adoc[leveloffset=+1] +include::modules/arch-mirror-registry.adoc[leveloffset=+2] +include::modules/core-distinct-registries.adoc[leveloffset=+2] + +include::modules/core-sample-quay-on-prem.adoc[leveloffset=+1] 
+include::modules/core-example-deployment.adoc[leveloffset=+2] +include::modules/deployment-topology.adoc[leveloffset=+2] +include::modules/deployment-topology-with-storage-proxy.adoc[leveloffset=+2] + +include::modules/public-cloud-intro.adoc[leveloffset=+1] +include::modules/public-cloud-aws.adoc[leveloffset=+2] +include::modules/public-cloud-azure.adoc[leveloffset=+2] + +//include::modules/security-intro.adoc[leveloffset=+1] +//include::modules/clair-intro.adoc[leveloffset=+2] +//include::modules/clair-analyses.adoc[leveloffset=+3] +//include::modules/clairv4-intro.adoc[leveloffset=+2] +//include::modules/clairv4-arch.adoc[leveloffset=+3] +//include::modules/clairv2-compare-v4.adoc[leveloffset=+3] +//include::modules/clairv2-to-v4.adoc[leveloffset=+3] +//include::modules/clairv4-limitations.adoc[leveloffset=+3] +//include::modules/clairv4-air-gapped.adoc[leveloffset=+3] + +include::modules/content-distrib-intro.adoc[leveloffset=+1] +//mirroring +include::modules/mirroring-intro.adoc[leveloffset=+2] +include::modules/mirroring-using.adoc[leveloffset=+3] +//include::modules/mirroring-working-with.adoc[leveloffset=+3] +include::modules/mirroring-recommend.adoc[leveloffset=+3] +include::modules/mirroring-events.adoc[leveloffset=+3] +include::modules/mirroring-api-intro.adoc[leveloffset=+3] +//geo-repl +include::modules/georepl-intro.adoc[leveloffset=+2] +include::modules/arch-georpl-features.adoc[leveloffset=+3] +include::modules/georepl-prereqs.adoc[leveloffset=+3] +include::modules/georepl-arch-standalone.adoc[leveloffset=+3] +include::modules/georepl-arch-operator.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/mirroring-versus-georepl.adoc[leveloffset=+2] +include::modules/airgap-intro.adoc[leveloffset=+2] +//include::modules/airgap-clair.adoc[leveloffset=+3] + +//sizing +include::modules/sizing-intro.adoc[leveloffset=+1] +include::modules/sizing-sample.adoc[leveloffset=+2] 
+include::modules/subscription-intro.adoc[leveloffset=+2] +include::modules/quay-internal-registry-intro.adoc[leveloffset=+2] + +// Quota management +include::modules/quota-management-arch.adoc[leveloffset=+1] + +//Namespace auto-pruning +include::modules/namespace-auto-pruning-arch.adoc[leveloffset=+1] diff --git a/architecture/modules b/architecture/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/architecture/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/build_docs b/build_docs new file mode 100755 index 000000000..e67ad8784 --- /dev/null +++ b/build_docs @@ -0,0 +1,22 @@ +rm -rf dist + +asciidoctor -a productname="Project Quay" -d book welcome.adoc -D dist -o welcome.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book release_notes/master.adoc -D dist -o release_notes.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay/master.adoc -D dist -o deploy_quay.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_quay_ha/master.adoc -D dist -o deploy_quay_ha.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book deploy_red_hat_quay_operator/master.adoc -D dist -o deploy_red_hat_quay_operator.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book config_quay/master.adoc -D dist -o config_quay.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book manage_quay/master.adoc -D dist -o manage_quay.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book upgrade_quay/master.adoc -D dist -o upgrade_quay.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book use_quay/master.adoc -D dist -o use_quay.html +asciidoctor -a productname="Project Quay" -a toc="left" -d book api/master.adoc -D dist -o api_quay.html + +asciidoctor -a productname="Project Quay" -a toc="left" -d book quay_io/master.adoc -D dist -o quay_io.html + +asciidoctor -a productname="Project Quay" -a 
toc="left" -d book build_quay/master.adoc -D dist -o build_quay.html + +cp -a images dist/images diff --git a/build_quay/docinfo.xml b/build_quay/docinfo.xml new file mode 100644 index 000000000..75838b49b --- /dev/null +++ b/build_quay/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} + + Build {productname} + + + Red Hat OpenShift Documentation Team + + diff --git a/build_quay/master.adoc b/build_quay/master.adoc new file mode 100644 index 000000000..f68d43129 --- /dev/null +++ b/build_quay/master.adoc @@ -0,0 +1,10 @@ +include::modules/attributes.adoc[] + +[id='build-quay'] += Build {productname} + +``` +podman build -t quay:master -f Dockerfile . +``` + +[discrete] diff --git a/build_quay/modules b/build_quay/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/build_quay/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/builders_and_image_automation/docinfo.xml b/builders_and_image_automation/docinfo.xml new file mode 100644 index 000000000..e8b3dc5ef --- /dev/null +++ b/builders_and_image_automation/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Builders and image automation + + Understand builders and their role in automating image builds. + + + Red Hat OpenShift Documentation Team + + diff --git a/builders_and_image_automation/master.adoc b/builders_and_image_automation/master.adoc new file mode 100644 index 000000000..0fab3f64f --- /dev/null +++ b/builders_and_image_automation/master.adoc @@ -0,0 +1,43 @@ +include::modules/attributes.adoc[] + +:_content-type: ASSEMBLY +[id="quay-builders-image-automation"] += Builders and image automation +:context: quay-builders-image-automation + +The following guide shows you how to configure the {productname} _builds_ feature on both bare metal and virtual machines. 
+ +include::modules/builds-overview.adoc[leveloffset=+1] +include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+2] +include::modules/configuring-openshift-tls-component-builds.adoc[leveloffset=+1] + +//bare metal builders +include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +include::modules/prepare-ocp-for-bare-metal-builds.adoc[leveloffset=+2] +ifdef::upstream[] +include::modules/setting-up-builds-aws.adoc[leveloffset=+3] +endif::upstream[] +include::modules/openshift-routes-limitations.adoc[leveloffset=+3] + +// Virtual builders +include::modules/build-enhancements.adoc[leveloffset=+1] +include::modules/builders-virtual-environment.adoc[leveloffset=+2] +include::modules/red-hat-quay-s3-bucket-modify.adoc[leveloffset=+3] +include::modules/red-hat-quay-gcp-bucket-modify.adoc[leveloffset=+3] + +//Starting a new build +include::modules/starting-a-build.adoc[leveloffset=+1] + +//Build triggers with UI +include::modules/build-trigger-overview.adoc[leveloffset=+1] +include::modules/red-hat-quay-builders-ui.adoc[leveloffset=+2] +include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+3] +include::modules/understanding-tag-naming-build-triggers.adoc[leveloffset=+3] +include::modules/skipping-source-control-triggered-build.adoc[leveloffset=+3] +include::modules/manually-triggering-a-build-trigger.adoc[leveloffset=+2] + +// Github OAUTH +include::modules/proc_github-app.adoc[leveloffset=+1] + +//Troubleshooting +include::modules/troubleshooting-builds.adoc[leveloffset=+1] \ No newline at end of file diff --git a/builders_and_image_automation/modules b/builders_and_image_automation/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/builders_and_image_automation/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/clair/docinfo.xml b/clair/docinfo.xml new file mode 100644 index 000000000..3cacdace5 --- /dev/null +++ b/clair/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} 
+Vulnerability reporting with Clair on {productname} + + Getting started with Clair + + + Red Hat OpenShift Documentation Team + + diff --git a/clair/master.adoc b/clair/master.adoc new file mode 100644 index 000000000..de1bdb6ce --- /dev/null +++ b/clair/master.adoc @@ -0,0 +1,85 @@ +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] +[id="vulnerability-reporting-clair-quay"] += Vulnerability reporting with Clair on {productname} +:context: clair + +The contents within this guide provide an overview of Clair for {productname}, running Clair on standalone {productname} and Operator deployments, and advanced Clair configuration. + +[id="vulnerability-reporting-clair-quay-overview"] += Vulnerability reporting with Clair on {productname} overview + +The content in this guide explains the key purposes and concepts of Clair on {productname}. It also contains information about Clair releases and the location of official Clair containers. + +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/about-clair.adoc[leveloffset=+2] +include::modules/clair-severity-mapping.adoc[leveloffset=+2] +//include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/clair-concepts.adoc[leveloffset=+1] +// include::modules/internal-api.adoc[leveloffset=+2] +include::modules/clair-authentication.adoc[leveloffset=+2] +//include::modules/testing-clair.adoc[leveloffset=+1] +include::modules/clair-updaters.adoc[leveloffset=+2] +include::modules/clair-updater-urls.adoc[leveloffset=+2] +include::modules/configuring-clair-updaters.adoc[leveloffset=+2] +include::modules/clair-cve.adoc[leveloffset=+2] +include::modules/fips-overview.adoc[leveloffset=+2] + +[id="testing-clair-with-quay"] += Clair on {productname} + +This guide contains procedures for running Clair on {productname} in both standalone and {ocp} Operator deployments. 
+ +include::modules/clair-standalone-configure.adoc[leveloffset=+1] +include::modules/clair-postgresql-database-update.adoc[leveloffset=+2] +include::modules/clair-standalone-upgrade.adoc[leveloffset=+2] + +include::modules/clair-openshift.adoc[leveloffset=+1] +// include::modules/clair-openshift-manual.adoc[leveloffset=+2] + +include::modules/clair-testing.adoc[leveloffset=+1] + + +[id="advanced-clair-configuration"] += Advanced Clair configuration + +Use this section to configure advanced Clair features. + +include::modules/clair-unmanaged.adoc[leveloffset=+1] +include::modules/unmanaging-clair-database.adoc[leveloffset=+2] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+2] + +include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+1] +include::modules/managed-clair-database.adoc[leveloffset=+2] +include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+2] + +include::modules/clair-disconnected.adoc[leveloffset=+1] + + +include::modules/clair-clairctl.adoc[leveloffset=+2] +include::modules/clair-openshift-config.adoc[leveloffset=+3] +include::modules/clair-export-bundle.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-database.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+3] + + +include::modules/clair-clairctl-standalone.adoc[leveloffset=+2] +include::modules/clair-standalone-config-location.adoc[leveloffset=+3] +include::modules/clair-export-bundle-standalone.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+3] +include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+3] + +//include::modules/clair-crda-configuration.adoc[leveloffset=+2] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+2] + + +include::modules/config-fields-overview.adoc[leveloffset=+1] +include::modules/config-fields-general-clair.adoc[leveloffset=+2] 
+include::modules/config-fields-clair-indexer.adoc[leveloffset=+2] +include::modules/config-fields-clair-matcher.adoc[leveloffset=+2] +include::modules/config-fields-clair-matchers.adoc[leveloffset=+2] +include::modules/config-fields-clair-updaters.adoc[leveloffset=+2] +include::modules/config-fields-clair-notifiers.adoc[leveloffset=+2] +include::modules/config-fields-clair-auth.adoc[leveloffset=+2] +include::modules/config-fields-clair-trace.adoc[leveloffset=+2] +include::modules/config-fields-clair-metrics.adoc[leveloffset=+2] \ No newline at end of file diff --git a/clair/modules b/clair/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/clair/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/config_quay/docinfo.xml b/config_quay/docinfo.xml new file mode 100644 index 000000000..1ad7f27c7 --- /dev/null +++ b/config_quay/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Customizing {productname} using configuration options + + Configure {productname} + + + Red Hat OpenShift Documentation Team + + diff --git a/config_quay/master.adoc b/config_quay/master.adoc new file mode 100644 index 000000000..bb454fc26 --- /dev/null +++ b/config_quay/master.adoc @@ -0,0 +1,155 @@ +:_content-type: ASSEMBLY + +include::modules/attributes.adoc[] + +[id="configure-quay"] += Configure {productname} + +include::modules/config-intro.adoc[leveloffset=+1] +include::modules/config-disclaimer.adoc[leveloffset=+1] +include::modules/config-updates-314.adoc[leveloffset=+2] +//include::modules/config-updates-313.adoc[leveloffset=+2] +//include::modules/config-updates-312.adoc[leveloffset=+2] +//include::modules/config-updates-311.adoc[leveloffset=+2] +//include::modules/config-updates-310.adoc[leveloffset=+2] +//include::modules/config-updates-39.adoc[leveloffset=+2] +//include::modules/config-updates-38.adoc[leveloffset=+2] +//include::modules/config-updates-37.adoc[leveloffset=+2] 
+//include::modules/config-updates-36.adoc[leveloffset=+2] +include::modules/config-file-intro.adoc[leveloffset=+2] +include::modules/config-file-location.adoc[leveloffset=+2] +include::modules/config-file-minimal.adoc[leveloffset=+2] + +include::modules/config-fields-intro.adoc[leveloffset=+1] +include::modules/config-fields-required-intro.adoc[leveloffset=+2] +include::modules/config-preconfigure-automation-intro.adoc[leveloffset=+2] +include::modules/config-fields-optional-intro.adoc[leveloffset=+2] + +include::modules/config-fields-required-general.adoc[leveloffset=+2] +include::modules/config-fields-db.adoc[leveloffset=+2] +include::modules/config-fields-storage.adoc[leveloffset=+2] +include::modules/config-fields-storage-features.adoc[leveloffset=+3] +include::modules/config-fields-storage-fields.adoc[leveloffset=+3] +include::modules/config-fields-storage-local.adoc[leveloffset=+3] +include::modules/config-fields-storage-noobaa.adoc[leveloffset=+3] +include::modules/config-fields-storage-rados.adoc[leveloffset=+3] +include::modules/config-fields-storage-aws.adoc[leveloffset=+3] +include::modules/config-fields-storage-gcp.adoc[leveloffset=+3] +include::modules/config-fields-storage-azure.adoc[leveloffset=+3] +include::modules/config-fields-storage-swift.adoc[leveloffset=+3] +include::modules/config-fields-nutanix.adoc[leveloffset=+3] +include::modules/config-fields-ibmcloudstorage.adoc[leveloffset=+3] +include::modules/config-fields-netapp-ontap-s3.adoc[leveloffset=+3] +include::modules/config-fields-hcp.adoc[leveloffset=+3] + + +include::modules/config-fields-redis.adoc[leveloffset=+2] +include::modules/config-fields-modelcache.adoc[leveloffset=+2] +include::modules/config-fields-modelcache-memcache.adoc[leveloffset=+3] +include::modules/config-fields-modelcache-single-redis.adoc[leveloffset=+3] +include::modules/config-fields-modelcache-clustered-redis.adoc[leveloffset=+3] +include::modules/config-fields-tag-expiration.adoc[leveloffset=+2] 
+include::modules/config-fields-quota-management.adoc[leveloffset=+2] +include::modules/config-fields-proxy-cache.adoc[leveloffset=+2] +include::modules/config-fields-robot-account.adoc[leveloffset=+2] + +include::modules/config-preconfigure-automation.adoc[leveloffset=+2] +include::modules/deploying-the-operator-using-initial-configuration.adoc[leveloffset=+2] +include::modules/first-user-api.adoc[leveloffset=+3] +include::modules/using-the-oauth-token.adoc[leveloffset=+3] +include::modules/using-the-api-to-create-an-organization.adoc[leveloffset=+3] + +include::modules/config-fields-basic.adoc[leveloffset=+2] +//include::modules/config-fields-server.adoc[leveloffset=+2] +include::modules/config-fields-ssl.adoc[leveloffset=+2] +include::modules/config-custom-ssl-certs-manual.adoc[leveloffset=+2] +include::modules/config-fields-ldap.adoc[leveloffset=+2] +include::modules/config-fields-mirroring.adoc[leveloffset=+2] +include::modules/config-fields-scanner.adoc[leveloffset=+2] +include::modules/config-fields-helm-oci.adoc[leveloffset=+2] +include::modules/other-oci-artifacts-with-quay.adoc[leveloffset=+2] +include::modules/config-fields-modelcard-rendering.adoc[leveloffset=+2] +//include::modules/unknown-artifacts.adoc[leveloffset=+2] +include::modules/config-fields-actionlog.adoc[leveloffset=+2] +include::modules/config-fields-build-logs.adoc[leveloffset=+2] +include::modules/config-fields-dockerfile-build.adoc[leveloffset=+2] +include::modules/config-fields-build-manager.adoc[leveloffset=+2] +include::modules/config-fields-oauth.adoc[leveloffset=+2] +include::modules/oidc-config-fields.adoc[leveloffset=+2] +include::modules/config-fields-nested-repositories.adoc[leveloffset=+2] +include::modules/ref_quay-integration-config-fields.adoc[leveloffset=+2] +include::modules/config-fields-mail.adoc[leveloffset=+2] +include::modules/config-fields-user.adoc[leveloffset=+2] +include::modules/config-fields-recaptcha.adoc[leveloffset=+2] 
+include::modules/config-fields-aci.adoc[leveloffset=+2] +include::modules/config-fields-jwt.adoc[leveloffset=+2] +include::modules/config-fields-app-tokens.adoc[leveloffset=+2] +include::modules/config-fields-misc.adoc[leveloffset=+2] +include::modules/config-fields-legacy.adoc[leveloffset=+2] +include::modules/config-fields-v2-ui.adoc[leveloffset=+2] +include::modules/config-fields-ipv6.adoc[leveloffset=+2] +include::modules/config-fields-branding.adoc[leveloffset=+2] +include::modules/config-fields-footer.adoc[leveloffset=+2] +include::modules/config-fields-session-logout.adoc[leveloffset=+2] + +include::modules/config-envvar-intro.adoc[leveloffset=+1] +include::modules/config-envvar-georepl.adoc[leveloffset=+2] +include::modules/config-envvar-dbpool.adoc[leveloffset=+2] +include::modules/config-envvar-worker-connection.adoc[leveloffset=+2] +include::modules/config-envvar-worker-count.adoc[leveloffset=+2] +include::modules/config-debug-variables.adoc[leveloffset=+2] + + +// Clair + +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/config-fields-overview.adoc[leveloffset=+2] +include::modules/config-fields-general-clair.adoc[leveloffset=+3] +include::modules/config-fields-clair-indexer.adoc[leveloffset=+3] +include::modules/config-fields-clair-matcher.adoc[leveloffset=+3] +include::modules/config-fields-clair-matchers.adoc[leveloffset=+3] +include::modules/config-fields-clair-updaters.adoc[leveloffset=+3] +include::modules/config-fields-clair-notifiers.adoc[leveloffset=+3] +include::modules/config-fields-clair-auth.adoc[leveloffset=+3] +include::modules/config-fields-clair-trace.adoc[leveloffset=+3] +include::modules/config-fields-clair-metrics.adoc[leveloffset=+3] + +ifeval::["{productname}" == "Project Quay"] +include::modules/proc_manage-security-scanning.adoc[leveloffset=+1] +include::modules/proc_manage-clair-enable.adoc[leveloffset=+1] +endif::[] + +//// + +include::modules/config-api-intro.adoc[leveloffset=+1] 
+include::modules/config-api-default.adoc[leveloffset=+2] +include::modules/config-api-retrieve.adoc[leveloffset=+2] +include::modules/config-api-validate.adoc[leveloffset=+2] +include::modules/config-api-required.adoc[leveloffset=+2] + +// TODO 36 Redo config tool images +include::modules/config-ui-intro.adoc[leveloffset=+1] +include::modules/config-ui-custom-ssl-certs.adoc[leveloffset=+2] +include::modules/config-ui-basic-config.adoc[leveloffset=+2] +include::modules/config-ui-server-config.adoc[leveloffset=+2] +include::modules/config-ui-database.adoc[leveloffset=+2] +include::modules/config-ui-data-consistency.adoc[leveloffset=+2] +include::modules/config-ui-time-machine.adoc[leveloffset=+2] +include::modules/config-ui-redis.adoc[leveloffset=+2] +include::modules/config-ui-repo-mirroring.adoc[leveloffset=+2] + +include::modules/config-ui-storage.adoc[leveloffset=+2] +// TODO 36 Storage proxy +// include::modules/config-ui-storage-proxy.adoc[leveloffset=+3] +include::modules/config-ui-storage-georepl.adoc[leveloffset=+3] +include::modules/config-ui-storage-engines.adoc[leveloffset=+3] + +include::modules/config-ui-action-log.adoc[leveloffset=+2] +include::modules/config-ui-security-scanner.adoc[leveloffset=+2] +include::modules/config-ui-app-registry.adoc[leveloffset=+2] +include::modules/config-ui-email.adoc[leveloffset=+2] +include::modules/config-ui-internal-authentication.adoc[leveloffset=+2] +include::modules/config-ui-oauth.adoc[leveloffset=+2] +include::modules/config-ui-access-settings.adoc[leveloffset=+2] +include::modules/config-ui-dockerfile-build.adoc[leveloffset=+2] +//include::modules/ssl-config-ui.adoc[leveloffset=+2] \ No newline at end of file diff --git a/config_quay/modules b/config_quay/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/config_quay/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/deploy_quay/docinfo.xml b/deploy_quay/docinfo.xml index af3f183d7..482cf8ece 100644 --- 
a/deploy_quay/docinfo.xml +++ b/deploy_quay/docinfo.xml @@ -1,8 +1,8 @@ {productname} -3 -Deploy {productname} +{producty} +Deploying {productname} - Get started with {productname} + Getting started with {productname} Red Hat OpenShift Documentation Team diff --git a/deploy_quay/master.adoc b/deploy_quay/master.adoc index 46dcfb3c4..9eb48c65b 100644 --- a/deploy_quay/master.adoc +++ b/deploy_quay/master.adoc @@ -1,24 +1,104 @@ +:_content-type: ASSEMBLY include::modules/attributes.adoc[] -[id='deploy-quay-single'] -= Deploy {productname} - Basic +[id="deploy-quay-single"] += Proof of Concept - Deploying {productname} -{productname} is an enterprise-quality container registry. Use Quay to build and store containers, then deploy them to the servers across your enterprise. +[IMPORTANT] +==== +The following _proof of concept_ deployment method is unsupported for production purposes. This deployment type uses local storage. Local storage is not guaranteed to provide the required read-after-write consistency and data integrity guarantees during parallel access that a storage registry like {productname} requires. Do not use this deployment type for production purposes. Use it for testing purposes only. +==== -This procedure describes how to deploy a non-production, test-only {productname} setup (based on link:https://coreos.com/quay-enterprise/docs/latest/initial-setup.html[For Testing as a container]). +{productname} is an enterprise-quality registry for building, securing and serving container images. The documents in this section detail how to deploy {productname} for _proof of concept_, or non-production, purposes. The primary objectives of this document include the following: -include::modules/con_quay_intro.adoc[leveloffset=+1] +* How to deploy {productname} for basic non-production purposes. +* Assess {productname}'s container image management, including how to push, pull, tag, and organize images. +* Explore availability and scalability.
+* How to deploy an advanced {productname} proof of concept deployment using SSL/TLS certificates. + +Beyond the primary objectives of this document, a proof of concept deployment can be used to test various features offered by {productname}, such as establishing superusers, setting repository quota limitations, enabling Splunk for action log storage, enabling Clair for vulnerability reporting, and more. See the "Next steps" section for a list of some of the features available after you have followed this guide. + +This proof of concept deployment procedure can be followed on a single machine, either physical or virtual. include::modules/con_quay_single_prereq.adoc[leveloffset=+1] -include::modules/proc_deploy_quay_single.adoc[leveloffset=+1] +//ifeval::["{productname}" == "Red Hat Quay"] +include::modules/proc_deploy_quay_poc_rhel.adoc[leveloffset=+1] +//endif::[] + +include::modules/preparing-system-deploy-quay.adoc[leveloffset=+1] + +include::modules/configuring-port-mapping.adoc[leveloffset=+2] + +include::modules/proc_deploy_quay_poc_db.adoc[leveloffset=+2] + +include::modules/proc_deploy_quay_poc_redis.adoc[leveloffset=+2] + +//include::modules/proc_deploy_quay_poc_conf.adoc[leveloffset=+1] + +include::modules/proc_deploy_quay_poc_run.adoc[leveloffset=+1] + +include::modules/proc_deploy_quay_poc_use.adoc[leveloffset=+1] + + + +include::modules/advanced-quay-poc-deployment.adoc[leveloffset=+1] +//include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] +include::modules/ssl-intro.adoc[leveloffset=+2] +include::modules/ssl-create-certs.adoc[leveloffset=+3] +include::modules/configuring-ssl-tls.adoc[leveloffset=+2] +//include::modules/ssl-config-ui.adoc[leveloffset=+3] +include::modules/ssl-config-cli.adoc[leveloffset=+3] +include::modules/testing-ssl-tls-configuration.adoc[leveloffset=+2] +include::modules/ssl-testing-cli.adoc[leveloffset=+3] +include::modules/ssl-testing-ui.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-podman.adoc[leveloffset=+2]
+include::modules/ssl-trust-ca-system.adoc[leveloffset=+2] + +//local ipv6 deployment +include::modules/proc_deploy_quay_local_ipv6.adoc[leveloffset=+1] +include::modules/poc-creating-dual-stack-cn.adoc[leveloffset=+2] +include::modules/deploy-local-quay-ipv6.adoc[leveloffset=+2] + +include::modules/proc_deploy_quay_poc_next.adoc[leveloffset=1] + +//// +include::modules/proc_deploy_quay_poc_dns.adoc[leveloffset=+2] + +=== Repository Mirroring + +include::modules/proc_deploy_quay_common_superuser.adoc[leveloffset=+2] + + +include::modules/mirroring-intro.adoc[leveloffset=+3] +include::modules/config-ui-mirroring.adoc[leveloffset=+3] +include::modules/mirroring-worker.adoc[leveloffset=+3] +include::modules/mirroring-creating-repo.adoc[leveloffset=+3] +include::modules/mirroring-tag-patterns.adoc[leveloffset=+3] + + +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+2] +include::modules/clair-standalone-configure.adoc[leveloffset=+3] +include::modules/clair-testing.adoc[leveloffset=+3] +include::modules/clair-cve.adoc[leveloffset=+3] + +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#vulnerability-reporting-clair-quay-overview[Vulnerability reporting with Clair on {productname}] + +include::modules/proc_deploy_quay_poc_restart.adoc[leveloffset=+2] + + +include::modules/fips-overview.adoc[leveloffset=+2] + -include::modules/proc_deploy_quay_guided.adoc[leveloffset=+1] +//include::modules/proc_deploy_quay_guided.adoc[leveloffset=+1] -include::modules/proc_deploy_quay_add.adoc[leveloffset=+1] +//include::modules/proc_deploy_quay_add.adoc[leveloffset=+1] -include::modules/con_deploy_quay_start_using.adoc[leveloffset=+1] +//include::modules/con_deploy_quay_start_using.adoc[leveloffset=+1] -[discrete] -== Additional resources +//[discrete] +//== Additional resources +//// \ No newline at end of file diff --git 
a/deploy_quay_ha/docinfo.xml b/deploy_quay_ha/docinfo.xml index 3593d3d9a..aed69fa63 100644 --- a/deploy_quay_ha/docinfo.xml +++ b/deploy_quay_ha/docinfo.xml @@ -1,5 +1,5 @@ {productname} -3 +{producty} Deploy {productname} HA Deploy {productname} in a HA environment diff --git a/deploy_quay_ha/master.adoc b/deploy_quay_ha/master.adoc index f5fc33ced..7f1f95c34 100644 --- a/deploy_quay_ha/master.adoc +++ b/deploy_quay_ha/master.adoc @@ -25,6 +25,11 @@ include::modules/proc_deploy_quay_add.adoc[leveloffset=+1] include::modules/con_deploy_quay_start_using.adoc[leveloffset=+1] +//upgrade + +include::modules/upgrading-geo-repl-quay.adoc[leveloffset=+1] + +include::modules/health-check-quay.adoc[leveloffset=+1] [discrete] == Additional resources diff --git a/deploy_quay_on_openshift/master.adoc b/deploy_quay_on_openshift/master.adoc deleted file mode 100644 index 6ca843f3d..000000000 --- a/deploy_quay_on_openshift/master.adoc +++ /dev/null @@ -1,42 +0,0 @@ -include::modules/attributes.adoc[] - -[id='deploy-quay-on-openshift'] -= Deploy {productname} on OpenShift - -{productname} is an enterprise-quality container registry. Use {productname} to build and -store container images, then make them available to deploy across your enterprise. -Red Hat is working on two approaches to deploying {productname} on OpenShift: - -* **Deploy {productname} objects individually**: The current procedure in this guide -provides a set of yaml files that you deploy individually to set up your {productname} -cluster. This procedure is currently fully supported. - -* **Deploy {productname} with an Operator**: The {productname} Setup Operator is being -developed to provide a simpler method to deploy and manage a {productname} cluster. -Although currently available as Developer Preview, the setup portion of the -link:https://github.com/redhat-cop/quay-operator[{productname} Setup Operator] -procedure is quite solid and represents the future direction of {productname} -deployment on OpenShift. 
-We strongly recommend trying the {productname} Operator for non-production uses -and contributing to the project, if you are so inclined. - - -include::modules/con_quay_intro.adoc[leveloffset=+1] - -ifeval::["{productname}" == "Red Hat Quay"] -include::modules/con_quay_openshift_prereq.adoc[leveloffset=+1] -endif::[] - -ifeval::["{productname}" == "Project Quay"] -include::modules/con_quay_kubernetes.adoc[leveloffset=+1] -endif::[] - - -include::modules/proc_deploy_quay_openshift.adoc[leveloffset=+1] - -include::modules/con_deploy_quay_start_using.adoc[leveloffset=+1] - -include::modules/ref_deploy_quay_openshift.adoc[leveloffset=+1] - -[discrete] -== Additional resources diff --git a/deploy_quay_on_openshift_op_tng/docinfo.xml b/deploy_quay_on_openshift_op_tng/docinfo.xml new file mode 100644 index 000000000..5b72146e7 --- /dev/null +++ b/deploy_quay_on_openshift_op_tng/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Deploy {productname} on OpenShift with Quay Operator + + Deploy {productname} on an OpenShift Cluster with the {productname} Operator + + + Red Hat OpenShift Documentation Team + + diff --git a/deploy_quay_on_openshift_op_tng/master.adoc b/deploy_quay_on_openshift_op_tng/master.adoc new file mode 100644 index 000000000..479892ec9 --- /dev/null +++ b/deploy_quay_on_openshift_op_tng/master.adoc @@ -0,0 +1,177 @@ +include::modules/attributes.adoc[] + +[id="deploy-quay-on-openshift-op-tng"] += Deploy {productname} on {ocp} with the {productname} Operator + +{productname} is an enterprise-quality container registry. Use {productname} to build and store container images, then make them available to deploy across your enterprise. + +The {productname} Operator provides a simple method to deploy and manage {productname} on an OpenShift cluster. 
+ +include::modules/operator-differences.adoc[leveloffset=+2] + + +include::modules/operator-concepts.adoc[leveloffset=+1] + +include::modules/operator-quayregistry-api.adoc[leveloffset=+2] +include::modules/operator-components-intro.adoc[leveloffset=+2] +include::modules/operator-components-managed.adoc[leveloffset=+2] +include::modules/operator-components-unmanaged.adoc[leveloffset=+2] +include::modules/operator-config-bundle-secret.adoc[leveloffset=+2] +include::modules/operator-prereq.adoc[leveloffset=+2] + + + +include::modules/operator-install.adoc[leveloffset=+1] + + + +include::modules/operator-preconfigure.adoc[leveloffset=+1] +include::modules/config-preconfigure-automation.adoc[leveloffset=+2] + + +include::modules/operator-preconfig-storage.adoc[leveloffset=+2] +include::modules/operator-unmanaged-storage.adoc[leveloffset=+3] +include::modules/operator-managed-storage.adoc[leveloffset=3] +ifeval::["{productname}" == "Red Hat Quay"] +include::modules/operator-standalone-object-gateway.adoc[leveloffset=4] +endif::[] + +//traffic ingress +[id="configuring-traffic-ingress"] +== Configuring traffic ingress +include::modules/operator-preconfig-tls-routes.adoc[leveloffset=+1] + +//Database +[id="configuring-the-database-poc"] +== Configuring the database +include::modules/operator-unmanaged-postgres.adoc[leveloffset=+1] +include::modules/config-fields-db.adoc[leveloffset=+1] +include::modules/operator-managed-postgres.adoc[leveloffset=+1] +//* The Operator will deploy an OpenShift `Route` as the default entrypoint to the registry. If you prefer a different entrypoint (e.g. `Ingress` or direct `Service` access that configuration will need to be done manually). 
+include::modules/operator-components-unmanaged-other.adoc[leveloffset=+2] +include::modules/operator-unmanaged-redis.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +xref:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-redis[Redis configuration fields] + +include::modules/operator-unmanaged-hpa.adoc[leveloffset=+3] +include::modules/operator-unmanaged-route.adoc[leveloffset=+3] +include::modules/operator-unmanaged-monitoring.adoc[leveloffset=+3] +include::modules/operator-unmanaged-mirroring.adoc[leveloffset=+3] + +include::modules/operator-deploy.adoc[leveloffset=+1] + +include::modules/operator-deploy-cli.adoc[leveloffset=+2] +include::modules/operator-deploy-view-pods-cli.adoc[leveloffset=+3] +include::modules/operator-deploy-hpa.adoc[leveloffset=+3] +include::modules/first-user-api.adoc[leveloffset=+3] +include::modules/operator-monitor-deploy-cli.adoc[leveloffset=+3] + +include::modules/operator-deploy-ui.adoc[leveloffset=+2] +include::modules/operator-first-user-ui.adoc[leveloffset=+3] + + + +//include::modules/operator-quayregistry-status.adoc[leveloffset=+2] + + +include::modules/operator-config-cli.adoc[leveloffset=+1] +include::modules/operator-config-cli-access.adoc[leveloffset=+2] +include::modules/operator-config-cli-download.adoc[leveloffset=+2] +include::modules/operator-custom-ssl-certs-config-bundle.adoc[leveloffset=+2] + +include::modules/operator-config-ui.adoc[leveloffset=+1] +include::modules/operator-config-ui-access.adoc[leveloffset=+2] +include::modules/operator-config-ui-change.adoc[leveloffset=+2] +include::modules/operator-config-ui-monitoring.adoc[leveloffset=+2] +include::modules/operator-config-ui-updated.adoc[leveloffset=+2] +include::modules/config-ui-custom-ssl-certs.adoc[leveloffset=+2] + +//nclude::modules/operator-custom-ssl-certs.adoc[leveloffset=+2] + + +include::modules/operator-external-access.adoc[leveloffset=+2] + + 
+ +== Quay Operator features + +include::modules/operator-console-monitoring-alerting.adoc[leveloffset=+2] + +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+2] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+3] +include::modules/clair-openshift.adoc[leveloffset=+3] +include::modules/clair-testing.adoc[leveloffset=+3] + +include::modules/fips-overview.adoc[leveloffset=+2] + + +include::modules/operator-advanced.adoc[leveloffset=+1] +include::modules/operator-deploy-infrastructure.adoc[leveloffset=+2] +include::modules/monitoring-single-namespace.adoc[leveloffset=+2] +include::modules/operator-resize-storage.adoc[leveloffset=+2] +include::modules/operator-customize-images.adoc[leveloffset=+2] +include::modules/operator-cloudfront.adoc[leveloffset=+2] + +include::modules/clair-advanced-configuration-overview.adoc[leveloffset=+2] + +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+4] + + +include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+3] +include::modules/managed-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+4] + +include::modules/clair-disconnected.adoc[leveloffset=+3] +include::modules/clair-clairctl.adoc[leveloffset=+4] +include::modules/clair-openshift-config.adoc[leveloffset=+5] +include::modules/clair-export-bundle.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+5] + +include::modules/clair-clairctl-standalone.adoc[leveloffset=+4] +include::modules/clair-standalone-config-location.adoc[leveloffset=+5] +include::modules/clair-export-bundle-standalone.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+5] 
+include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+5] + +//include::modules/clair-crda-configuration.adoc[leveloffset=+3] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+3] + +//// +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+4] +//// + +include::modules/build-enhancements.adoc[leveloffset=+1] +include::modules/build-enhanced-arch.adoc[leveloffset=+2] +include::modules/build-limitations.adoc[leveloffset=+2] +include::modules/builders-virtual-environment.adoc[leveloffset=+2] + +include::modules/georepl-intro.adoc[leveloffset=+1] +include::modules/arch-georpl-features.adoc[leveloffset=+2] +include::modules/georepl-prereqs.adoc[leveloffset=+2] +include::modules/georepl-arch-operator.adoc[leveloffset=+2] +include::modules/georepl-deploy-operator.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/upgrading-geo-repl-quay-operator.adoc[leveloffset=+2] + +include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] +include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] + +include::modules/operator-ipv6-dual-stack.adoc[leveloffset=+1] + + +include::modules/operator-upgrade.adoc[leveloffset=+1] + + +[discrete] +== Additional resources +* For more details on the {productname} Operator, see the upstream +link:https://github.com/quay/quay-operator/[quay-operator] project. 
diff --git a/deploy_quay_on_openshift_op_tng/modules b/deploy_quay_on_openshift_op_tng/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/deploy_quay_on_openshift_op_tng/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/deploy_red_hat_quay_operator/docinfo.xml b/deploy_red_hat_quay_operator/docinfo.xml new file mode 100644 index 000000000..90dcf4389 --- /dev/null +++ b/deploy_red_hat_quay_operator/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Deploying the {productname} Operator on {ocp} + + Deploy the {productname} Operator on an {ocp} cluster + + + Red Hat OpenShift Documentation Team + + diff --git a/deploy_red_hat_quay_operator/master.adoc b/deploy_red_hat_quay_operator/master.adoc new file mode 100644 index 000000000..f5bfc8e4c --- /dev/null +++ b/deploy_red_hat_quay_operator/master.adoc @@ -0,0 +1,101 @@ +include::modules/attributes.adoc[] + +[id="deploy-quay-on-openshift-op-tng"] += Deploying the {productname} Operator on {ocp} + +{productname} is an enterprise-quality container registry. Use {productname} to build and store container images, then make them available to deploy across your enterprise. + +The {productname} Operator provides a simple method to deploy and manage {productname} on an OpenShift cluster. 
+ +//differences +include::modules/operator-differences.adoc[leveloffset=+2] + +//concepts +include::modules/operator-concepts.adoc[leveloffset=+1] +include::modules/operator-components-intro.adoc[leveloffset=+2] +include::modules/operator-components-managed.adoc[leveloffset=+2] +include::modules/operator-components-unmanaged.adoc[leveloffset=+2] +include::modules/operator-config-bundle-secret.adoc[leveloffset=+2] +include::modules/operator-prereq.adoc[leveloffset=+2] + +//installing the operator +include::modules/operator-install.adoc[leveloffset=+1] + + +//preconfiguration +include::modules/operator-preconfigure.adoc[leveloffset=+1] +include::modules/config-preconfigure-automation.adoc[leveloffset=+2] +include::modules/operator-preconfig-storage.adoc[leveloffset=+2] +include::modules/operator-unmanaged-storage.adoc[leveloffset=+3] +include::modules/operator-unmanaged-storage-noobaa.adoc[leveloffset=+3] +include::modules/operator-managed-storage.adoc[leveloffset=3] +include::modules/operator-standalone-object-gateway.adoc[leveloffset=4] + +//traffic ingress +[id="configuring-traffic-ingress"] +== Configuring traffic ingress +include::modules/operator-preconfig-tls-routes.adoc[leveloffset=+2] + +//configuring resources +include::modules/configuring-resources-managed-components.adoc[leveloffset=+1] + +//Database +[id="configuring-the-database-poc"] +== Configuring the database +include::modules/operator-unmanaged-postgres.adoc[leveloffset=+2] +include::modules/config-fields-db.adoc[leveloffset=+3] +include::modules/operator-managed-postgres.adoc[leveloffset=+3] +//* The Operator will deploy an OpenShift `Route` as the default entrypoint to the registry. If you prefer a different entrypoint (e.g. `Ingress` or direct `Service` access that configuration will need to be done manually). 
+include::modules/operator-components-unmanaged-other.adoc[leveloffset=+2] +include::modules/operator-unmanaged-redis.adoc[leveloffset=+3] + +[role="_additional-resources"] +.Additional resources +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-redis[Redis configuration fields] + +include::modules/operator-unmanaged-hpa.adoc[leveloffset=+3] +include::modules/operator-unmanaged-route.adoc[leveloffset=+3] +include::modules/operator-unmanaged-monitoring.adoc[leveloffset=+3] +include::modules/operator-unmanaged-mirroring.adoc[leveloffset=+3] + +//operator deployment +include::modules/operator-deploy.adoc[leveloffset=+1] +//cli +include::modules/operator-deploy-cli.adoc[leveloffset=+2] +include::modules/first-user-api.adoc[leveloffset=+3] +include::modules/operator-deploy-view-pods-cli.adoc[leveloffset=+3] +include::modules/operator-deploy-hpa.adoc[leveloffset=+3] + +[role="_additional-resources"] +.Additional resources +For more information on pre-configuring your {productname} deployment, see the section xref:config-preconfigure-automation[Pre-configuring {productname} for automation] + +include::modules/operator-monitor-deploy-cli.adoc[leveloffset=+3] +//ui +include::modules/operator-deploy-ui.adoc[leveloffset=+2] +include::modules/operator-first-user-ui.adoc[leveloffset=+3] + +//quayregistry status +include::modules/operator-quayregistry-status.adoc[leveloffset=+1] + +//configuring +include::modules/operator-config-cli.adoc[leveloffset=+1] +include::modules/operator-config-cli-access.adoc[leveloffset=+2] +include::modules/operator-config-cli-download.adoc[leveloffset=+2] + +//SSL/TLS +include::modules/operator-custom-ssl-certs-config-bundle.adoc[leveloffset=+1] +include::modules/ssl-create-certs.adoc[leveloffset=+2] +include::modules/creating-custom-ssl-certs-config-bundle.adoc[leveloffset=+2] + +//Deploying configuration tool +//include::modules/operator-config-ui.adoc[leveloffset=+1] 
+ +//upgrading 38-39 +//removed for 3.10+ +//include::modules/upgrading-postgresql.adoc[leveloffset=+1] + +[role="quay-next-steps"] +.Next steps + +* https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/[{productname} features] diff --git a/deploy_red_hat_quay_operator/modules b/deploy_red_hat_quay_operator/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/deploy_red_hat_quay_operator/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/deployment-template.yml b/deployment-template.yml new file mode 100644 index 000000000..216a38bee --- /dev/null +++ b/deployment-template.yml @@ -0,0 +1,299 @@ +--- +kind: "Template" +apiVersion: "v1" +metadata: + name: "public-project-quay-docs" + annotations: + openshift.io/display-name: "Public Project Quay Static Documentation Website" + tags: "ruby,asciidoctor" + iconClass: "icon-shadowman" + template.openshift.io/provider-display-name: "Red Hat, Inc." +message: "The following service(s) have been created in your project: ${NAME}." 
+labels: + template: "project-quay-docs" +objects: + - kind: "Service" + apiVersion: "v1" + metadata: + name: "${NAME}" + annotations: + description: "Exposes and load balances the frontend application pods for the deployment" + labels: + app: "${NAME}" + spec: + ports: + - name: "https" + port: 8443 + targetPort: 8080 + selector: + name: "${NAME}" + + - kind: "Route" + apiVersion: "v1" + metadata: + annotations: + kubernetes.io/tls-acme: "true" + name: "${NAME}" + labels: + app: "${NAME}" + spec: + host: "${APPLICATION_DOMAIN}" + to: + kind: "Service" + name: "${NAME}" + weight: 1 + port: + targetPort: "https" + + - kind: "ImageStream" + apiVersion: "v1" + metadata: + name: "stg1-${NAME}" + annotations: + description: "Keeps track of changes in the stg1-${NAME} application image" + labels: + app: "${NAME}" + + - kind: "ImageStream" + apiVersion: "v1" + metadata: + name: "final-${NAME}" + annotations: + description: "Keeps track of changes in the final-${NAME} application image" + labels: + app: "${NAME}" + + - kind: "ImageStream" + apiVersion: "v1" + metadata: + name: "httpd-24-rhel7" + annotations: + description: "Upstream httpd 2.4 s2i image" + labels: + app: "${NAME}" + spec: + lookupPolicy: + local: false + tags: + - annotations: null + from: + kind: "DockerImage" + name: "registry.access.redhat.com/rhscl/httpd-24-rhel7" + generation: 0 + importPolicy: + scheduled: true + name: "latest" + referencePolicy: + type: "Source" + + - kind: "BuildConfig" + apiVersion: "v1" + metadata: + name: "stg1-${NAME}" + annotations: + description: "Defines how to perform stage 1 build for ${NAME}" + labels: + app: "${NAME}" + spec: + nodeSelector: + source: + type: "Git" + git: + uri: "${SOURCE_REPOSITORY_URL}" + ref: "${SOURCE_REPOSITORY_REF}" + contextDir: "${CONTEXT_DIR}" + strategy: + type: "Source" + sourceStrategy: + from: + kind: "ImageStreamTag" + namespace: "${NAMESPACE}" + name: "ruby:2.5" + output: + to: + kind: "ImageStreamTag" + name: "stg1-${NAME}:latest" + 
triggers: + - type: "ImageChange" + - type: "ConfigChange" + - type: "GitHub" + github: + secret: "${GITHUB_WEBHOOK_SECRET}" + + - kind: "BuildConfig" + apiVersion: "v1" + metadata: + name: "final-${NAME}" + annotations: + description: "Defines how to perform final build for ${NAME} before deployment" + labels: + app: "${NAME}" + spec: + nodeSelector: + source: + type: "Images" + images: + - from: + kind: "ImageStreamTag" + name: "stg1-${NAME}:latest" + paths: + - sourcePath: "/opt/app-root/src/dist/." + destinationDir: "." + - sourcePath: "/opt/app-root/src/.s2i/httpd-cfg/." + destinationDir: "httpd-cfg/." + strategy: + type: "Source" + sourceStrategy: + from: + kind: "ImageStreamTag" + name: "httpd-24-rhel7:latest" + output: + to: + kind: "ImageStreamTag" + name: "final-${NAME}:latest" + triggers: + - imageChange: + from: + kind: ImageStreamTag + name: 'stg1-${NAME}:latest' + type: "ImageChange" + - type: "ConfigChange" + + - kind: "DeploymentConfig" + apiVersion: "v1" + metadata: + name: "${NAME}" + annotations: + description: "Defines how to deploy the ${APPLICATION_DOMAIN} domain" + labels: + app: "${NAME}" + spec: + strategy: + type: "Rolling" + triggers: + - type: "ImageChange" + imageChangeParams: + automatic: true + containerNames: + - "${NAME}" + from: + kind: "ImageStreamTag" + name: "final-${NAME}:latest" + - type: "ConfigChange" + replicas: 1 + test: false + selector: + name: "${NAME}" + template: + metadata: + name: "${NAME}" + labels: + name: "${NAME}" + app: "${NAME}" + spec: + containers: + - name: "${NAME}" + ports: + - containerPort: 8080 + readinessProbe: + timeoutSeconds: 5 + initialDelaySeconds: 10 + httpGet: + path: "/${HEALTHCHECK_PATH}" + port: 8080 + livenessProbe: + timeoutSeconds: 5 + initialDelaySeconds: 10 + periodSeconds: 60 + httpGet: + path: "/${HEALTHCHECK_PATH}" + port: 8080 + resources: + requests: + cpu: "${CPU_REQUEST}" + memory: "${MEMORY_REQUEST}" + limits: + cpu: "${CPU_LIMIT}" + memory: "${MEMORY_LIMIT}" + + - kind: 
"HorizontalPodAutoscaler" + apiVersion: "autoscaling/v1" + metadata: + name: "${NAME}" + labels: + app: "${NAME}" + spec: + scaleTargetRef: + kind: DeploymentConfig + name: "${NAME}" + apiVersion: "v1" + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 75 + +parameters: + - name: "NAME" + displayName: "Application name" + description: "The name assigned to all of the application components defined in this template." + required: true + + - name: "APPLICATION_DOMAIN" + displayName: "Application Hostname" + description: "The exposed hostname that will route to the httpd service for content." + value: "" + + - name: "HEALTHCHECK_PATH" + displayName: "URI path to a known, working web page" + description: "The URI path to a known, working web page for testing liveness and readiness probes. Exclude leading '/'" + required: true + value: "welcome.html" + + - name: "SOURCE_REPOSITORY_URL" + displayName: "Git Repository URL" + description: "The URL of the repository with your application source code." + required: true + value: "https://github.com/quay/quay-docs.git" + + - name: "SOURCE_REPOSITORY_REF" + displayName: "Git Reference" + description: "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." + + - name: "CONTEXT_DIR" + displayName: "Context Directory" + description: "Set this to the relative path to your project if it is not in the root of your repository." + + - name: "CPU_REQUEST" + displayName: "CPU Request" + description: "Requested amount of CPU the httpd container will use." + required: true + value: "50m" + + - name: "CPU_LIMIT" + displayName: "CPU Limit" + description: "Maximum amount of CPU the httpd container can use." + required: true + value: "500m" + + - name: "MEMORY_REQUEST" + displayName: "Memory Request" + description: "Requested amount of memory the httpd container will use." 
+ required: true + value: "100Mi" + + - name: "MEMORY_LIMIT" + displayName: "Memory Limit" + description: "Maximum amount of memory the httpd container can use." + required: true + value: "256Mi" + + - name: "NAMESPACE" + displayName: "Namespace" + description: "The OpenShift Namespace where the Ruby s2i builder ImageStream resides." + required: true + value: "openshift" + + - name: "GITHUB_WEBHOOK_SECRET" + displayName: "GitHub Webhook Secret" + description: "A secret string used to configure the GitHub webhook." + generate: "expression" + from: "[a-zA-Z0-9]{40}" diff --git a/docs/api-v2-public.json b/docs/api-v2-public.json new file mode 100644 index 000000000..b36e6637d --- /dev/null +++ b/docs/api-v2-public.json @@ -0,0 +1,2369 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Red Hat Quay API", + "version": "1.0.0", + "description": "This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations." + }, + "servers": [ + { + "url": "{protocol}://{host}", + "description": "Set your own Red Hat Quay registry URL", + "variables": { + "protocol": { + "default": "https", + "enum": [ + "http", + "https" + ] + }, + "host": { + "default": "quay-server.example.com", + "description": "Enter your Red Hat Quay registry hostname." + } + } + } + ], + "security": [ + { + "BearerAuth": [] + } + ], + "tags": [ + { + "name": "Application specific tokens", + "description": "Manage application-specific tokens by using the API" + }, + { + "name": "Builds", + "description": "API endpoints for managing Quay repository builds" + }, + { + "name": "Discovery", + "description": "API discovery information" + }, + { + "name": "Error", + "description": "Obtain error details by using the API" + }, + { + "name": "Global messages", + "description": "Messages API" + }, + { + "name": "Logs", + "description": "Access usage logs for organizations or repositories." 
+ } + ], + "paths": { + "/api/v1/user/apptoken": { + "get": { + "tags": [ + "Application specific tokens" + ], + "summary": "List app-specific tokens", + "description": "Retrieves a list of application-specific tokens for the user.", + "operationId": "listAppTokens", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "expiring", + "in": "query", + "required": false, + "schema": { + "type": "boolean" + }, + "description": "If true, only returns those tokens expiring soon" + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + }, + "post": { + "tags": [ + "Application specific tokens" + ], + "summary": "Create a new app-specific token", + "description": "Creates a new application-specific token for the user.", + "operationId": "createAppToken", + "security": [ + { + "BearerAuth": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "title": { + "type": "string", + "example": "MyAppToken" + } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "token": { + "type": "string", + "example": "abc123xyz" + } + } + } + } + } + }, + "400": { + "description": "Bad Request", + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/apptoken/{token_uuid}": { + "get": { + "tags": [ + "Application specific tokens" + ], + "summary": "Get details of a specific app token", + "description": "Retrieves details for a specific application token.", + "operationId": "getAppToken", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "token_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object" + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Application specific tokens" + ], + "summary": "Revoke a specific app token", + "description": "Revokes a specific application token for the 
user.", + "operationId": "revokeAppToken", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "token_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Deleted" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/build/{build_uuid}/status": { + "get": { + "tags": [ + "Builds" + ], + "summary": "Return the status for the builds specified by the build UUID", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The full path of the repository (e.g., namespace/name)" + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The UUID of the build" + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/repository/{repository}/build/{build_uuid}/logs": { + "get": { + "tags": [ + "Builds" + ], + "summary": "Return the build logs for the specified build UUID", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + 
"schema": { + "type": "string" + } + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/repository/{repository}/build/{build_uuid}": { + "get": { + "tags": [ + "Builds" + ], + "summary": "Returns information about a build", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + }, + "delete": { + "tags": [ + "Builds" + ], + "summary": "Cancel a repository build", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "build_uuid", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Deleted" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/repository/{repository}/build/": { + "post": { + "tags": [ + "Builds" + ], + "summary": "Request a repository build and push", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + 
"application/json": { + "schema": { + "type": "object", + "properties": { + "file_id": { + "type": "string" + }, + "archive_url": { + "type": "string" + }, + "subdirectory": { + "type": "string" + }, + "dockerfile_path": { + "type": "string" + }, + "context": { + "type": "string" + }, + "pull_robot": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + }, + "get": { + "tags": [ + "Builds" + ], + "summary": "Get the list of repository builds", + "parameters": [ + { + "name": "repository", + "in": "path", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "since", + "in": "query", + "schema": { + "type": "integer" + }, + "description": "Returns all builds since the given Unix timestamp" + }, + { + "name": "limit", + "in": "query", + "schema": { + "type": "integer" + }, + "description": "The maximum number of builds to return" + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request" + }, + "401": { + "description": "Session required" + }, + "403": { + "description": "Unauthorized access" + }, + "404": { + "description": "Not found" + } + } + } + }, + "/api/v1/discovery": { + "get": { + "tags": [ + "Discovery" + ], + "summary": "List all available API endpoints", + "description": "Returns a list of all API endpoints available in the Swagger API format.", + "operationId": "getDiscovery", + "parameters": [ + { + "name": "internal", + "in": "query", + "description": "Whether to include internal APIs.", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + 
"content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/error/{error_type}": { + "get": { + "tags": [ + "Error" + ], + "summary": "Get a detailed description of the error", + "description": "Retrieves a detailed description of the specified error type.", + "operationId": "getErrorDescription", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "error_type", + "in": "path", + "description": "The error code identifying the type of error.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiErrorDescription" + }, + "example": { + "error": "404", + "message": "The requested resource was not found." + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 400, + "detail": "Invalid request format." 
+ } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 401, + "detail": "Authentication token is missing or invalid." + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 403, + "detail": "You do not have permission to access this resource." + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 404, + "detail": "Error type not found." + } + } + } + } + } + } + }, + "/api/v1/messages": { + "post": { + "tags": [ + "Global messages" + ], + "summary": "Create a global message", + "description": "Creates a new global message with a specified content type and severity level.", + "operationId": "createGlobalMessage", + "security": [ + { + "oauth2_implicit": [ + "super:user" + ] + }, + { + "BearerAuth": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "message": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "The message text." + }, + "media_type": { + "type": "string", + "enum": [ + "text/plain", + "text/html", + "application/json" + ], + "description": "The media type of the message." + }, + "severity": { + "type": "string", + "enum": [ + "info", + "warning", + "error" + ], + "description": "The severity level of the message." 
+ } + }, + "required": [ + "content", + "media_type", + "severity" + ] + } + } + }, + "example": { + "message": { + "content": "Hi", + "media_type": "text/plain", + "severity": "info" + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 400, + "detail": "Invalid message format." + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 401, + "detail": "Authentication required." + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 403, + "detail": "You do not have permission to create messages." + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + }, + "example": { + "status": 404, + "detail": "Endpoint not found." + } + } + } + } + } + }, + "get": { + "tags": [ + "Global messages" + ], + "summary": "Get global messages", + "description": "Returns all global messages visible to super users.", + "operationId": "getGlobalMessages", + "security": [ + { + "oauth2_implicit": [ + "super:user" + ] + }, + { + "BearerAuth": [] + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "description": "A list of global messages.", + "items": { + "type": "object", + "properties": { + "uuid": { + "type": "string", + "format": "uuid", + "description": "Unique identifier of the message." 
+ }, + "content": { + "type": "string", + "description": "The message content." + }, + "severity": { + "type": "string", + "enum": [ + "info", + "warning", + "error" + ], + "description": "The severity level of the message." + }, + "media_type": { + "type": "string", + "description": "The media type of the message (e.g., text/plain)." + } + } + } + } + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/message/{uuid}": { + "delete": { + "tags": [ + "Global messages" + ], + "summary": "Delete a message", + "description": "Deletes a global message by its UUID.", + "operationId": "deleteGlobalMessage", + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "uuid", + "in": "path", + "required": true, + "description": "The unique identifier of the message to delete.", + "schema": { + "type": "string" + } + } + ], + "responses": { + "204": { + "description": "Deleted" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } 
+ }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/aggregatelogs": { + "get": { + "summary": "Returns the aggregated logs for the current user.", + "description": "Retrieves aggregated logs for the specified user within a given date range.", + "operationId": "getAggregateUserLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation" + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/exportlogs": { + "post": { + "summary": "Exports aggregated logs for the current user.", + "description": "Initiates an export of user logs for a given date range and sends the exported logs via a 
callback URL or email.\n", + "operationId": "exportUserLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: YYYY-MM-DD in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "requestBody": { + "description": "Configuration for an export logs operation (empty JSON object required for request).", + "required": false, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "callback_url": { + "type": "string", + "description": "The callback URL to invoke with a link to the exported logs." + }, + "callback_email": { + "type": "string", + "description": "The e-mail address at which to e-mail a link to the exported logs." 
+ } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "export_id": { + "type": "string", + "description": "The ID of the exported log file.", + "example": "19689987-b37c-4319-a18b-86b92407af74" + } + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/user/logs": { + "get": { + "summary": "List the logs for the current user.", + "description": "Retrieves a list of logs for the current user with optional filters for performer, start time, end time, and pagination.\n", + "operationId": "listUserLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "user:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "next_page", + "in": "query", + "description": "The page token for the next page of logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. 
Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "message": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + } + } + } + } + }, + "example": { + "logs": [ + { + "log_id": "12345", + "message": "User performed an action.", + "timestamp": "2024-03-25T12:00:00Z" + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/organization/{orgname}/aggregatelogs": { + "get": { + "summary": "Gets the aggregated logs for the specified organization.", + "description": "Retrieves the aggregated logs for a specified organization with optional filters for performer, start time, and end time.\n", + "operationId": "getAggregateOrgLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "org:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": 
"orgname", + "in": "path", + "description": "The name of the organization.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "message": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + } + } + } + } + }, + "example": { + "logs": [ + { + "log_id": "98765", + "message": "Organization-level action performed.", + "timestamp": "2024-03-25T12:00:00Z" + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/organization/{orgname}/exportlogs": { + "post": { + "summary": "Exports 
the logs for the specified organization.", + "description": "Exports the logs for a specified organization within a given time range.\n", + "operationId": "exportOrgLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "org:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "orgname", + "in": "path", + "description": "The name of the organization.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "requestBody": { + "description": "Optional payload (empty JSON object required for request).", + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "example": {} + } + } + } + }, + "responses": { + "200": { + "description": "Export request received successfully.", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "export_id": { + "type": "string" + } + }, + "example": { + "export_id": "d21f74c6-7e6c-4d2a-bc34-8c926789ab56" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/organization/{orgname}/logs": { + "get": { + "summary": "List logs for the specified organization.", + "description": "Retrieves the logs for a specified organization within a given time range.\n", + "operationId": "listOrgLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "org:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "orgname", + "in": "path", + "description": "The name of the organization.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "next_page", + "in": "query", + "description": "The page token for the next page.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "performer", + "in": "query", + "description": "Username for which to filter logs.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. 
Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + }, + "performer": { + "type": "string" + }, + "action": { + "type": "string" + } + } + }, + "example": [ + { + "log_id": "1a2b3c4d", + "timestamp": "2025-03-25T10:15:30Z", + "performer": "johndoe", + "action": "repo.created" + } + ] + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/aggregatelogs": { + "get": { + "summary": "Get aggregated logs for a repository", + "description": "Returns the aggregated logs for the specified repository within a given time range.\n", + "operationId": "getAggregateRepoLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "repo:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "repository", + "in": "path", + "description": "The full path of the repository (e.g., namespace/name).", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. 
Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM-DD-YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "repository": { + "type": "string" + }, + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "log_id": { + "type": "string" + }, + "timestamp": { + "type": "string", + "format": "date-time" + }, + "performer": { + "type": "string" + }, + "action": { + "type": "string" + } + } + } + } + }, + "example": { + "repository": "namespace/myrepo", + "logs": [ + { + "log_id": "1a2b3c4d", + "timestamp": "2025-03-25T10:15:30Z", + "performer": "johndoe", + "action": "tag.deleted" + } + ] + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/exportlogs": { + "post": { + "summary": "Export repository logs", + "description": "Queues an export of the logs for the specified repository. 
The export can be delivered via a callback URL or an email notification.\n", + "operationId": "exportRepoLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "repo:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "repository", + "in": "path", + "description": "The full path of the repository (e.g., namespace/name).", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + } + ], + "requestBody": { + "description": "Configuration for an export logs operation (empty JSON object required for request).", + "required": false, + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "callback_url": { + "type": "string", + "format": "uri", + "description": "The callback URL to invoke with a link to the exported logs." + }, + "callback_email": { + "type": "string", + "format": "email", + "description": "The email address at which to send a link to the exported logs." 
+ } + } + } + } + } + }, + "responses": { + "201": { + "description": "Successful creation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "export_id": { + "type": "string" + }, + "status": { + "type": "string", + "enum": [ + "queued", + "processing", + "completed" + ] + } + }, + "example": { + "export_id": "12345-abcdef", + "status": "queued" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + }, + "/api/v1/repository/{repository}/logs": { + "get": { + "summary": "List repository logs", + "description": "Retrieves a list of logs for the specified repository. Supports pagination and filtering by time range.\n", + "operationId": "listRepoLogs", + "tags": [ + "Logs" + ], + "security": [ + { + "oauth2_implicit": [ + "repo:admin" + ] + }, + { + "BearerAuth": [] + } + ], + "parameters": [ + { + "name": "repository", + "in": "path", + "description": "The full path of the repository (e.g., namespace/name).", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "starttime", + "in": "query", + "description": "Earliest time for logs. Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "endtime", + "in": "query", + "description": "Latest time for logs. 
Format: MM/DD/YYYY in UTC.", + "required": false, + "schema": { + "type": "string", + "format": "date" + } + }, + { + "name": "next_page", + "in": "query", + "description": "The page token for retrieving the next set of logs.", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successful invocation", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "logs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "timestamp": { + "type": "string", + "format": "date-time", + "description": "Timestamp of the log entry." + }, + "action": { + "type": "string", + "description": "Action performed." + }, + "performer": { + "type": "string", + "description": "User who performed the action." + }, + "details": { + "type": "object", + "description": "Additional details about the log entry." + } + } + } + }, + "next_page": { + "type": "string", + "description": "Token for the next page of results, if available." 
+ } + }, + "example": { + "logs": [ + { + "timestamp": "2024-03-25T12:34:56Z", + "action": "push", + "performer": "user123", + "details": { + "tag": "latest" + } + } + ], + "next_page": "token123" + } + } + } + } + }, + "400": { + "description": "Bad Request", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "401": { + "description": "Session required", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "403": { + "description": "Unauthorized access", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + }, + "404": { + "description": "Not found", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApiError" + } + } + } + } + } + } + } + }, + "components": { + "securitySchemes": { + "BearerAuth": { + "type": "http", + "scheme": "bearer" + } + }, + "schemas": { + "ApiError": { + "type": "object", + "properties": { + "status": { + "type": "integer", + "description": "HTTP status code of the error." + }, + "detail": { + "type": "string", + "description": "A short message describing the error." + } + } + }, + "ApiErrorDescription": { + "type": "object", + "properties": { + "error": { + "type": "string", + "description": "The error code." + }, + "message": { + "type": "string", + "description": "A detailed description of the error." + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/api-v2.md b/docs/api-v2.md new file mode 100644 index 000000000..e64324ce1 --- /dev/null +++ b/docs/api-v2.md @@ -0,0 +1,3 @@ +# API V2 + + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..5327c2e4a --- /dev/null +++ b/docs/index.md @@ -0,0 +1,17 @@ +# Red Hat Quay + +For full documentation visit [mkdocs.org](https://www.mkdocs.org). + +## Commands + +* `mkdocs new [dir-name]` - Create a new project. 
+* `mkdocs serve` - Start the live-reloading docs server. +* `mkdocs build` - Build the documentation site. +* `mkdocs -h` - Print help message and exit. + +## Project layout + + mkdocs.yml # The configuration file. + docs/ + index.md # The documentation homepage. + ... # Other markdown pages, images and other files. diff --git a/early_access/docinfo.xml b/early_access/docinfo.xml new file mode 100644 index 000000000..c5a561281 --- /dev/null +++ b/early_access/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} Early Access Documentation + + Early access docs for 370 release + + + Red Hat OpenShift Documentation Team + + diff --git a/early_access/master.adoc b/early_access/master.adoc new file mode 100644 index 000000000..4dcd5e675 --- /dev/null +++ b/early_access/master.adoc @@ -0,0 +1,92 @@ +include::modules/attributes.adoc[] + +[id='early-access'] += Early access documentation for {productname} {productmin} + +toc::[] + +[id='quay-release-notes'] +== {productname} Release Notes + +{productname} is regularly released, containing new features, bug fixes, and software updates. +We highly recommend deploying the latest version of {productname}. + +ifdef::downstream[] +For {productname} documentation, you should know that: + +* Documentation is versioned along with each major release +* The latest {productname} documentation is available from the link:https://access.redhat.com/documentation/en-us/red_hat_quay[Red Hat Quay Documentation] page +* Prior to version 2.9.2, the product was referred to as Quay Enterprise +endif::downstream[] + +{productname}, version 3 is the latest major version. 
+ +include::modules/rn_3_70.adoc[leveloffset=+1] + +== Testing Features + +include::modules/testing-features.adoc[leveloffset=+2] + +== Quota management + +include::modules/quota-management-and-enforcement.adoc[leveloffset=+2] +include::modules/quota-management-arch.adoc[leveloffset=+3] +include::modules/quota-management-limitations.adoc[leveloffset=+3] + +include::modules/config-fields-quota.adoc[leveloffset=+3] +include::modules/quota-establishment-ui.adoc[leveloffset=+3] +include::modules/quota-establishment-api.adoc[leveloffset=+3] + +include::modules/api-namespacequota.adoc[leveloffset=+2] +include::modules/api-namespacequota-listUserQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-getOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-changeOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-listOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-createOrganizationQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-getUserQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-listUserQuotaLimit.adoc[leveloffset=+3] +include::modules/api-namespacequota-getOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-deleteOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-changeOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-listOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-createOrganizationQuota.adoc[leveloffset=+3] +include::modules/api-namespacequota-getUserQuota.adoc[leveloffset=+3] + +include::modules/api-superuser-deleteUserQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-changeUserQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-createUserQuotaSuperUser.adoc[leveloffset=+3] 
+include::modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-changeOrganizationQuotaSuperUser.adoc[leveloffset=+3] +include::modules/api-superuser-createOrganizationQuotaSuperUser.adoc[leveloffset=+3] + +include::modules/api-definitions.adoc[leveloffset=+3] + +== Geo-replication with the Operator + +include::modules/georepl-intro.adoc[leveloffset=+2] +include::modules/georepl-prereqs.adoc[leveloffset=+3] +include::modules/georepl-arch-operator.adoc[leveloffset=+3] +include::modules/georepl-deploy-operator.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+3] + + +== {productname} as cache proxy + + +include::modules/quay-as-cache-proxy.adoc[leveloffset=+2] +include::modules/proxy-cache-arch.adoc[leveloffset=+3] +include::modules/proxy-cache-limitations.adoc[leveloffset=+3] +include::modules/proxy-cache-procedure.adoc[leveloffset=+3] + +include::modules/api-organization-createProxyCacheConfig.adoc[leveloffset=+3] +include::modules/api-organization-deleteProxyCacheConfig.adoc[leveloffset=+3] +include::modules/api-organization-getProxyCacheConfig.adoc[leveloffset=+3] +include::modules/api-organization-validateProxyCacheConfig.adoc[leveloffset=+3] + +== Build Enhancements + +include::modules/build-enhancements.adoc[leveloffset=+2] +include::modules/build-enhanced-arch.adoc[leveloffset=+3] +include::modules/build-limitations.adoc[leveloffset=+3] +include::modules/builders-virtual-environment.adoc[leveloffset=+3] diff --git a/early_access/modules b/early_access/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/early_access/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/images/178_Quay_architecture_0821_air-gapped.png b/images/178_Quay_architecture_0821_air-gapped.png new file mode 100644 index 000000000..e943a9ae5 Binary files /dev/null and b/images/178_Quay_architecture_0821_air-gapped.png differ diff --git 
a/images/178_Quay_architecture_0821_deploy_topology.png b/images/178_Quay_architecture_0821_deploy_topology.png new file mode 100644 index 000000000..0e69a58b4 Binary files /dev/null and b/images/178_Quay_architecture_0821_deploy_topology.png differ diff --git a/images/178_Quay_architecture_0821_deploy_topology_storage.png b/images/178_Quay_architecture_0821_deploy_topology_storage.png new file mode 100644 index 000000000..d5abf5ddf Binary files /dev/null and b/images/178_Quay_architecture_0821_deploy_topology_storage.png differ diff --git a/images/178_Quay_architecture_0821_deployment_ex1.png b/images/178_Quay_architecture_0821_deployment_ex1.png new file mode 100644 index 000000000..48397e3b2 Binary files /dev/null and b/images/178_Quay_architecture_0821_deployment_ex1.png differ diff --git a/images/178_Quay_architecture_0821_deployment_ex2.png b/images/178_Quay_architecture_0821_deployment_ex2.png new file mode 100644 index 000000000..99d047da0 Binary files /dev/null and b/images/178_Quay_architecture_0821_deployment_ex2.png differ diff --git a/images/178_Quay_architecture_0821_features.png b/images/178_Quay_architecture_0821_features.png new file mode 100644 index 000000000..818ff64d9 Binary files /dev/null and b/images/178_Quay_architecture_0821_features.png differ diff --git a/images/178_Quay_architecture_0821_georeplication.png b/images/178_Quay_architecture_0821_georeplication.png new file mode 100644 index 000000000..d0171bad6 Binary files /dev/null and b/images/178_Quay_architecture_0821_georeplication.png differ diff --git a/images/178_Quay_architecture_0821_georeplication_openshift-temp.png b/images/178_Quay_architecture_0821_georeplication_openshift-temp.png new file mode 100644 index 000000000..b646f1fd3 Binary files /dev/null and b/images/178_Quay_architecture_0821_georeplication_openshift-temp.png differ diff --git a/images/178_Quay_architecture_0821_georeplication_openshift.png b/images/178_Quay_architecture_0821_georeplication_openshift.png new 
file mode 100644 index 000000000..8f9769770 Binary files /dev/null and b/images/178_Quay_architecture_0821_georeplication_openshift.png differ diff --git a/images/178_Quay_architecture_0821_on-premises_config.png b/images/178_Quay_architecture_0821_on-premises_config.png new file mode 100644 index 000000000..720808a42 Binary files /dev/null and b/images/178_Quay_architecture_0821_on-premises_config.png differ diff --git a/images/178_Quay_architecture_0821_on_AWS.png b/images/178_Quay_architecture_0821_on_AWS.png new file mode 100644 index 000000000..62cad9a47 Binary files /dev/null and b/images/178_Quay_architecture_0821_on_AWS.png differ diff --git a/images/178_Quay_architecture_0821_on_Azure.png b/images/178_Quay_architecture_0821_on_Azure.png new file mode 100644 index 000000000..83bfddc02 Binary files /dev/null and b/images/178_Quay_architecture_0821_on_Azure.png differ diff --git a/images/178_Quay_architecture_0821_tenancy_model.png b/images/178_Quay_architecture_0821_tenancy_model.png new file mode 100644 index 000000000..278cad6f5 Binary files /dev/null and b/images/178_Quay_architecture_0821_tenancy_model.png differ diff --git a/images/38-ui-toggle.png b/images/38-ui-toggle.png new file mode 100644 index 000000000..d60184aed Binary files /dev/null and b/images/38-ui-toggle.png differ diff --git a/images/Figure01.png b/images/Figure01.png index fdcba0a64..3bf69a788 100644 Binary files a/images/Figure01.png and b/images/Figure01.png differ diff --git a/images/Figure05.png b/images/Figure05.png new file mode 100644 index 000000000..cd034d1c6 Binary files /dev/null and b/images/Figure05.png differ diff --git a/images/add-superuser.png b/images/add-superuser.png new file mode 100644 index 000000000..2988ed374 Binary files /dev/null and b/images/add-superuser.png differ diff --git a/images/air-gap.png b/images/air-gap.png new file mode 100644 index 000000000..9577b2c71 Binary files /dev/null and b/images/air-gap.png differ diff --git a/images/alerting-rules.png 
b/images/alerting-rules.png new file mode 100644 index 000000000..0b6541181 Binary files /dev/null and b/images/alerting-rules.png differ diff --git a/images/api-mirror.png b/images/api-mirror.png new file mode 100644 index 000000000..fa1308bed Binary files /dev/null and b/images/api-mirror.png differ diff --git a/images/application-token.png b/images/application-token.png new file mode 100644 index 000000000..5acb1c2ce Binary files /dev/null and b/images/application-token.png differ diff --git a/images/august-receipt.png b/images/august-receipt.png new file mode 100644 index 000000000..b1f1ead53 Binary files /dev/null and b/images/august-receipt.png differ diff --git a/images/authentication-ldap-admin-dn.png b/images/authentication-ldap-admin-dn.png new file mode 100644 index 000000000..3c7ea2eed Binary files /dev/null and b/images/authentication-ldap-admin-dn.png differ diff --git a/images/authentication-ldap-basedn.png b/images/authentication-ldap-basedn.png new file mode 100644 index 000000000..9749ce9d7 Binary files /dev/null and b/images/authentication-ldap-basedn.png differ diff --git a/images/authentication-ldap-details.png b/images/authentication-ldap-details.png deleted file mode 100644 index 4abc92fbb..000000000 Binary files a/images/authentication-ldap-details.png and /dev/null differ diff --git a/images/authentication-ldap-ssl.png b/images/authentication-ldap-ssl.png new file mode 100644 index 000000000..1316d45bc Binary files /dev/null and b/images/authentication-ldap-ssl.png differ diff --git a/images/authentication-ldap-success.png b/images/authentication-ldap-success.png index 956818593..547ffa6fc 100644 Binary files a/images/authentication-ldap-success.png and b/images/authentication-ldap-success.png differ diff --git a/images/authentication-ldap-team-sync-1.png b/images/authentication-ldap-team-sync-1.png new file mode 100644 index 000000000..0dd8f933c Binary files /dev/null and b/images/authentication-ldap-team-sync-1.png differ diff --git 
a/images/authentication-ldap-team-sync-2.png b/images/authentication-ldap-team-sync-2.png new file mode 100644 index 000000000..6c1bcf223 Binary files /dev/null and b/images/authentication-ldap-team-sync-2.png differ diff --git a/images/authentication-ldap-uid-mail.png b/images/authentication-ldap-uid-mail.png new file mode 100644 index 000000000..b4fc3c171 Binary files /dev/null and b/images/authentication-ldap-uid-mail.png differ diff --git a/images/authentication-ldap-uri.png b/images/authentication-ldap-uri.png new file mode 100644 index 000000000..e73f45c38 Binary files /dev/null and b/images/authentication-ldap-uri.png differ diff --git a/images/authentication-ldap-user-filter.png b/images/authentication-ldap-user-filter.png new file mode 100644 index 000000000..272050309 Binary files /dev/null and b/images/authentication-ldap-user-filter.png differ diff --git a/images/authentication-ldap.png b/images/authentication-ldap.png index f97b580e0..396fa7ea2 100644 Binary files a/images/authentication-ldap.png and b/images/authentication-ldap.png differ diff --git a/images/auto-prune-policies-page.png b/images/auto-prune-policies-page.png new file mode 100644 index 000000000..05ddb54c2 Binary files /dev/null and b/images/auto-prune-policies-page.png differ diff --git a/images/build-history.png b/images/build-history.png new file mode 100644 index 000000000..8f9605f38 Binary files /dev/null and b/images/build-history.png differ diff --git a/images/build-trigger-example.png b/images/build-trigger-example.png new file mode 100644 index 000000000..bb3b5fad6 Binary files /dev/null and b/images/build-trigger-example.png differ diff --git a/images/cache-proxy-overview.png b/images/cache-proxy-overview.png new file mode 100644 index 000000000..98b8766ff Binary files /dev/null and b/images/cache-proxy-overview.png differ diff --git a/images/cache-proxy-pulled-image.png b/images/cache-proxy-pulled-image.png new file mode 100644 index 000000000..03e3da592 Binary files 
/dev/null and b/images/cache-proxy-pulled-image.png differ diff --git a/images/cache-proxy-staleness-pull.png b/images/cache-proxy-staleness-pull.png new file mode 100644 index 000000000..2fa7f1a30 Binary files /dev/null and b/images/cache-proxy-staleness-pull.png differ diff --git a/images/choose-dashboard.png b/images/choose-dashboard.png new file mode 100644 index 000000000..d4438fc97 Binary files /dev/null and b/images/choose-dashboard.png differ diff --git a/images/clair-4-0-cve-report.png b/images/clair-4-0-cve-report.png new file mode 100644 index 000000000..e234071d1 Binary files /dev/null and b/images/clair-4-0-cve-report.png differ diff --git a/images/clair-4-2-enrichment-data.png b/images/clair-4-2-enrichment-data.png new file mode 100644 index 000000000..0fc0a45a0 Binary files /dev/null and b/images/clair-4-2-enrichment-data.png differ diff --git a/images/clair-core-architecture.png b/images/clair-core-architecture.png new file mode 100644 index 000000000..2cba02e39 Binary files /dev/null and b/images/clair-core-architecture.png differ diff --git a/images/clair-reposcan.png b/images/clair-reposcan.png new file mode 100644 index 000000000..7cb904a3e Binary files /dev/null and b/images/clair-reposcan.png differ diff --git a/images/clair-vulnerabilities.png b/images/clair-vulnerabilities.png new file mode 100644 index 000000000..8d256d7a7 Binary files /dev/null and b/images/clair-vulnerabilities.png differ diff --git a/images/config-editor-details-openshift.png b/images/config-editor-details-openshift.png new file mode 100644 index 000000000..40e2d2867 Binary files /dev/null and b/images/config-editor-details-openshift.png differ diff --git a/images/config-editor-details-operator-36.png b/images/config-editor-details-operator-36.png new file mode 100644 index 000000000..511212fd7 Binary files /dev/null and b/images/config-editor-details-operator-36.png differ diff --git a/images/config-editor-reconfigure.png b/images/config-editor-reconfigure.png new file 
mode 100644 index 000000000..77290111f Binary files /dev/null and b/images/config-editor-reconfigure.png differ diff --git a/images/config-editor-reconfigured.png b/images/config-editor-reconfigured.png new file mode 100644 index 000000000..e0c6b4380 Binary files /dev/null and b/images/config-editor-reconfigured.png differ diff --git a/images/config-editor-secret-reveal.png b/images/config-editor-secret-reveal.png new file mode 100644 index 000000000..67f7721ee Binary files /dev/null and b/images/config-editor-secret-reveal.png differ diff --git a/images/config-editor-secret-updated.png b/images/config-editor-secret-updated.png new file mode 100644 index 000000000..92ab01fd3 Binary files /dev/null and b/images/config-editor-secret-updated.png differ diff --git a/images/config-editor-secret.png b/images/config-editor-secret.png new file mode 100644 index 000000000..d8db00525 Binary files /dev/null and b/images/config-editor-secret.png differ diff --git a/images/config-editor-su.png b/images/config-editor-su.png new file mode 100644 index 000000000..2ce9a20c4 Binary files /dev/null and b/images/config-editor-su.png differ diff --git a/images/config-editor-ui.png b/images/config-editor-ui.png new file mode 100644 index 000000000..5798ed756 Binary files /dev/null and b/images/config-editor-ui.png differ diff --git a/images/configtoolroute.png b/images/configtoolroute.png new file mode 100644 index 000000000..61bb88ff8 Binary files /dev/null and b/images/configtoolroute.png differ diff --git a/images/configtoolsetup.png b/images/configtoolsetup.png new file mode 100644 index 000000000..e8a5925f1 Binary files /dev/null and b/images/configtoolsetup.png differ diff --git a/images/configure-tagging.png b/images/configure-tagging.png new file mode 100644 index 000000000..cd55b6720 Binary files /dev/null and b/images/configure-tagging.png differ diff --git a/images/confirm-ldap-username.png b/images/confirm-ldap-username.png new file mode 100644 index 000000000..8e09d6aa6 
Binary files /dev/null and b/images/confirm-ldap-username.png differ diff --git a/images/console-dashboard-1.png b/images/console-dashboard-1.png new file mode 100644 index 000000000..4f9804ef6 Binary files /dev/null and b/images/console-dashboard-1.png differ diff --git a/images/create-account-1.png b/images/create-account-1.png new file mode 100644 index 000000000..df7cc7e69 Binary files /dev/null and b/images/create-account-1.png differ diff --git a/images/create-account-2.png b/images/create-account-2.png new file mode 100644 index 000000000..6a0391526 Binary files /dev/null and b/images/create-account-2.png differ diff --git a/images/create-account-3.png b/images/create-account-3.png new file mode 100644 index 000000000..c3faf77b2 Binary files /dev/null and b/images/create-account-3.png differ diff --git a/images/create-build-trigger.png b/images/create-build-trigger.png new file mode 100644 index 000000000..d588cef89 Binary files /dev/null and b/images/create-build-trigger.png differ diff --git a/images/create-new-message.png b/images/create-new-message.png new file mode 100644 index 000000000..b42a38602 Binary files /dev/null and b/images/create-new-message.png differ diff --git a/images/create-new-org.png b/images/create-new-org.png new file mode 100644 index 000000000..6d49b0ca6 Binary files /dev/null and b/images/create-new-org.png differ diff --git a/images/cso-dashboard.png b/images/cso-dashboard.png new file mode 100644 index 000000000..b71cf7ae1 Binary files /dev/null and b/images/cso-dashboard.png differ diff --git a/images/cso-namespace-vulnerable.png b/images/cso-namespace-vulnerable.png new file mode 100644 index 000000000..948a6dc81 Binary files /dev/null and b/images/cso-namespace-vulnerable.png differ diff --git a/images/cso-registry-vulnerable.png b/images/cso-registry-vulnerable.png new file mode 100644 index 000000000..c9b147d11 Binary files /dev/null and b/images/cso-registry-vulnerable.png differ diff --git a/images/custom-tagging.png 
b/images/custom-tagging.png new file mode 100644 index 000000000..d40092b7d Binary files /dev/null and b/images/custom-tagging.png differ diff --git a/images/e2e-demo-httpd-example.png b/images/e2e-demo-httpd-example.png new file mode 100644 index 000000000..d32aa5b90 Binary files /dev/null and b/images/e2e-demo-httpd-example.png differ diff --git a/images/elasticsearch_action_logs.png b/images/elasticsearch_action_logs.png new file mode 100644 index 000000000..c6193f795 Binary files /dev/null and b/images/elasticsearch_action_logs.png differ diff --git a/images/export-usage-logs.png b/images/export-usage-logs.png new file mode 100644 index 000000000..c5e47b1fa Binary files /dev/null and b/images/export-usage-logs.png differ diff --git a/images/garbage-collection-metrics.png b/images/garbage-collection-metrics.png new file mode 100644 index 000000000..b10de0d82 Binary files /dev/null and b/images/garbage-collection-metrics.png differ diff --git a/images/georeplication-arch.png b/images/georeplication-arch.png new file mode 100644 index 000000000..5ed03f962 Binary files /dev/null and b/images/georeplication-arch.png differ diff --git a/images/georeplication-aws.png b/images/georeplication-aws.png new file mode 100644 index 000000000..ee650e75e Binary files /dev/null and b/images/georeplication-aws.png differ diff --git a/images/grant-user-access.png b/images/grant-user-access.png new file mode 100644 index 000000000..0a3149cbe Binary files /dev/null and b/images/grant-user-access.png differ diff --git a/images/image-fetch.png b/images/image-fetch.png index 2463fdd8f..568de1dec 100644 Binary files a/images/image-fetch.png and b/images/image-fetch.png differ diff --git a/images/installed-operators-list.png b/images/installed-operators-list.png new file mode 100644 index 000000000..e7aa9295e Binary files /dev/null and b/images/installed-operators-list.png differ diff --git a/images/kinesis_producer.png b/images/kinesis_producer.png new file mode 100644 index 
000000000..cc69cf901 Binary files /dev/null and b/images/kinesis_producer.png differ diff --git a/images/ldap-internal-authentication.png b/images/ldap-internal-authentication.png new file mode 100644 index 000000000..218f23dbf Binary files /dev/null and b/images/ldap-internal-authentication.png differ diff --git a/images/logs.png b/images/logs.png new file mode 100644 index 000000000..f0b13f46a Binary files /dev/null and b/images/logs.png differ diff --git a/images/logsv2-ui.png b/images/logsv2-ui.png new file mode 100644 index 000000000..38c8b4876 Binary files /dev/null and b/images/logsv2-ui.png differ diff --git a/images/mail-attribute-ldap.png b/images/mail-attribute-ldap.png new file mode 100644 index 000000000..9a487c0bd Binary files /dev/null and b/images/mail-attribute-ldap.png differ diff --git a/images/manifest-deletion-architecture.png b/images/manifest-deletion-architecture.png new file mode 100644 index 000000000..f7ae29cf4 Binary files /dev/null and b/images/manifest-deletion-architecture.png differ diff --git a/images/manifest-example.png b/images/manifest-example.png new file mode 100644 index 000000000..6c7313067 Binary files /dev/null and b/images/manifest-example.png differ diff --git a/images/metadata-request.png b/images/metadata-request.png new file mode 100644 index 000000000..ccc13a985 Binary files /dev/null and b/images/metadata-request.png differ diff --git a/images/metrics-single-namespace-users.png b/images/metrics-single-namespace-users.png new file mode 100644 index 000000000..50dc92500 Binary files /dev/null and b/images/metrics-single-namespace-users.png differ diff --git a/images/metrics-single-namespace.png b/images/metrics-single-namespace.png new file mode 100644 index 000000000..3b187a6c2 Binary files /dev/null and b/images/metrics-single-namespace.png differ diff --git a/images/modelcard.png b/images/modelcard.png new file mode 100644 index 000000000..717e216e0 Binary files /dev/null and b/images/modelcard.png differ diff 
--git a/images/new-org.png b/images/new-org.png new file mode 100644 index 000000000..675b21863 Binary files /dev/null and b/images/new-org.png differ diff --git a/images/operator-config-bundle-edit-secret.png b/images/operator-config-bundle-edit-secret.png new file mode 100644 index 000000000..1eb81899f Binary files /dev/null and b/images/operator-config-bundle-edit-secret.png differ diff --git a/images/operator-install-page.png b/images/operator-install-page.png new file mode 100644 index 000000000..29604e468 Binary files /dev/null and b/images/operator-install-page.png differ diff --git a/images/operator-quay-registry-overview.png b/images/operator-quay-registry-overview.png new file mode 100644 index 000000000..21b188f6d Binary files /dev/null and b/images/operator-quay-registry-overview.png differ diff --git a/images/operator-save-config-changes.png b/images/operator-save-config-changes.png new file mode 100644 index 000000000..96efbf3e0 Binary files /dev/null and b/images/operator-save-config-changes.png differ diff --git a/images/operator-subscription.png b/images/operator-subscription.png new file mode 100644 index 000000000..2694932fd Binary files /dev/null and b/images/operator-subscription.png differ diff --git a/images/operatorhub-quay.png b/images/operatorhub-quay.png new file mode 100644 index 000000000..20be02409 Binary files /dev/null and b/images/operatorhub-quay.png differ diff --git a/images/permanently-delete-image-tag.png b/images/permanently-delete-image-tag.png new file mode 100644 index 000000000..ebe887964 Binary files /dev/null and b/images/permanently-delete-image-tag.png differ diff --git a/images/poc-clair-1.png b/images/poc-clair-1.png new file mode 100644 index 000000000..ae864b4c9 Binary files /dev/null and b/images/poc-clair-1.png differ diff --git a/images/poc-clair-2.png b/images/poc-clair-2.png new file mode 100644 index 000000000..c71111eda Binary files /dev/null and b/images/poc-clair-2.png differ diff --git 
a/images/poc-quay-scanner-config.png b/images/poc-quay-scanner-config.png new file mode 100644 index 000000000..6772f3980 Binary files /dev/null and b/images/poc-quay-scanner-config.png differ diff --git a/images/proxy-cache-size-configuration.png b/images/proxy-cache-size-configuration.png new file mode 100644 index 000000000..b9641f701 Binary files /dev/null and b/images/proxy-cache-size-configuration.png differ diff --git a/images/public-cloud-aws.png b/images/public-cloud-aws.png new file mode 100644 index 000000000..9ae793c8e Binary files /dev/null and b/images/public-cloud-aws.png differ diff --git a/images/public-cloud-azure.png b/images/public-cloud-azure.png new file mode 100644 index 000000000..c73d7efa3 Binary files /dev/null and b/images/public-cloud-azure.png differ diff --git a/images/quay-builds-architecture.png b/images/quay-builds-architecture.png new file mode 100644 index 000000000..3ca2e8ee3 Binary files /dev/null and b/images/quay-builds-architecture.png differ diff --git a/images/quay-deployment-example-one.png b/images/quay-deployment-example-one.png new file mode 100644 index 000000000..44524b806 Binary files /dev/null and b/images/quay-deployment-example-one.png differ diff --git a/images/quay-deployment-example-two.png b/images/quay-deployment-example-two.png new file mode 100644 index 000000000..391643364 Binary files /dev/null and b/images/quay-deployment-example-two.png differ diff --git a/images/quay-deployment-topology-storage-proxy.png b/images/quay-deployment-topology-storage-proxy.png new file mode 100644 index 000000000..8665a358b Binary files /dev/null and b/images/quay-deployment-topology-storage-proxy.png differ diff --git a/images/quay-deployment-topology.png b/images/quay-deployment-topology.png new file mode 100644 index 000000000..8376500d4 Binary files /dev/null and b/images/quay-deployment-topology.png differ diff --git a/images/quay-features.png b/images/quay-features.png new file mode 100644 index 000000000..27ee227a6 
Binary files /dev/null and b/images/quay-features.png differ diff --git a/images/quay-hybrid-cloud-landing-page.png b/images/quay-hybrid-cloud-landing-page.png new file mode 100644 index 000000000..24d2ed49e Binary files /dev/null and b/images/quay-hybrid-cloud-landing-page.png differ diff --git a/images/quay-metrics-org-rows.png b/images/quay-metrics-org-rows.png new file mode 100644 index 000000000..98696781e Binary files /dev/null and b/images/quay-metrics-org-rows.png differ diff --git a/images/quay-metrics.png b/images/quay-metrics.png new file mode 100644 index 000000000..b4365e650 Binary files /dev/null and b/images/quay-metrics.png differ diff --git a/images/quay-pod-frequently-restarting.png b/images/quay-pod-frequently-restarting.png new file mode 100644 index 000000000..e1641040d Binary files /dev/null and b/images/quay-pod-frequently-restarting.png differ diff --git a/images/quay-sample-sizing.png b/images/quay-sample-sizing.png new file mode 100644 index 000000000..33ef2ee44 Binary files /dev/null and b/images/quay-sample-sizing.png differ diff --git a/images/quay-setup-operator-openshift.png b/images/quay-setup-operator-openshift.png new file mode 100644 index 000000000..d881ce2d8 Binary files /dev/null and b/images/quay-setup-operator-openshift.png differ diff --git a/images/quay-tenancy-model.png b/images/quay-tenancy-model.png new file mode 100644 index 000000000..db2ee51f9 Binary files /dev/null and b/images/quay-tenancy-model.png differ diff --git a/images/quayio-footer.png b/images/quayio-footer.png new file mode 100644 index 000000000..db71d50fd Binary files /dev/null and b/images/quayio-footer.png differ diff --git a/images/quayio-header.png b/images/quayio-header.png new file mode 100644 index 000000000..540519188 Binary files /dev/null and b/images/quayio-header.png differ diff --git a/images/quayio-repo-landing-page.png b/images/quayio-repo-landing-page.png new file mode 100644 index 000000000..532f4cdfe Binary files /dev/null and 
b/images/quayio-repo-landing-page.png differ diff --git a/images/quota-100MB-empty.png b/images/quota-100MB-empty.png new file mode 100644 index 000000000..c40c4ca07 Binary files /dev/null and b/images/quota-100MB-empty.png differ diff --git a/images/quota-100MB-settings-ui.png b/images/quota-100MB-settings-ui.png new file mode 100644 index 000000000..cc6530dcc Binary files /dev/null and b/images/quota-100MB-settings-ui.png differ diff --git a/images/quota-10MB-empty.png b/images/quota-10MB-empty.png new file mode 100644 index 000000000..29d61bbd2 Binary files /dev/null and b/images/quota-10MB-empty.png differ diff --git a/images/quota-first-image.png b/images/quota-first-image.png new file mode 100644 index 000000000..7618dc76d Binary files /dev/null and b/images/quota-first-image.png differ diff --git a/images/quota-limits.png b/images/quota-limits.png new file mode 100644 index 000000000..3f4348f1a Binary files /dev/null and b/images/quota-limits.png differ diff --git a/images/quota-management.png b/images/quota-management.png new file mode 100644 index 000000000..3d024465b Binary files /dev/null and b/images/quota-management.png differ diff --git a/images/quota-no-quota.png b/images/quota-no-quota.png new file mode 100644 index 000000000..b87687c35 Binary files /dev/null and b/images/quota-no-quota.png differ diff --git a/images/quota-none-org-settings.png b/images/quota-none-org-settings.png new file mode 100644 index 000000000..6d01c3dae Binary files /dev/null and b/images/quota-none-org-settings.png differ diff --git a/images/quota-notifications.png b/images/quota-notifications.png new file mode 100644 index 000000000..900e983c9 Binary files /dev/null and b/images/quota-notifications.png differ diff --git a/images/quota-org-consumed-first.png b/images/quota-org-consumed-first.png new file mode 100644 index 000000000..3c70c1cfc Binary files /dev/null and b/images/quota-org-consumed-first.png differ diff --git a/images/quota-org-consumed-second.png 
b/images/quota-org-consumed-second.png new file mode 100644 index 000000000..8e75e9a40 Binary files /dev/null and b/images/quota-org-consumed-second.png differ diff --git a/images/quota-org-init-consumed.png b/images/quota-org-init-consumed.png new file mode 100644 index 000000000..dd38584d4 Binary files /dev/null and b/images/quota-org-init-consumed.png differ diff --git a/images/quota-org-quota-policy.png b/images/quota-org-quota-policy.png new file mode 100644 index 000000000..2bcff31d0 Binary files /dev/null and b/images/quota-org-quota-policy.png differ diff --git a/images/quota-su-consumed-first.png b/images/quota-su-consumed-first.png new file mode 100644 index 000000000..e34c0afbd Binary files /dev/null and b/images/quota-su-consumed-first.png differ diff --git a/images/quota-su-increase-100MB.png b/images/quota-su-increase-100MB.png new file mode 100644 index 000000000..91258705d Binary files /dev/null and b/images/quota-su-increase-100MB.png differ diff --git a/images/quota-su-init-10MB.png b/images/quota-su-init-10MB.png new file mode 100644 index 000000000..cc6272d97 Binary files /dev/null and b/images/quota-su-init-10MB.png differ diff --git a/images/quota-su-init-consumed.png b/images/quota-su-init-consumed.png new file mode 100644 index 000000000..0daf93653 Binary files /dev/null and b/images/quota-su-init-consumed.png differ diff --git a/images/quota-su-org-options.png b/images/quota-su-org-options.png new file mode 100644 index 000000000..caa2510c1 Binary files /dev/null and b/images/quota-su-org-options.png differ diff --git a/images/quota-su-reject-80.png b/images/quota-su-reject-80.png new file mode 100644 index 000000000..22f7fe4e8 Binary files /dev/null and b/images/quota-su-reject-80.png differ diff --git a/images/quota-su-warning-70.png b/images/quota-su-warning-70.png new file mode 100644 index 000000000..74e7535ba Binary files /dev/null and b/images/quota-su-warning-70.png differ diff --git a/images/register-app.png 
b/images/register-app.png index b3963d7b0..aff4c5aaa 100644 Binary files a/images/register-app.png and b/images/register-app.png differ diff --git a/images/repo-create.png b/images/repo-create.png new file mode 100644 index 000000000..36e97dd3e Binary files /dev/null and b/images/repo-create.png differ diff --git a/images/repo-mirror-details-start.png b/images/repo-mirror-details-start.png new file mode 100644 index 000000000..409e63571 Binary files /dev/null and b/images/repo-mirror-details-start.png differ diff --git a/images/repo-mirror-details.png b/images/repo-mirror-details.png new file mode 100644 index 000000000..e8ac6b47b Binary files /dev/null and b/images/repo-mirror-details.png differ diff --git a/images/repo-mirror-tags.png b/images/repo-mirror-tags.png new file mode 100644 index 000000000..eafb93286 Binary files /dev/null and b/images/repo-mirror-tags.png differ diff --git a/images/repo-mirror-usage-logs.png b/images/repo-mirror-usage-logs.png new file mode 100644 index 000000000..5a4cf3712 Binary files /dev/null and b/images/repo-mirror-usage-logs.png differ diff --git a/images/repo_mirror_create.png b/images/repo_mirror_create.png index 1c7932efd..c713952fe 100644 Binary files a/images/repo_mirror_create.png and b/images/repo_mirror_create.png differ diff --git a/images/repo_quay_rhel8.png b/images/repo_quay_rhel8.png new file mode 100644 index 000000000..590f81a57 Binary files /dev/null and b/images/repo_quay_rhel8.png differ diff --git a/images/robot-gen-token.png b/images/robot-gen-token.png new file mode 100644 index 000000000..379141991 Binary files /dev/null and b/images/robot-gen-token.png differ diff --git a/images/sample-on-prem.png b/images/sample-on-prem.png new file mode 100644 index 000000000..68394cc96 Binary files /dev/null and b/images/sample-on-prem.png differ diff --git a/images/save-configuration.png b/images/save-configuration.png index b4f2eb123..547ffa6fc 100644 Binary files a/images/save-configuration.png and 
b/images/save-configuration.png differ diff --git a/images/scheduled-maintenance-banner.png b/images/scheduled-maintenance-banner.png new file mode 100644 index 000000000..160142fdc Binary files /dev/null and b/images/scheduled-maintenance-banner.png differ diff --git a/images/set-repository-permissions-robot-account.png b/images/set-repository-permissions-robot-account.png new file mode 100644 index 000000000..fa97eeb0e Binary files /dev/null and b/images/set-repository-permissions-robot-account.png differ diff --git a/images/set-team-role.png b/images/set-team-role.png new file mode 100644 index 000000000..334138383 Binary files /dev/null and b/images/set-team-role.png differ diff --git a/images/splunk-log-metadata.png b/images/splunk-log-metadata.png new file mode 100644 index 000000000..813e2dd17 Binary files /dev/null and b/images/splunk-log-metadata.png differ diff --git a/images/ssl-config.png b/images/ssl-config.png new file mode 100644 index 000000000..29ac81055 Binary files /dev/null and b/images/ssl-config.png differ diff --git a/images/ssl-connection-not-private.png b/images/ssl-connection-not-private.png new file mode 100644 index 000000000..db40c8bb5 Binary files /dev/null and b/images/ssl-connection-not-private.png differ diff --git a/images/ssl-connection-not-secure.png b/images/ssl-connection-not-secure.png new file mode 100644 index 000000000..c38062c49 Binary files /dev/null and b/images/ssl-connection-not-secure.png differ diff --git a/images/ssl-connection-secure.png b/images/ssl-connection-secure.png new file mode 100644 index 000000000..061ea4160 Binary files /dev/null and b/images/ssl-connection-secure.png differ diff --git a/images/ssl-potential-risk.png b/images/ssl-potential-risk.png new file mode 100644 index 000000000..312e11edf Binary files /dev/null and b/images/ssl-potential-risk.png differ diff --git a/images/super-user-admin-panel.png b/images/super-user-admin-panel.png new file mode 100644 index 000000000..5ea2c51e8 Binary files 
/dev/null and b/images/super-user-admin-panel.png differ diff --git a/images/swagger-mirroring.png b/images/swagger-mirroring.png new file mode 100644 index 000000000..676997056 Binary files /dev/null and b/images/swagger-mirroring.png differ diff --git a/images/tag-expiration-v2-ui.png b/images/tag-expiration-v2-ui.png new file mode 100644 index 000000000..f15c4b1f3 Binary files /dev/null and b/images/tag-expiration-v2-ui.png differ diff --git a/images/tag-expires-ui.png b/images/tag-expires-ui.png new file mode 100644 index 000000000..dfdb0e04b Binary files /dev/null and b/images/tag-expires-ui.png differ diff --git a/images/toggle-legacy-ui.png b/images/toggle-legacy-ui.png new file mode 100644 index 000000000..9bfa2a932 Binary files /dev/null and b/images/toggle-legacy-ui.png differ diff --git a/images/total-quota-consumed.png b/images/total-quota-consumed.png new file mode 100644 index 000000000..e2b7b83af Binary files /dev/null and b/images/total-quota-consumed.png differ diff --git a/images/total-registry-size.png b/images/total-registry-size.png new file mode 100644 index 000000000..689f87c9b Binary files /dev/null and b/images/total-registry-size.png differ diff --git a/images/ui-access-settings.png b/images/ui-access-settings.png new file mode 100644 index 000000000..881e8a409 Binary files /dev/null and b/images/ui-access-settings.png differ diff --git a/images/ui-action-log-choice.png b/images/ui-action-log-choice.png new file mode 100644 index 000000000..a23086f3d Binary files /dev/null and b/images/ui-action-log-choice.png differ diff --git a/images/ui-action-log-database.png b/images/ui-action-log-database.png new file mode 100644 index 000000000..e7a28ad78 Binary files /dev/null and b/images/ui-action-log-database.png differ diff --git a/images/ui-action-log-elastic.png b/images/ui-action-log-elastic.png new file mode 100644 index 000000000..ba0f3811c Binary files /dev/null and b/images/ui-action-log-elastic.png differ diff --git 
a/images/ui-action-log-rotation-storage-choice.png b/images/ui-action-log-rotation-storage-choice.png new file mode 100644 index 000000000..e28b6b267 Binary files /dev/null and b/images/ui-action-log-rotation-storage-choice.png differ diff --git a/images/ui-action-log-rotation.png b/images/ui-action-log-rotation.png new file mode 100644 index 000000000..f023ace9c Binary files /dev/null and b/images/ui-action-log-rotation.png differ diff --git a/images/ui-app-registry.png b/images/ui-app-registry.png new file mode 100644 index 000000000..52ac09fcd Binary files /dev/null and b/images/ui-app-registry.png differ diff --git a/images/ui-auth-external-app.png b/images/ui-auth-external-app.png new file mode 100644 index 000000000..6c6f68cbc Binary files /dev/null and b/images/ui-auth-external-app.png differ diff --git a/images/ui-auth-jwt.png b/images/ui-auth-jwt.png new file mode 100644 index 000000000..416d00f02 Binary files /dev/null and b/images/ui-auth-jwt.png differ diff --git a/images/ui-auth-keystone.png b/images/ui-auth-keystone.png new file mode 100644 index 000000000..9270e3e7b Binary files /dev/null and b/images/ui-auth-keystone.png differ diff --git a/images/ui-auth-ldap.png b/images/ui-auth-ldap.png new file mode 100644 index 000000000..b14053d94 Binary files /dev/null and b/images/ui-auth-ldap.png differ diff --git a/images/ui-basic-config-contact-info.png b/images/ui-basic-config-contact-info.png new file mode 100644 index 000000000..4bf8f6317 Binary files /dev/null and b/images/ui-basic-config-contact-info.png differ diff --git a/images/ui-basic-config.png b/images/ui-basic-config.png new file mode 100644 index 000000000..237e05b89 Binary files /dev/null and b/images/ui-basic-config.png differ diff --git a/images/ui-custom-ssl-certs-uploaded.png b/images/ui-custom-ssl-certs-uploaded.png new file mode 100644 index 000000000..f2b63f78f Binary files /dev/null and b/images/ui-custom-ssl-certs-uploaded.png differ diff --git a/images/ui-custom-ssl-certs.png 
b/images/ui-custom-ssl-certs.png new file mode 100644 index 000000000..731db91cb Binary files /dev/null and b/images/ui-custom-ssl-certs.png differ diff --git a/images/ui-data-consistency.png b/images/ui-data-consistency.png new file mode 100644 index 000000000..b7a1ff396 Binary files /dev/null and b/images/ui-data-consistency.png differ diff --git a/images/ui-database-choice.png b/images/ui-database-choice.png new file mode 100644 index 000000000..1767040fc Binary files /dev/null and b/images/ui-database-choice.png differ diff --git a/images/ui-database-postgres.png b/images/ui-database-postgres.png new file mode 100644 index 000000000..3013374a4 Binary files /dev/null and b/images/ui-database-postgres.png differ diff --git a/images/ui-dockerfile-build-bitbucket.png b/images/ui-dockerfile-build-bitbucket.png new file mode 100644 index 000000000..e1af808b0 Binary files /dev/null and b/images/ui-dockerfile-build-bitbucket.png differ diff --git a/images/ui-dockerfile-build-github.png b/images/ui-dockerfile-build-github.png new file mode 100644 index 000000000..b3afacbfd Binary files /dev/null and b/images/ui-dockerfile-build-github.png differ diff --git a/images/ui-dockerfile-build-gitlab.png b/images/ui-dockerfile-build-gitlab.png new file mode 100644 index 000000000..de415ca0f Binary files /dev/null and b/images/ui-dockerfile-build-gitlab.png differ diff --git a/images/ui-dockerfile-build.png b/images/ui-dockerfile-build.png new file mode 100644 index 000000000..08e9a0e93 Binary files /dev/null and b/images/ui-dockerfile-build.png differ diff --git a/images/ui-email.png b/images/ui-email.png new file mode 100644 index 000000000..4ab971147 Binary files /dev/null and b/images/ui-email.png differ diff --git a/images/ui-internal-authentication-choice.png b/images/ui-internal-authentication-choice.png new file mode 100644 index 000000000..076733f8d Binary files /dev/null and b/images/ui-internal-authentication-choice.png differ diff --git 
a/images/ui-internal-authentication.png b/images/ui-internal-authentication.png new file mode 100644 index 000000000..52918bb46 Binary files /dev/null and b/images/ui-internal-authentication.png differ diff --git a/images/ui-monitor-deploy-done.png b/images/ui-monitor-deploy-done.png new file mode 100644 index 000000000..ed83a8509 Binary files /dev/null and b/images/ui-monitor-deploy-done.png differ diff --git a/images/ui-monitor-deploy-events.png b/images/ui-monitor-deploy-events.png new file mode 100644 index 000000000..2cd73274e Binary files /dev/null and b/images/ui-monitor-deploy-events.png differ diff --git a/images/ui-monitor-deploy-quay-min.png b/images/ui-monitor-deploy-quay-min.png new file mode 100644 index 000000000..304662198 Binary files /dev/null and b/images/ui-monitor-deploy-quay-min.png differ diff --git a/images/ui-monitor-deploy-streaming-events.png b/images/ui-monitor-deploy-streaming-events.png new file mode 100644 index 000000000..792c1f607 Binary files /dev/null and b/images/ui-monitor-deploy-streaming-events.png differ diff --git a/images/ui-monitor-deploy-update.png b/images/ui-monitor-deploy-update.png new file mode 100644 index 000000000..f7b2161fd Binary files /dev/null and b/images/ui-monitor-deploy-update.png differ diff --git a/images/ui-monitor-done.png b/images/ui-monitor-done.png new file mode 100644 index 000000000..ed83a8509 Binary files /dev/null and b/images/ui-monitor-done.png differ diff --git a/images/ui-oauth-github.png b/images/ui-oauth-github.png new file mode 100644 index 000000000..cbc622b24 Binary files /dev/null and b/images/ui-oauth-github.png differ diff --git a/images/ui-oauth-google.png b/images/ui-oauth-google.png new file mode 100644 index 000000000..894d972af Binary files /dev/null and b/images/ui-oauth-google.png differ diff --git a/images/ui-redis.png b/images/ui-redis.png new file mode 100644 index 000000000..35a5c4b2d Binary files /dev/null and b/images/ui-redis.png differ diff --git 
a/images/ui-repo-mirroring.png b/images/ui-repo-mirroring.png new file mode 100644 index 000000000..c90048165 Binary files /dev/null and b/images/ui-repo-mirroring.png differ diff --git a/images/ui-security-scanner-psk.png b/images/ui-security-scanner-psk.png new file mode 100644 index 000000000..4a24981c8 Binary files /dev/null and b/images/ui-security-scanner-psk.png differ diff --git a/images/ui-security-scanner.png b/images/ui-security-scanner.png new file mode 100644 index 000000000..9be7d4d9c Binary files /dev/null and b/images/ui-security-scanner.png differ diff --git a/images/ui-server-config-no-tls.png b/images/ui-server-config-no-tls.png new file mode 100644 index 000000000..a25c675bd Binary files /dev/null and b/images/ui-server-config-no-tls.png differ diff --git a/images/ui-server-config-tls-choice.png b/images/ui-server-config-tls-choice.png new file mode 100644 index 000000000..8edce3641 Binary files /dev/null and b/images/ui-server-config-tls-choice.png differ diff --git a/images/ui-server-config-tls.png b/images/ui-server-config-tls.png new file mode 100644 index 000000000..c1364dd2a Binary files /dev/null and b/images/ui-server-config-tls.png differ diff --git a/images/ui-storage-azure.png b/images/ui-storage-azure.png new file mode 100644 index 000000000..56d089f38 Binary files /dev/null and b/images/ui-storage-azure.png differ diff --git a/images/ui-storage-ceph.png b/images/ui-storage-ceph.png new file mode 100644 index 000000000..9a422e4d0 Binary files /dev/null and b/images/ui-storage-ceph.png differ diff --git a/images/ui-storage-choice.png b/images/ui-storage-choice.png new file mode 100644 index 000000000..ea59e9919 Binary files /dev/null and b/images/ui-storage-choice.png differ diff --git a/images/ui-storage-cloudfront.png b/images/ui-storage-cloudfront.png new file mode 100644 index 000000000..d7183eac3 Binary files /dev/null and b/images/ui-storage-cloudfront.png differ diff --git a/images/ui-storage-google.png 
b/images/ui-storage-google.png new file mode 100644 index 000000000..6967cc8d2 Binary files /dev/null and b/images/ui-storage-google.png differ diff --git a/images/ui-storage-local.png b/images/ui-storage-local.png new file mode 100644 index 000000000..189658b3e Binary files /dev/null and b/images/ui-storage-local.png differ diff --git a/images/ui-storage-noobaa.png b/images/ui-storage-noobaa.png new file mode 100644 index 000000000..07145198d Binary files /dev/null and b/images/ui-storage-noobaa.png differ diff --git a/images/ui-storage-s3.png b/images/ui-storage-s3.png new file mode 100644 index 000000000..4d46b1ec8 Binary files /dev/null and b/images/ui-storage-s3.png differ diff --git a/images/ui-storage-swift.png b/images/ui-storage-swift.png new file mode 100644 index 000000000..e4d2b7f67 Binary files /dev/null and b/images/ui-storage-swift.png differ diff --git a/images/ui-time-machine-add.png b/images/ui-time-machine-add.png new file mode 100644 index 000000000..c7eaff7ae Binary files /dev/null and b/images/ui-time-machine-add.png differ diff --git a/images/ui-time-machine.png b/images/ui-time-machine.png new file mode 100644 index 000000000..ff96b944a Binary files /dev/null and b/images/ui-time-machine.png differ diff --git a/images/uid-attribute-ldap.png b/images/uid-attribute-ldap.png new file mode 100644 index 000000000..42d78acfc Binary files /dev/null and b/images/uid-attribute-ldap.png differ diff --git a/images/update-channel-approval-strategy.png b/images/update-channel-approval-strategy.png new file mode 100644 index 000000000..6028fe258 Binary files /dev/null and b/images/update-channel-approval-strategy.png differ diff --git a/images/updated-layers-in-cache.png b/images/updated-layers-in-cache.png new file mode 100644 index 000000000..db1e22066 Binary files /dev/null and b/images/updated-layers-in-cache.png differ diff --git a/images/user-options.png b/images/user-options.png new file mode 100644 index 000000000..f0720ef97 Binary files /dev/null 
and b/images/user-options.png differ diff --git a/images/user-relative-dn.png b/images/user-relative-dn.png new file mode 100644 index 000000000..274208f5f Binary files /dev/null and b/images/user-relative-dn.png differ diff --git a/images/view-credentials.png b/images/view-credentials.png index 52296ffcb..cce2a663e 100644 Binary files a/images/view-credentials.png and b/images/view-credentials.png differ diff --git a/images/view-tags-set.png b/images/view-tags-set.png new file mode 100644 index 000000000..f55e8fefa Binary files /dev/null and b/images/view-tags-set.png differ diff --git a/manage_quay/docinfo.xml b/manage_quay/docinfo.xml index 3a8334351..6ef2d5ab3 100644 --- a/manage_quay/docinfo.xml +++ b/manage_quay/docinfo.xml @@ -1,5 +1,5 @@ {productname} -3 +{producty} Manage {productname} Manage {productname} diff --git a/manage_quay/master.adoc b/manage_quay/master.adoc index f94c2ff7a..3012076b7 100644 --- a/manage_quay/master.adoc +++ b/manage_quay/master.adoc @@ -1,53 +1,176 @@ +:_content-type: ASSEMBLY include::modules/attributes.adoc[] -:context: manage_quay - -[id='manage-quay'] +[id="manage-quay"] = Manage {productname} Once you have deployed a {productname} registry, there are many ways you can further configure and manage that deployment. 
Topics covered here include: +* Advanced {productname} configuration * Setting notifications to alert you of a new {productname} release -* Securing connections with SSL and TLS certificates +* Securing connections with SSL/TLS certificates +* Directing action logs storage to Elasticsearch * Configuring image security scanning with Clair +* Scan pod images with the Container Security Operator +* Integrate {productname} into {ocp} with the Quay Bridge Operator * Mirroring images with repository mirroring -* Sharing Quay images with a BitTorrent service +* Sharing {productname} images with a BitTorrent service * Authenticating users with LDAP * Enabling Quay for Prometheus and Grafana metrics * Setting up geo-replication -* Troubleshooting Quay +* Troubleshooting {productname} + +For a complete list of {productname} configuration fields, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index[Configure {productname}] page. + +include::modules/proc_manage-advanced-config.adoc[leveloffset=+1] + + +include::modules/config-api-intro.adoc[leveloffset=+1] +include::modules/config-api-default.adoc[leveloffset=+2] +include::modules/config-api-retrieve.adoc[leveloffset=+2] +include::modules/config-api-validate.adoc[leveloffset=+2] +include::modules/config-api-required.adoc[leveloffset=+2] include::modules/proc_manage-release-notifications.adoc[leveloffset=+1] -include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] -include::modules/proc_manage-insert-custom-cert.adoc[leveloffset=+1] +[[using-ssl-to-protect-quay]] +== Using SSL to protect connections to {productname} +//include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] +include::modules/ssl-intro.adoc[leveloffset=+2] +//// +include::modules/ssl-create-certs.adoc[leveloffset=+2] +include::modules/ssl-config-cli.adoc[leveloffset=+2] +include::modules/ssl-config-ui.adoc[leveloffset=+2] +include::modules/ssl-testing-cli.adoc[leveloffset=+2] 
+include::modules/ssl-testing-ui.adoc[leveloffset=+2] +include::modules/ssl-trust-ca-podman.adoc[leveloffset=+2] +include::modules/ssl-trust-ca-system.adoc[leveloffset=+2] + +include::modules/config-custom-ssl-certs-manual.adoc[leveloffset=+1] +include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] +//// + +include::modules/proc_manage-log-storage.adoc[leveloffset=+1] +include::modules/proc_manage-log-storage-elasticsearch.adoc[leveloffset=+2] +include::modules/proc_manage-log-storage-splunk.adoc[leveloffset=+2] +include::modules/proc_installing-creating-username-splunk.adoc[leveloffset=+3] +include::modules/proc_generating-splunk-token.adoc[leveloffset=+3] +include::modules/proc_splunk-config.adoc[leveloffset=+3] +include::modules/proc_splunk-action-log.adoc[leveloffset=+3] +include::modules/understanding-action-logs.adoc[leveloffset=+2] :context: security-scanning -include::modules/proc_manage-security-scanning.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/clair-standalone-configure.adoc[leveloffset=+2] +include::modules/clair-openshift.adoc[leveloffset=+2] +include::modules/clair-testing.adoc[leveloffset=+2] :context: manage_quay -include::modules/proc_manage-clair-enable.adoc[leveloffset=+1] +[[repo-mirroring-in-red-hat-quay]] +== Repository mirroring + +//include::modules/proc_manage-repomirror.adoc[leveloffset=+1] +include::modules/mirroring-intro.adoc[leveloffset=+2] +include::modules/mirroring-versus-georepl.adoc[leveloffset=+2] +include::modules/mirroring-using.adoc[leveloffset=+2] +include::modules/config-ui-mirroring.adoc[leveloffset=+2] +include::modules/config-fields-mirroring.adoc[leveloffset=+2] +include::modules/mirroring-worker.adoc[leveloffset=+2] +include::modules/mirroring-creating-repo.adoc[leveloffset=+2] +include::modules/mirroring-events.adoc[leveloffset=+2] 
+include::modules/mirroring-tag-patterns.adoc[leveloffset=+2] +include::modules/mirroring-working-with.adoc[leveloffset=+2] +include::modules/mirroring-recommend.adoc[leveloffset=+2] -include::modules/proc_manage-repomirror.adoc[leveloffset=+1] +:context: manage_quay -:context: bittorrent -include::modules/proc_manage-bittorrent.adoc[leveloffset=+1] +include::modules/proc_manage-ipv6-dual-stack.adoc[leveloffset=+1] :context: manage_quay include::modules/proc_manage-ldap-setup.adoc[leveloffset=+1] +//oidc and SSO +include::modules/configuring-oidc-authentication.adoc[leveloffset=+1] +include::modules/configuring-red-hat-sso.adoc[leveloffset=+2] +include::modules/enabling-team-sync-oidc.adoc[leveloffset=+2] + +//keyless auth + +include::modules/keyless-authentication-robot-accounts.adoc[leveloffset=+1] + +//aws sts +include::modules/configuring-aws-sts-quay.adoc[leveloffset=+1] +include::modules/configuring-quay-standalone-aws-sts.adoc[leveloffset=+2] + + include::modules/proc_manage-quay-prometheus.adoc[leveloffset=+1] +include::modules/metrics-intro.adoc[leveloffset=+2] +include::modules/metrics-general-registry-stats.adoc[leveloffset=+3] +include::modules/metrics-queue-items.adoc[leveloffset=+3] +include::modules/metrics-garbage-collection.adoc[leveloffset=+3] +include::modules/metrics-multipart-uploads.adoc[leveloffset=+3] +include::modules/metrics-image-push-pull.adoc[leveloffset=+3] +include::modules/metrics-authentication.adoc[leveloffset=+3] + +//include::modules/proc_manage-quay-geo-replication.adoc[leveloffset=+1] + +include::modules/quota-management-and-enforcement.adoc[leveloffset=+1] +//include::modules/quota-management-arch.adoc[leveloffset=+2] +include::modules/quota-management-limitations.adoc[leveloffset=+2] +include::modules/red-hat-quay-quota-management-configure-39.adoc[leveloffset=+2] + +include::modules/quota-management-testing-39.adoc[leveloffset=+2] +include::modules/setting-default-quota.adoc[leveloffset=+2] 
+include::modules/quota-establishment-ui.adoc[leveloffset=+2] +include::modules/quota-establishment-api.adoc[leveloffset=+2] +include::modules/quota-management-query-39.adoc[leveloffset=+2] +include::modules/deleting-tag-permanently.adoc[leveloffset=+2] + +//namespace auto-pruning + +include::modules/red-hat-quay-namespace-auto-pruning-overview.adoc[leveloffset=+1] +include::modules/managing-namespace-auto-pruning-policies.adoc[leveloffset=+2] + +include::modules/georepl-intro.adoc[leveloffset=+1] +include::modules/arch-georpl-features.adoc[leveloffset=+2] +include::modules/georepl-prereqs.adoc[leveloffset=+2] +//include::modules/georepl-arch-standalone.adoc[leveloffset=+2] +include::modules/config-ui-storage-georepl.adoc[leveloffset=+3] +include::modules/georepl-deploy-standalone.adoc[leveloffset=+3] +include::modules/standalone-georepl-site-removal.adoc[leveloffset=+3] +//include::modules/georepl-arch-operator.adoc[leveloffset=+2] +include::modules/georepl-deploy-operator.adoc[leveloffset=+3] +include::modules/operator-georepl-site-removal.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+2] + +//// +include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] +include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] +//// + +include::modules/standalone-deployment-backup-restore.adoc[leveloffset=+1] +include::modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc[leveloffset=+2] +include::modules/backing-up-red-hat-quay-standalone.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay-standalone.adoc[leveloffset=+2] + +include::modules/standalone-to-operator-backup-restore.adoc[leveloffset=+1] + +//include::modules/configuring-oci-media-types.adoc[leveloffset=+1] + +include::modules/garbage-collection.adoc[leveloffset=+1] + +include::modules/using-v2-ui.adoc[leveloffset=+1] 
-include::modules/proc_manage-quay-geo-replication.adoc[leveloffset=+1] +include::modules/health-check-quay.adoc[leveloffset=+1] -include::modules/proc_manage-quay-troubleshooting.adoc[leveloffset=+1] -include::modules/con_schema.adoc[leveloffset=+1] +include::modules/branding-quay-deployment.adoc[leveloffset=+1] -[discrete] -== Additional resources +include::modules/con_schema.adoc[leveloffset=+1] \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..606d65758 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,59 @@ +site_name: Red Hat Quay API endpoint +site_url: https://github.com/quay +theme: + name: material + features: + - navigation.tabs + - navigation.sections + - toc.integrate + - navigation.top + - search.suggest + - search.highlight + - content.tabs.link + - content.code.annotation + - content.code.copy + language: en + palette: + - scheme: default + toggle: + icon: material/toggle-switch-off-outline + name: Switch to dark mode + primary: teal + accent: purple + - scheme: slate + toggle: + icon: material/toggle-switch + name: Switch to light mode + primary: teal + accent: lime + +plugins: + - social + - search + - swagger-ui-tag + +extra: + social: + - icon: fontawesome/brands/github-alt + link: https://github.com/quay + +markdown_extensions: + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + - admonition + - pymdownx.arithmatex: + generic: true + - footnotes + - pymdownx.details + - pymdownx.superfences + - pymdownx.mark + - attr_list + +# Docs Navigation +nav: +- API V2: api-v2.md + +copyright: | + © 2023 Red Hat Quay Development Team \ No newline at end of file diff --git a/modules/about-clair.adoc b/modules/about-clair.adoc new file mode 100644 index 000000000..de31b0e7e --- /dev/null +++ b/modules/about-clair.adoc @@ -0,0 +1,258 @@ +// Module included in the following assemblies: +// +// clair/master.adoc +// quay.io +// security + +:_content-type: CONCEPT 
+[id="about-clair"] += About Clair + +Clair uses Common Vulnerability Scoring System (CVSS) data from the National Vulnerability Database (NVD) to enrich vulnerability data, which is a United States government repository of security-related information, including known vulnerabilities and security issues in various software components and systems. Using scores from the NVD provides Clair the following benefits: + +* **Data synchronization**. Clair can periodically synchronize its vulnerability database with the NVD. This ensures that it has the latest vulnerability data. +* **Matching and enrichment**. Clair compares the metadata and identifiers of vulnerabilities it discovers in container images with the data from the NVD. This process involves matching the unique identifiers, such as Common Vulnerabilities and Exposures (CVE) IDs, to the entries in the NVD. When a match is found, Clair can enrich its vulnerability information with additional details from NVD, such as severity scores, descriptions, and references. +* **Severity Scores**. The NVD assigns severity scores to vulnerabilities, such as the Common Vulnerability Scoring System (CVSS) score, to indicate the potential impact and risk associated with each vulnerability. By incorporating NVD's severity scores, Clair can provide more context on the seriousness of the vulnerabilities it detects. + +If Clair finds vulnerabilities from NVD, a detailed and standardized assessment of the severity and potential impact of vulnerabilities detected within container images is reported to users on the UI. CVSS enrichment data provides Clair the following benefits: + +* *Vulnerability prioritization*. By utilizing CVSS scores, users can prioritize vulnerabilities based on their severity, helping them address the most critical issues first. +* *Assess Risk*. CVSS scores can help Clair users understand the potential risk a vulnerability poses to their containerized applications. +* *Communicate Severity*. 
CVSS scores provide Clair users a standardized way to communicate the severity of vulnerabilities across teams and organizations. +* *Inform Remediation Strategies*. CVSS enrichment data can guide {quayio} users in developing appropriate remediation strategies. +* *Compliance and Reporting*. Integrating CVSS data into reports generated by Clair can help organizations demonstrate their commitment to addressing security vulnerabilities and complying with industry standards and regulations. + +ifeval::["{context}" == "clair"] +[id="clair-releases"] +== Clair releases + +New versions of Clair are regularly released. The source code needed to build Clair is packaged as an archive and attached to each release. Clair releases can be found at link:https://github.com/quay/clair/releases[Clair releases]. + +Release artifacts also include the `clairctl` command line interface tool, which obtains updater data from the internet by using an open host. + +[discrete] +[id="clair-releases-48"] +=== Clair 4.8 + +Clair 4.8 was released on 24-10-28. The following changes have been made: + +* Clair on {productname} now requires that you update the Clair PostgreSQL database from version 13 to version 15. For more information about this procedure, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#upgrading-clair-postgresql-database[Upgrading the Clair PostgreSQL database]. + +* This release deprecates the updaters that rely on the Red Hat OVAL v2 security data in favor of the Red Hat VEX data. This change includes a database migration to delete all the vulnerabilities that originated from the OVAL v2 feeds. Because of this, there could be intermittent downtime in production environments before the VEX updater complete for the first time when no vulnerabilities exist. 
+ +[id="clair-suse-enterprise-known-issue"] +=== Clair 4.8.0 known issues + +* When pushing Suse Enterprise Linux Images with *HIGH* image vulnerabilities, Clair 4.8.0 does not report these vulnerabilities. This is a known issue and will be fixed in a future version of {productname}. + +[discrete] +[id="clair-releases-474"] +=== Clair 4.7.4 + +Clair 4.7.4 was released on 2024-05-01. The following changes have been made: + +* The default layer download location has changed. For more information, see link:https://github.com/quay/clair/blob/release-4.7/Documentation/howto/deployment.md#disk-usage-considerations[Disk usage considerations]. + +[discrete] +[id="clair-releases-473"] +=== Clair 4.7.3 + +Clair 4.7.3 was released on 2024-02-26. The following changes have been made: + +* The minimum TLS version for Clair is now 1.2. Previously, servers allowed for 1.1 connections. + +[discrete] +[id="clair-releases-472"] +=== Clair 4.7.2 + +Clair 4.7.2 was released on 2023-10-09. The following changes have been made: + +* CRDA support has been removed. + +[discrete] +[id="clair-releases-471"] +=== Clair 4.7.1 + +Clair 4.7.1 was released as part of {productname} 3.9.1. The following changes have been made: + +* With this release, you can view unpatched vulnerabilities from {rhel} sources. If you want to view unpatched vulnerabilities, you can the set `ignore_unpatched` parameter to `false`. For example: ++ +[source,terminal] +---- +updaters: + config: + rhel: + ignore_unpatched: false +---- ++ +To disable this feature, you can set `ignore_unpatched` to `true`. + +[discrete] +[id="clair-releases-47"] +=== Clair 4.7 + +Clair 4.7 was released as part of {productname} 3.9, and includes support for the following features: + +* Native support for indexing Golang modules and RubeGems in container images. +* Change to link:OSV.dev[OSV.dev] as the vulnerability database source for any programming language package managers. 
+** This includes popular sources like GitHub Security Advisories or PyPA. +** This allows offline capability. +* Use of pyup.io for Python and CRDA for Java is suspended. +* Clair now supports Java, Golang, Python, and Ruby dependencies. +endif::[] + +[id="vuln-database-clair"] +== Clair vulnerability databases + +Clair uses the following vulnerability databases to report for issues in your images: + +* Ubuntu Oval database +* Debian Security Tracker +* {rhel} Oval database +* SUSE Oval database +* Oracle Oval database +* Alpine SecDB database +* VMware Photon OS database +* Amazon Web Services (AWS) UpdateInfo +* link:https://osv.dev/[Open Source Vulnerability (OSV) Database] + +[id="clair-supported-languages"] +== Clair supported dependencies + +Clair supports identifying and managing the following dependencies: + +* Java +* Golang +* Python +* Ruby + +This means that it can analyze and report on the third-party libraries and packages that a project in these languages relies on to work correctly. + +When an image that contains packages from a language unsupported by Clair is pushed to your repository, a vulnerability scan cannot be performed on those packages. Users do not receive an analysis or security report for unsupported dependencies or packages. As a result, the following consequences should be considered: + +* *Security risks*. Dependencies or packages that are not scanned for vulnerability might pose security risks to your organization. +* *Compliance issues*. If your organization has specific security or compliance requirements, unscanned, or partially scanned, container images might result in non-compliance with certain regulations. ++ +[NOTE] +==== +Scanned images are indexed, and a vulnerability report is created, but it might omit data from certain unsupported languages. For example, if your container image contains a Lua application, feedback from Clair is not provided because Clair does not detect it. 
It can detect other languages used in the container image, and shows detected CVEs for those languages. As a result, Clair images are _fully scanned_ based on what is supported by Clair. +==== + +ifeval::["{context}" == "clair"] +[id="clair-containers"] +== Clair containers + +Official downstream Clair containers bundled with {productname} can be found on the link:https://catalog.redhat.com[Red Hat Ecosystem Catalog]. + +Official upstream containers are packaged and released under the Clair project on link:https://quay.io/repository/projectquay/clair[Quay.io]. The latest tag tracks the Git development branch. Version tags are built from the corresponding release. +endif::[] + +//// +==== OSV mapping + +[cols="2,2",options="header"] +|=== +| Severity | Clair Severity +| | + +|=== +//// + +//// + +[id="notifier-pagination"] +===== Notifier pagination + +The URL returned in the callback field takes the client to a paginated result. + +The following example shows the callback endpoint specification: +[source,json] +---- +GET /notifier/api/v1/notification/{id}?[page_size=N][next=N] +{ + page: { + size: int, + next: string, // if present, the next id to fetch. + } + notifications: [ Notification… ] // array of notifications; max len == page.size +} +---- +.small +-- +* The `GET` callback request implements a simple paging mechanism. +* A `page` object accompanying the notification list specifies `next` and `size` fields. +* The `next` field returned in the page must be provided as the subsequent request's `next` URL parameter to retrieve the next set of notifications. +* The `size` field will echo back to the request `page_size` parameter. + + + +* The `page_size` URL parameter controls how many notifications are returned in a single page. If unprovided, a default of `500` is used. +* The `next` URL parameter informs Clair of the next set of paginated notifications to return. If not provided, the `0th` page is assumed.
+* + +//// + +//// + +.Prerequisites + +* The Linux `make` command is required to start the local development environment. + +* Podman v3.0 or greater. Alternatively, you can use Docker or Docker Compose, however not all versions of Docker or Docker Compose have been tested. As a result, some versions might not work properly. ++ +This guide uses Podman with an implementation of Compose Specification. + +* Go v1.16 or greater. + +.Procedure + +. Enter the following command to close the Clair github repository: ++ +[source,terminal] +---- +$ git clone git@github.com:quay/clair.git +---- + +. Change into the Clair directory by entering the following command: ++ +[source,terminal] +---- +$ cd clair +---- + +. Start the Clair container by entering the following command: ++ +[source,terminal] +---- +$ podman-compose up -d +---- + +After the local development environment starts, the following infrastructure is available to you: + +* `localhost:8080`. This includes dashboards and debugging services. You can see Traefik configuration logs in `local-dev/traefik`, where various services are served. + +* `localhost:6060`. This includes Clair services. + +* {productname}. If started, {productname} will be started in a single node, local storage configuration. A random port will be forwarded from `localhost`. Use `podman port` to view mapping information. + +* PostgreSQL. PostgreSQL has a random port forwarded from `localhost` to the database server. See `local-dev/clair/init.sql` for credentials and permissions. Use `podman port` to view mapping information. + +[id="testing-clair"] +== Testing Clair on the local development environment + +After starting the Clair container, a {productname} server is forwarded to a random port on the host. + +. Locate, and open, the port hosting {productname}. + +. Click *Create Account* and create a new user, for example, `admin`. + +. Set a password. + +. To push to the {productname} container, you must exec into the skopeo container. 
For example: ++ +[source,terminal] +---- +$ podman exec -it quay-skopeo /usr/bin/skopeo copy --dst-creds ':' --dst-tls-verify=false clair-quay:8080//: +---- + +//// \ No newline at end of file diff --git a/modules/access-control-intro.adoc b/modules/access-control-intro.adoc new file mode 100644 index 000000000..d5121d368 --- /dev/null +++ b/modules/access-control-intro.adoc @@ -0,0 +1,6 @@ +[[access-control-intro]] += Access control in {productname} + +{productname} provides both role-based access control (RBAC) and fine-grained access control, and has team features that allow for limited access control of repositories, organizations, and user privileges. {productname} access control features also provide support for dispersed organizations. + + diff --git a/modules/accessing-swagger-ui.adoc b/modules/accessing-swagger-ui.adoc new file mode 100644 index 000000000..8d2437679 --- /dev/null +++ b/modules/accessing-swagger-ui.adoc @@ -0,0 +1,38 @@ +:_content-type: REFERENCE +[id="accessing-swagger-ui"] += Accessing {productname} Swagger UI + +{productname} administrators and users can interact with the API by using the Swagger UI, an interactive web interface that compiles executable commands. The Swagger UI can be launched as a container that points to your {productname} instance's API discovery endpoint (`/api/v1/discovery`). After deploying the container, you can access the Swagger UI, which loads the OpenAPI specification for {productname} from the specified URL. {productname} administrators and users can explore the available endpoints and their structure. + +Use the following procedure to access the {productname} Swagger UI. + +.Procedure + +. Enter the following command to deploy the Swagger UI container, pointing the URL to your {productname}'s API discovery endpoint.
For example: ++ +[source,terminal] +---- +$ podman run -p 8080:8080 -e SWAGGER_JSON_URL= docker.swagger.io/swaggerapi/swagger-ui +---- ++ +.Example output ++ +[source,terminal] +---- +--- +/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh +20-envsubst-on-templates.sh: Running envsubst on /etc/nginx/templates/default.conf.template to /etc/nginx/conf.d/default.conf +/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh +/docker-entrypoint.sh: Launching /docker-entrypoint.d/40-swagger-ui.sh +/docker-entrypoint.sh: Configuration complete; ready for start up +--- +---- + +. Navigate to the `localhost` URL. In this example, it is *http://localhost:8080/*. + +. Use the Swagger UI to test various API endpoints. For example, to create a new token for a user, you can click the *POST /api/v1/user/apptoken* endpoint -> *Try it out* -> *Execute* to generate an example `curl` command. ++ +[NOTE] +==== +Currently, server responses cannot be generated. This is because the Swagger UI is not set up to accept bearer tokens. As a result, the following error is returned for each command: `{"error": "CSRF token was invalid or missing."}`. As a workaround, you can copy this command into your terminal and manually add your bearer token, for example, `-H 'Authorization: Bearer '` +==== \ No newline at end of file diff --git a/modules/add-users-to-team.adoc b/modules/add-users-to-team.adoc new file mode 100644 index 000000000..d08378ce7 --- /dev/null +++ b/modules/add-users-to-team.adoc @@ -0,0 +1,43 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="adding-users-to-team"] += Adding users to a team by using the UI + +With administrative privileges to an Organization, you can add users and robot accounts to a team. 
When you add a user, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +sends an email to that user. The user remains pending until they accept the invitation. + +Use the following procedure to add users or robot accounts to a team. + +.Procedure + +. On the {productname} landing page, click the name of your Organization. + +. In the navigation pane, click *Teams and Membership*. + +. Select the menu kebab of the team that you want to add users or robot accounts to. Then, click *Manage team members*. + +. Click *Add new member*. + +. In the textbox, enter information for one of the following: ++ +* A username from an account on the registry. +* The email address for a user account on the registry. +* The name of a robot account. The name must be in the form of `<organization_name>+<robot_name>`. ++ +[NOTE] +==== +Robot Accounts are immediately added to the team. For user accounts, an invitation to join is mailed to the user. Until the user accepts that invitation, the user remains in the *INVITED TO JOIN* state. After the user accepts the email invitation to join the team, they move from the *INVITED TO JOIN* list to the *MEMBERS* list for the Organization. +==== + +. Click *Add member*. \ No newline at end of file diff --git a/modules/adding-a-new-tag-to-image-api.adoc b/modules/adding-a-new-tag-to-image-api.adoc new file mode 100644 index 000000000..cfeeae0be --- /dev/null +++ b/modules/adding-a-new-tag-to-image-api.adoc @@ -0,0 +1,69 @@ +:_content-type: CONCEPT +[id="adding-tags-api"] += Adding a new tag to an image by using the API + +You can add a new tag, or restore an old one, to an image by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file.
+ +.Procedure + +. You can change which image a tag points to or create a new tag by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changetag[`PUT /api/v1/repository/{repository}/tag/{tag}`] command: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": "" + }' \ + https:///api/v1/repository///tag/ +---- ++ +.Example output ++ +[source,terminal] +---- +"Updated" +---- + +. You can restore a repository tag to its previous image by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#restoretag[`POST /api/v1/repository/{repository}/tag/{tag}/restore`] command. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": + }' \ + quay-server.example.com/api/v1/repository/quayadmin/busybox/tag/test/restore +---- ++ +.Example output ++ +[source,terminal] +---- +{} +---- + +. To see a list of tags after creating a new tag you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": false, "start_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715697708, "end_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:41:48 -0000", "expiration": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715695488, "end_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "expiration": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715631517, "end_ts": 1715695488, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Mon, 13 May 2024 20:18:37 -0000", "expiration": "Tue, 14 May 2024 14:04:48 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/adding-a-new-tag-to-image.adoc b/modules/adding-a-new-tag-to-image.adoc new file mode 100644 index 000000000..fa350436b --- /dev/null +++ b/modules/adding-a-new-tag-to-image.adoc @@ -0,0 +1,23 @@ 
+:_content-type: CONCEPT +[id="adding-a-new-tag-to-image"] += Adding a new image tag to an image by using the UI + +You can add a new tag to an image in +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab, then click *Add new tag*. + +. Enter a name for the tag, then, click *Create tag*. ++ +The new tag is now listed on the *Repository Tags* page. \ No newline at end of file diff --git a/modules/adding-ca-certs-to-config.adoc b/modules/adding-ca-certs-to-config.adoc new file mode 100644 index 000000000..bcf8fb476 --- /dev/null +++ b/modules/adding-ca-certs-to-config.adoc @@ -0,0 +1,104 @@ +[id="adding-ca-certs-to-config"] += Adding additional Certificate Authorities to {productname-ocp} + +The following example shows you how to add additional Certificate Authorities to your {productname-ocp} deployment. + +.Prerequisites + +* You have base64 decoded the original config bundle into a `config.yaml` file. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli-download[Downloading the existing configuration]. +* You have a Certificate Authority (CA) file or files. + +.Procedure + +. Create a new YAML file, for example, `extra-ca-certificate-config-bundle-secret.yaml`: ++ +[source,terminal] +---- +$ touch extra-ca-certificate-config-bundle-secret.yaml +---- + +. Create the `extra-ca-certificate-config-bundle-secret` resource. + +.. 
Create the resource by entering the following command: ++ +[source,terminal] +---- +$ oc -n create secret generic extra-ca-certificate-config-bundle-secret \ + --from-file=config.yaml= \ <1> + --from-file=extra_ca_cert_= \ <2> + --from-file=extra_ca_cert_= \ <3> + --from-file=extra_ca_cert_= \ <4> + --dry-run=client -o yaml > extra-ca-certificate-config-bundle-secret.yaml +---- +<1> Where `` is your `base64 decoded` `config.yaml` file. +<2> The extra CA file to be added to into the system trust bundle. +<3> Optional. A second CA file to be added into the system trust bundle. +<4> Optional. A third CA file to be added into the system trust bundle. + +. Optional. You can check the content of the `extra-ca-certificate-config-bundle-secret.yaml` file by entering the following command: ++ +[source,terminal] +---- +$ cat extra-ca-certificate-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +apiVersion: v1 +data: + config.yaml: QUxMT1dfUFVMTFNfV0lUSE9VVF9TVFJJQ1RfTE9HR0lORzogZmFsc2UKQVVUSEVOVElDQVRJT05fVFlQRTogRGF0YWJhc2UKREVGQVVMVF9UQUdfRVhQSVJBVElPTjogMncKUFJFRkVSU... + extra_ca_cert_certificate-one: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQyVENDQXNHZ0F3SUJBZ0lVS2xOai90VUJBZHBkNURjYkdRQUo4anRuKzd3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2ZERUxNQWtHQ... + extra_ca_cert_certificate-three: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ0ekNDQXN1Z0F3SUJBZ0lVQmJpTXNUeExjM0s4ODNWby9GTThsWXlOS2lFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2ZERUxNQWtHQ... + extra_ca_cert_certificate-two: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ0ekNDQXN1Z0F3SUJBZ0lVVFVPTXZ2YVdFOFRYV3djYTNoWlBCTnV2QjYwd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2ZERUxNQWtHQ... +kind: Secret +metadata: + creationTimestamp: null + name: custom-ssl-config-bundle-secret + namespace: +---- + +. 
Create the `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc create -n -f extra-ca-certificate-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +secret/extra-ca-certificate-config-bundle-secret created +---- + +. Update the `QuayRegistry` YAML file to reference the `extra-ca-certificate-config-bundle-secret` object by entering the following command: ++ +[source,terminal] +---- +$ oc patch quayregistry -n --type=merge -p '{"spec":{"configBundleSecret":"extra-ca-certificate-config-bundle-secret"}}' +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry patched +---- + +. Ensure that your `QuayRegistry` YAML file has been updated to use the extra CA certificate `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml +---- ++ +.Example output ++ +[source,terminal] +---- +# ... + configBundleSecret: extra-ca-certificate-config-bundle-secret +# ... +---- \ No newline at end of file diff --git a/modules/adding-managing-labels-api.adoc b/modules/adding-managing-labels-api.adoc new file mode 100644 index 000000000..11505ead5 --- /dev/null +++ b/modules/adding-managing-labels-api.adoc @@ -0,0 +1,89 @@ +:_content-type: CONCEPT +[id="adding-managing-labels-api"] += Adding and managing labels by using the API + +{productname} administrators can add and manage labels for tags with the API by using the following procedure. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepomanifest[`GET /api/v1/repository/{repository}/manifest/{manifestref}`] command to retrieve the details of a specific manifest in a repository: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest/ +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listmanifestlabels[`GET /api/v1/repository/{repository}/manifest/{manifestref}/labels`] command to retrieve a list of labels for a specific manifest: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels +---- ++ +.Example output ++ +[source,terminal] +---- +{"labels": [{"id": "e9f717d2-c1dd-4626-802d-733a029d17ad", "key": "org.opencontainers.image.url", "value": "https://github.com/docker-library/busybox", "source_type": "manifest", "media_type": "text/plain"}, {"id": "2d34ec64-4051-43ad-ae06-d5f81003576a", "key": "org.opencontainers.image.version", "value": "1.36.1-glibc", "source_type": "manifest", "media_type": "text/plain"}]} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getmanifestlabel[`GET /api/v1/repository/{repository}/manifest/{manifestref}/labels/{labelid}`] command to obtain information about a specific manifest: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": "e9f717d2-c1dd-4626-802d-733a029d17ad", "key": "org.opencontainers.image.url", "value": "https://github.com/docker-library/busybox", "source_type": "manifest", "media_type": "text/plain"} +---- + +. You can add an additional label to a manifest in a given repository with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#addmanifestlabel[`POST /api/v1/repository/{repository}/manifest/{manifestref}/labels`] command. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "key": "", + "value": "", + "media_type": "" + }' \ + https:///api/v1/repository//manifest//labels +---- ++ +.Example output ++ +[source,terminal] +---- +{"label": {"id": "346593fd-18c8-49db-854f-4cb1fb76ff9c", "key": "example-key", "value": "example-value", "source_type": "api", "media_type": "text/plain"}} +---- + +. You can delete a label using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletemanifestlabel[`DELETE /api/v1/repository/{repository}/manifest/{manifestref}/labels/{labelid}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//manifest//labels/ +---- ++ +This command does not return output in the CLI. You can use one of the commands above to ensure that it was successfully removed. 
\ No newline at end of file diff --git a/modules/adding-managing-labels.adoc b/modules/adding-managing-labels.adoc new file mode 100644 index 000000000..8e85650a2 --- /dev/null +++ b/modules/adding-managing-labels.adoc @@ -0,0 +1,30 @@ +:_content-type: CONCEPT +[id="adding-managing-labels"] += Adding and managing labels by using the UI + +Administrators can add and manage labels for tags by using the following procedure. + +.Procedure + +. On the v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Edit labels*. + +. In the *Edit labels* window, click *Add new label*. + +. Enter a label for the image tag using the `key=value` format, for example, `com.example.release-date=2023-11-14`. ++ +[NOTE] +==== +The following error is returned when failing to use the `key=value` format: `Invalid label format, must be key value separated by =`. +==== + +. Click the whitespace of the box to add the label. + +. Optional. Add a second label. + +. Click *Save labels* to save the label to the image tag. The following notification is returned: `Created labels successfully`. + +. Optional. Click the same image tag's menu kebab -> *Edit labels* -> *X* on the label to remove it; alternatively, you can edit the text. Click *Save labels*. The label is now removed or edited. \ No newline at end of file diff --git a/modules/adjust-access-user-repo-api.adoc b/modules/adjust-access-user-repo-api.adoc new file mode 100644 index 000000000..c26a11259 --- /dev/null +++ b/modules/adjust-access-user-repo-api.adoc @@ -0,0 +1,64 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="adjust-access-user-repo-api"] += Adjusting access settings for a repository by using the API + +Use the following procedure to adjust access settings for a user or robot account for a repository by using the API. 
+ +.Prerequisites + +* You have created a user account or robot account. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeuserpermissions[`PUT /api/v1/repository/{repository}/permissions/user/{username}`] command to change the permissions of a user: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": "admin"}' \ + https:///api/v1/repository///permissions/user/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "admin", "name": "quayadmin+test", "is_robot": true, "avatar": {"name": "quayadmin+test", "hash": "ca9afae0a9d3ca322fc8a7a866e8476dd6c98de543decd186ae090e420a88feb", "color": "#8c564b", "kind": "robot"}} +---- + +. To delete the current permission, you can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserpermissions[`DELETE /api/v1/repository/{repository}/permissions/user/{username}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user/ +---- ++ +This command does not return any output in the CLI. 
Instead, you can check that the permissions were deleted by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepouserpermissions[`GET /api/v1/repository/{repository}/permissions/user/`] command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user// +---- ++ +.Example output ++ +[source,terminal] +---- +{"message":"User does not have permission for repo."} +---- \ No newline at end of file diff --git a/modules/adjusting-repository-access-via-the-api.adoc b/modules/adjusting-repository-access-via-the-api.adoc new file mode 100644 index 000000000..cb210ac8a --- /dev/null +++ b/modules/adjusting-repository-access-via-the-api.adoc @@ -0,0 +1,38 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="adjusting-image-repository-visibility-the-api"] += Adjusting repository visibility by using the API + +The visibility of your repository can be set to `private` or `public` by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/red_hat_quay_api_reference/index#changerepovisibility[`POST /api/v1/repository/{repository}/changevisibility`] command. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created a repository. + +.Procedure + +* You can change the visibility of your repository to public or private by specifying the desired option in the `visibility` schema. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository/my_namespace/test_repo_three/changevisibility" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- \ No newline at end of file diff --git a/modules/adjusting-repository-visibility-via-the-ui.adoc b/modules/adjusting-repository-visibility-via-the-ui.adoc new file mode 100644 index 000000000..e1efa09c3 --- /dev/null +++ b/modules/adjusting-repository-visibility-via-the-ui.adoc @@ -0,0 +1,22 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="adjusting-image-repository-visibility-the-ui"] += Adjusting repository visibility by using the UI + +You can adjust the visibility of a repository to make it either public or private by using the {productname} UI. + +.Procedure + +. On the {productname} UI, click *Repositories* in the navigation pane. + +. Click the name of a repository. + +. Click *Settings* in the navigation pane. + +. Click *Repository visibility*. + +. Click *Make private*. The repository is made private, and only users on the permissions list can view and interact with it.
\ No newline at end of file diff --git a/modules/airgap-clair.adoc b/modules/airgap-clair.adoc new file mode 100644 index 000000000..d4053b52c --- /dev/null +++ b/modules/airgap-clair.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="airgap-clair"] += Using Clair in air-gapped environments + +By default, Clair tries to run automated updates against Red Hat servers. To run Clair in network environments that are disconnected from the internet, you must execute the following actions: + +* Disable Clair auto-update in the Clair configuration bundle. +* Manually update the vulnerability database on a system with internet access and then export to disk. +* Transfer the on-disk data to the target system using offline media, and then manually import it into Clair. + +Using Clair in air-gapped environments is fully containerized and, as a result, is easy to automate. \ No newline at end of file diff --git a/modules/airgap-intro.adoc b/modules/airgap-intro.adoc new file mode 100644 index 000000000..f2388e648 --- /dev/null +++ b/modules/airgap-intro.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="arch-airgap-intro"] += Air-gapped or disconnected deployments + +In the following diagram, the upper deployment in the diagram shows {productname} and Clair connected to the internet, with an air-gapped {ocp} cluster accessing the {productname} registry through an explicit, allowlisted hole in the firewall. + +The lower deployment in the diagram shows {productname} and Clair running inside of the firewall, with image and CVE data transferred to the target system using offline media. The data is exported from a separate {productname} and Clair deployment that is connected to the internet. 
+ +The following diagram shows how {productname} and Clair can be deployed in air-gapped or disconnected environments: + +.{productname} and Clair in disconnected, or air-gapped, environments +image:178_Quay_architecture_0821_air-gapped.png[Air-gapped deployment] + + + + diff --git a/modules/allow-access-user-repo.adoc b/modules/allow-access-user-repo.adoc new file mode 100644 index 000000000..9b88be878 --- /dev/null +++ b/modules/allow-access-user-repo.adoc @@ -0,0 +1,66 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="allow-access-user-repo"] += Adjusting access settings for a repository by using the UI + +Use the following procedure to adjust access settings for a user or robot account for a repository using the v2 UI. + +.Prerequisites + +* You have created a user account or robot account. + +.Procedure + +. Log into +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +ifeval::["{context}" == "quay-security"] +{productname}. +endif::[] + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository, for example, `quayadmin/busybox`. + +. Click the *Settings* tab. + +. Optional. Click *User and robot permissions*. You can adjust the settings for a user or robot account by clicking the dropdown menu option under *Permissions*. You can change the settings to *Read*, *Write*, or *Admin*. ++ +* *Read*. The User or Robot Account can view and pull from the repository. +* *Write*. The User or Robot Account can read (pull) from and write (push) to the repository. +* *Admin*. The User or Robot account has access to pull from, and push to, the repository, plus the ability to do administrative tasks associated with the repository. + +//// +. Optional. Click *Events and notifications*. You can create an event and notification by clicking *Create Notification*. 
The following event options are available: ++ +* Push to Repository +* Package Vulnerability Found +* Image build failed +* Image build queued +* Image build started +* Image build success +* Image build cancelled ++ +Then, issue a notification. The following options are available: ++ +* Email Notification +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification +* Webhook POST ++ +After selecting an event option and the method of notification, include a *Room ID #*, a *Room Notification Token*, then, click *Submit*. + +. Optional. Click *Repository visibility*. You can make the repository private, or public, by clicking *Make Public*. + +. Optional. Click *Delete repository*. You can delete the repository by clicking *Delete Repository*. +//// \ No newline at end of file diff --git a/modules/api-appspecifictokens-createAppToken.adoc b/modules/api-appspecifictokens-createAppToken.adoc new file mode 100644 index 000000000..305c85110 --- /dev/null +++ b/modules/api-appspecifictokens-createAppToken.adoc @@ -0,0 +1,51 @@ + += createAppToken +Create a new app specific token for user. + +[discrete] +== POST /api/v1/user/apptoken + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Request body schema (application/json) + +Description of a new token. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**title** + +_required_|Friendly name to help identify the token|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- + $ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "title": "MyAppToken" + }' \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- diff --git a/modules/api-appspecifictokens-getAppToken.adoc b/modules/api-appspecifictokens-getAppToken.adoc new file mode 100644 index 000000000..79b81bb50 --- /dev/null +++ b/modules/api-appspecifictokens-getAppToken.adoc @@ -0,0 +1,44 @@ + += getAppToken +Returns a specific app token for the user. 
+ +[discrete] +== GET /api/v1/user/apptoken/{token_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**token_uuid** + +_required_|The uuid of the app specific token|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- \ No newline at end of file diff --git a/modules/api-appspecifictokens-listAppTokens.adoc b/modules/api-appspecifictokens-listAppTokens.adoc new file mode 100644 index 000000000..7c0a737e3 --- /dev/null +++ b/modules/api-appspecifictokens-listAppTokens.adoc @@ -0,0 +1,45 @@ + += listAppTokens +Lists the app specific tokens for the user. 
+ +[discrete] +== GET /api/v1/user/apptoken + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**expiring** + +_optional_|If true, only returns those tokens expiring soon|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- \ No newline at end of file diff --git a/modules/api-appspecifictokens-revokeAppToken.adoc b/modules/api-appspecifictokens-revokeAppToken.adoc new file mode 100644 index 000000000..9daa9a072 --- /dev/null +++ b/modules/api-appspecifictokens-revokeAppToken.adoc @@ -0,0 +1,44 @@ + += revokeAppToken +Revokes a specific app token for the user. 
+ +[discrete] +== DELETE /api/v1/user/apptoken/{token_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**token_uuid** + +_required_|The uuid of the app specific token|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- \ No newline at end of file diff --git a/modules/api-appspecifictokens.adoc b/modules/api-appspecifictokens.adoc new file mode 100644 index 000000000..8215d15a5 --- /dev/null +++ b/modules/api-appspecifictokens.adoc @@ -0,0 +1,4 @@ + += appspecifictokens +Manages app specific tokens for the current user. 
+ diff --git a/modules/api-authorization.adoc b/modules/api-authorization.adoc new file mode 100644 index 000000000..392193c8f --- /dev/null +++ b/modules/api-authorization.adoc @@ -0,0 +1,23 @@ + += Authorization + +oauth2_implicit + + +[discrete] +== Scopes + +The following scopes are used to control access to the API endpoints: + +[options="header", width=100%, cols=".^2a,.^9a"] +|=== +|Scope|Description +|**repo:read**|This application will be able to view and pull all repositories visible to the granting user or robot account +|**repo:write**|This application will be able to view, push and pull to all repositories to which the granting user or robot account has write access +|**repo:admin**|This application will have administrator access to all repositories to which the granting user or robot account has access +|**repo:create**|This application will be able to create repositories in to any namespaces that the granting user or robot account is allowed to create repositories +|**user:read**|This application will be able to read user information such as username and email address. +|**org:admin**|This application will be able to administer your organizations including creating robots, creating teams, adjusting team membership, and changing billing settings. You should have absolute trust in the requesting application before granting this permission. +|**super:user**|This application will be able to administer your installation including managing users, managing organizations and other features found in the superuser panel. You should have absolute trust in the requesting application before granting this permission. +|**user:admin**|This application will be able to administer your account including creating robots and granting them permissions to your repositories. You should have absolute trust in the requesting application before granting this permission. 
+|=== diff --git a/modules/api-build-cancelRepoBuild.adoc b/modules/api-build-cancelRepoBuild.adoc new file mode 100644 index 000000000..015cb8180 --- /dev/null +++ b/modules/api-build-cancelRepoBuild.adoc @@ -0,0 +1,37 @@ + += cancelRepoBuild +Cancels a repository build. + +[discrete] +== DELETE /api/v1/repository/{repository}/build/{build_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-build-getRepoBuild.adoc b/modules/api-build-getRepoBuild.adoc new file mode 100644 index 000000000..7f2a54946 --- /dev/null +++ b/modules/api-build-getRepoBuild.adoc @@ -0,0 +1,37 @@ + += getRepoBuild +Returns information about a build. + +[discrete] +== GET /api/v1/repository/{repository}/build/{build_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-build-getRepoBuildLogs.adoc b/modules/api-build-getRepoBuildLogs.adoc new file mode 100644 index 000000000..818643a25 --- /dev/null +++ b/modules/api-build-getRepoBuildLogs.adoc @@ -0,0 +1,37 @@ + += getRepoBuildLogs +Return the build logs for the build specified by the build uuid. + +[discrete] +== GET /api/v1/repository/{repository}/build/{build_uuid}/logs + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-build-getRepoBuildStatus.adoc b/modules/api-build-getRepoBuildStatus.adoc new file mode 100644 index 000000000..3611a3f1e --- /dev/null +++ b/modules/api-build-getRepoBuildStatus.adoc @@ -0,0 +1,37 @@ + += getRepoBuildStatus +Return the status for the builds specified by the build uuids. 
+ +[discrete] +== GET /api/v1/repository/{repository}/build/{build_uuid}/status + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-build-getRepoBuilds.adoc b/modules/api-build-getRepoBuilds.adoc new file mode 100644 index 000000000..c32491bad --- /dev/null +++ b/modules/api-build-getRepoBuilds.adoc @@ -0,0 +1,48 @@ + += getRepoBuilds +Get the list of repository builds. + +[discrete] +== GET /api/v1/repository/{repository}/build/ + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**since** + +_optional_|Returns all builds since the given unix timecode|integer +|query|**limit** + +_optional_|The maximum number of builds to return|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-build-requestRepoBuild.adoc b/modules/api-build-requestRepoBuild.adoc new file mode 100644 index 000000000..4dffc2ea2 --- /dev/null +++ b/modules/api-build-requestRepoBuild.adoc @@ -0,0 +1,61 @@ + += requestRepoBuild +Request that a repository be built and pushed from the specified input. + +[discrete] +== POST /api/v1/repository/{repository}/build/ + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new repository build. + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**file_id** + +_optional_|The file id that was generated when the build spec was uploaded|string +|**archive_url** + +_optional_|The URL of the .tar.gz to build. Must start with "http" or "https".|string +|**subdirectory** + +_optional_|Subdirectory in which the Dockerfile can be found. You can only specify this or dockerfile_path|string +|**dockerfile_path** + +_optional_|Path to a dockerfile. 
You can only specify this or subdirectory.|string +|**context** + +_optional_|Pass in the context for the dockerfile. This is optional.|string +|**pull_robot** + +_optional_|Username of a Quay robot account to use as pull credentials|string +|**tags** + +_optional_|The tags to which the built images will be pushed. If none specified, "latest" is used.|array of string + +`non-empty` `unique` +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-build.adoc b/modules/api-build.adoc new file mode 100644 index 000000000..c886afa3b --- /dev/null +++ b/modules/api-build.adoc @@ -0,0 +1,4 @@ + += build +Create, list, cancel and get status/logs of repository builds. + diff --git a/modules/api-definitions.adoc b/modules/api-definitions.adoc new file mode 100644 index 000000000..b40994a21 --- /dev/null +++ b/modules/api-definitions.adoc @@ -0,0 +1,89 @@ + + += Definitions + +[[_apierror]] +== ApiError + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**status** + +_optional_|Status code of the response.|integer +|**type** + +_optional_|Reference to the type of the error.|string +|**detail** + +_optional_|Details about the specific instance of the error.|string +|**title** + +_optional_|Unique error code to identify the type of error.|string +|**error_message** + +_optional_|Deprecated; alias for detail|string +|**error_type** + +_optional_|Deprecated; alias for detail|string +|=== + +[[_userview]] +== UserView + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**verified** + +_optional_|Whether the user's email address has been verified|boolean +|**anonymous** + +_optional_|true if this user 
data represents a guest user|boolean +|**email** + +_optional_|The user's email address|string +|**avatar** + +_optional_|Avatar data representing the user's icon|object +|**organizations** + +_optional_|Information about the organizations in which the user is a member|array of object + + +|**logins** + +_optional_|The list of external login providers against which the user has authenticated|array of object + + +|**can_create_repo** + +_optional_|Whether the user has permission to create repositories|boolean +|**preferred_namespace** + +_optional_|If true, the user's namespace is the preferred namespace to display|boolean +|=== + +[[_viewmirrorconfig]] +== ViewMirrorConfig + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**is_enabled** + +_optional_|Used to enable or disable synchronizations.|boolean +|**external_reference** + +_optional_|Location of the external repository.|string +|**external_registry_username** + +_optional_|Username used to authenticate with external registry.| +|**external_registry_password** + +_optional_|Password used to authenticate with external registry.| +|**sync_start_date** + +_optional_|Determines the next time this repository is ready for synchronization.|string +|**sync_interval** + +_optional_|Number of seconds after next_start_date to begin synchronizing.|integer +|**robot_username** + +_optional_|Username of robot which will be used for image pushes.|string +|**root_rule** + +_optional_|A list of glob-patterns used to determine which tags should be synchronized.|object +|**external_registry_config** + +_optional_||object +|=== + +[[_apierrordescription]] +== ApiErrorDescription + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**type** + +_optional_|A reference to the error type resource|string +|**title** + +_optional_|The title of the error. 
Can be used to uniquely identify the kind of error.|string +|**description** + +_optional_|A more detailed description of the error that may include help for fixing the issue.|string +|=== diff --git a/modules/api-discovery-discovery.adoc b/modules/api-discovery-discovery.adoc new file mode 100644 index 000000000..883f3db92 --- /dev/null +++ b/modules/api-discovery-discovery.adoc @@ -0,0 +1,43 @@ + += discovery +List all of the API endpoints available in the swagger API format. + +[discrete] +== GET /api/v1/discovery + + + +**Authorizations: ** + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**internal** + +_optional_|Whether to include internal APIs.|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/discovery?query=true" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-discovery.adoc b/modules/api-discovery.adoc new file mode 100644 index 000000000..5fbc4df60 --- /dev/null +++ b/modules/api-discovery.adoc @@ -0,0 +1,4 @@ + += discovery +API discovery information. + diff --git a/modules/api-error-getErrorDescription.adoc b/modules/api-error-getErrorDescription.adoc new file mode 100644 index 000000000..66f9b066e --- /dev/null +++ b/modules/api-error-getErrorDescription.adoc @@ -0,0 +1,42 @@ + += getErrorDescription +Get a detailed description of the error. 
+ +[discrete] +== GET /api/v1/error/{error_type} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**error_type** + +_required_|The error code identifying the type of error.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation|<<_apierrordescription,ApiErrorDescription>> +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/error/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-error.adoc b/modules/api-error.adoc new file mode 100644 index 000000000..8ec93b9ea --- /dev/null +++ b/modules/api-error.adoc @@ -0,0 +1,4 @@ + += error +Error details API. + diff --git a/modules/api-global-messages.adoc b/modules/api-global-messages.adoc new file mode 100644 index 000000000..86ba49e3c --- /dev/null +++ b/modules/api-global-messages.adoc @@ -0,0 +1,55 @@ +:_content-type: PROCEDURE +[id="api-global-messages"] += Global messages + +Global messages can be created, obtained, or deleted by using the {productname} API. +Use the following procedure to create, obtain, or delete a global message. + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +. 
Create a message by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createglobalmessage[`POST /api/v1/message`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/messages" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "message": { + "content": "Hi", + "media_type": "text/plain", + "severity": "info" + } + }' +---- ++ +This command does not return output. + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getglobalmessages[`GET /api/v1/messages`] command to return the list of global messages: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/messages" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"messages": [{"uuid": "ecababd4-3451-4458-b5db-801684137444", "content": "Hi", "severity": "info", "media_type": "text/plain"}]} +---- + +. Delete the global message by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteglobalmessage[`DELETE /api/v1/message/{uuid}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/message/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/api-globalmessages-createGlobalMessage.adoc b/modules/api-globalmessages-createGlobalMessage.adoc new file mode 100644 index 000000000..d3f1e451b --- /dev/null +++ b/modules/api-globalmessages-createGlobalMessage.adoc @@ -0,0 +1,54 @@ + += createGlobalMessage +Create a message. 
+ +[discrete] +== POST /api/v1/messages + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Request body schema (application/json) + +Create a new message + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**message** + +_required_|A single message|object +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST "https:///api/v1/messages" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "message": { + "content": "Hi", + "media_type": "text/plain", + "severity": "info" + } + }' +---- \ No newline at end of file diff --git a/modules/api-globalmessages-deleteGlobalMessage.adoc b/modules/api-globalmessages-deleteGlobalMessage.adoc new file mode 100644 index 000000000..656cd9b3c --- /dev/null +++ b/modules/api-globalmessages-deleteGlobalMessage.adoc @@ -0,0 +1,43 @@ + += deleteGlobalMessage +Delete a message. 
+ +[discrete] +== DELETE /api/v1/message/{uuid} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**uuid** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/message/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-globalmessages-getGlobalMessages.adoc b/modules/api-globalmessages-getGlobalMessages.adoc new file mode 100644 index 000000000..39c2319e1 --- /dev/null +++ b/modules/api-globalmessages-getGlobalMessages.adoc @@ -0,0 +1,31 @@ + += getGlobalMessages +Return a super users messages. + +[discrete] +== GET /api/v1/messages + + + +**Authorizations: ** + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/messages" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-globalmessages.adoc b/modules/api-globalmessages.adoc new file mode 100644 index 000000000..2d47006d3 --- /dev/null +++ b/modules/api-globalmessages.adoc @@ -0,0 +1,4 @@ + += globalmessages +Messages API. 
+ diff --git a/modules/api-logs-exportOrgLogs.adoc b/modules/api-logs-exportOrgLogs.adoc new file mode 100644 index 000000000..654d8d85c --- /dev/null +++ b/modules/api-logs-exportOrgLogs.adoc @@ -0,0 +1,79 @@ + += exportOrgLogs +Exports the logs for the specified organization. + +[discrete] +== POST /api/v1/organization/{orgname}/exportlogs + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Request body schema (application/json) + +Configuration for an export logs operation + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**callback_url** + +_optional_|The callback URL to invoke with a link to the exported logs|string +|**callback_email** + +_optional_|The e-mail address at which to e-mail a link to the exported logs|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "org.logs@example.com" + }' \ + 
"http:///api/v1/organization/{orgname}/exportlogs" +---- \ No newline at end of file diff --git a/modules/api-logs-exportRepoLogs.adoc b/modules/api-logs-exportRepoLogs.adoc new file mode 100644 index 000000000..b34b59207 --- /dev/null +++ b/modules/api-logs-exportRepoLogs.adoc @@ -0,0 +1,79 @@ + += exportRepoLogs +Queues an export of the logs for the specified repository. + +[discrete] +== POST /api/v1/repository/{repository}/exportlogs + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Request body schema (application/json) + +Configuration for an export logs operation + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**callback_url** + +_optional_|The callback URL to invoke with a link to the exported logs|string +|**callback_email** + +_optional_|The e-mail address at which to e-mail a link to the exported logs|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "2024-01-01", + "endtime": "2024-06-18", + "callback_url": "http://your-callback-url.example.com" + }' \ + "http:///api/v1/repository/{repository}/exportlogs" +---- \ No newline at end of file diff --git a/modules/api-logs-exportUserLogs.adoc b/modules/api-logs-exportUserLogs.adoc new file mode 100644 index 000000000..a21912db1 --- /dev/null +++ b/modules/api-logs-exportUserLogs.adoc @@ -0,0 +1,69 @@ + += exportUserLogs +Returns the aggregated logs for the current user. + +[discrete] +== POST /api/v1/user/exportlogs + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Request body schema (application/json) + +Configuration for an export logs operation + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**callback_url** + +_optional_|The callback URL to invoke with a link to the exported logs|string +|**callback_email** + +_optional_|The e-mail address at which to e-mail a link to the exported logs|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "your.email@example.com" + }' \ + "http:///api/v1/user/exportlogs" +---- \ No newline at end of file diff --git a/modules/api-logs-getAggregateOrgLogs.adoc b/modules/api-logs-getAggregateOrgLogs.adoc new file mode 100644 index 000000000..4693edca4 --- /dev/null +++ b/modules/api-logs-getAggregateOrgLogs.adoc @@ -0,0 +1,60 @@ + += getAggregateOrgLogs +Gets the aggregated logs for the specified organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/aggregatelogs + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**performer** + +_optional_|Username for which to filter logs.|string +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/organization/{orgname}/aggregatelogs" +---- diff --git a/modules/api-logs-getAggregateRepoLogs.adoc b/modules/api-logs-getAggregateRepoLogs.adoc new file mode 100644 index 000000000..993d71b0b --- /dev/null +++ b/modules/api-logs-getAggregateRepoLogs.adoc @@ -0,0 +1,58 @@ + += getAggregateRepoLogs +Returns the aggregated logs for the specified repository. + +[discrete] +== GET /api/v1/repository/{repository}/aggregatelogs + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/repository///aggregatelogs?starttime=2024-01-01&endtime=2024-06-18" +---- \ No newline at end of file diff --git a/modules/api-logs-getAggregateUserLogs.adoc b/modules/api-logs-getAggregateUserLogs.adoc new file mode 100644 index 000000000..6fe558bc7 --- /dev/null +++ b/modules/api-logs-getAggregateUserLogs.adoc @@ -0,0 +1,50 @@ + += getAggregateUserLogs +Returns the aggregated logs for the current user. + +[discrete] +== GET /api/v1/user/aggregatelogs + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**performer** + +_optional_|Username for which to filter logs.|string +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/user/aggregatelogs?performer=&starttime=&endtime=" +---- \ No newline at end of file diff --git a/modules/api-logs-listOrgLogs.adoc b/modules/api-logs-listOrgLogs.adoc new file mode 100644 index 000000000..5ea0b177f --- /dev/null +++ b/modules/api-logs-listOrgLogs.adoc @@ -0,0 +1,62 @@ + += listOrgLogs +List the logs for the specified organization. + +[discrete] +== GET /api/v1/organization/{orgname}/logs + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**performer** + +_optional_|Username for which to filter logs.|string +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/organization/{orgname}/logs" +---- \ No newline at end of file diff --git a/modules/api-logs-listRepoLogs.adoc b/modules/api-logs-listRepoLogs.adoc new file mode 100644 index 000000000..45c032744 --- /dev/null +++ b/modules/api-logs-listRepoLogs.adoc @@ -0,0 +1,60 @@ + += listRepoLogs +List the logs for the specified repository. + +[discrete] +== GET /api/v1/repository/{repository}/logs + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/repository/{repository}/logs" +---- \ No newline at end of file diff --git a/modules/api-logs-listUserLogs.adoc b/modules/api-logs-listUserLogs.adoc new file mode 100644 index 000000000..27d68f5aa --- /dev/null +++ b/modules/api-logs-listUserLogs.adoc @@ -0,0 +1,49 @@ + += listUserLogs +List the logs for the current user. + +[discrete] +== GET /api/v1/user/logs + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**performer** + +_optional_|Username for which to filter logs.|string +|query|**endtime** + +_optional_|Latest time for logs. Format: "%m/%d/%Y" in UTC.|string +|query|**starttime** + +_optional_|Earliest time for logs. 
Format: "%m/%d/%Y" in UTC.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "/api/v1/user/logs" +---- \ No newline at end of file diff --git a/modules/api-logs.adoc b/modules/api-logs.adoc new file mode 100644 index 000000000..484025818 --- /dev/null +++ b/modules/api-logs.adoc @@ -0,0 +1,4 @@ + += logs +Access usage logs for organizations or repositories. + diff --git a/modules/api-manifest-addManifestLabel.adoc b/modules/api-manifest-addManifestLabel.adoc new file mode 100644 index 000000000..8ca4c9fe1 --- /dev/null +++ b/modules/api-manifest-addManifestLabel.adoc @@ -0,0 +1,69 @@ + += addManifestLabel +Adds a new label into the tag manifest. + +[discrete] +== POST /api/v1/repository/{repository}/manifest/{manifestref}/labels + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string +|=== + + +[discrete] +== Request body schema (application/json) + +Adds a label to a manifest + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**key** + +_required_|The key for the label|string +|**value** + +_required_|The value for the label|string +|**media_type** + +_required_|The media type for this label| +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "key": "", + "value": "", + "media_type": "" + }' \ + https:///api/v1/repository//manifest//labels +---- \ No newline at end of file diff --git a/modules/api-manifest-deleteManifestLabel.adoc b/modules/api-manifest-deleteManifestLabel.adoc new file mode 100644 index 000000000..2567cd29f --- /dev/null +++ b/modules/api-manifest-deleteManifestLabel.adoc @@ -0,0 +1,48 @@ + += deleteManifestLabel +Deletes an existing label from a manifest. + +[discrete] +== DELETE /api/v1/repository/{repository}/manifest/{manifestref}/labels/{labelid} + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string +|path|**labelid** + +_required_|The ID of the label|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//manifest//labels/ +---- \ No newline at end of file diff --git a/modules/api-manifest-getManifestLabel.adoc b/modules/api-manifest-getManifestLabel.adoc new file mode 100644 index 000000000..b06777204 --- /dev/null +++ b/modules/api-manifest-getManifestLabel.adoc @@ -0,0 +1,49 @@ + += getManifestLabel +Retrieves the label with the specific ID under the manifest. + +[discrete] +== GET /api/v1/repository/{repository}/manifest/{manifestref}/labels/{labelid} + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string +|path|**labelid** + +_required_|The ID of the label|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels/ +---- \ No newline at end of file diff --git a/modules/api-manifest-getRepoManifest.adoc b/modules/api-manifest-getRepoManifest.adoc new file mode 100644 index 000000000..7fb537feb --- /dev/null +++ b/modules/api-manifest-getRepoManifest.adoc @@ -0,0 +1,47 @@ + += getRepoManifest + + +[discrete] +== GET /api/v1/repository/{repository}/manifest/{manifestref} + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest/ +---- \ No newline at end of file diff --git a/modules/api-manifest-listManifestLabels.adoc b/modules/api-manifest-listManifestLabels.adoc new file mode 100644 index 000000000..99f8b6870 --- /dev/null +++ b/modules/api-manifest-listManifestLabels.adoc @@ -0,0 +1,58 @@ + += listManifestLabels + + +[discrete] +== GET /api/v1/repository/{repository}/manifest/{manifestref}/labels + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**filter** + +_optional_|If specified, only labels matching the given prefix will be returned|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//manifest//labels +---- \ No newline at end of file diff --git a/modules/api-manifest.adoc b/modules/api-manifest.adoc new file mode 100644 index 000000000..6ef654a89 --- /dev/null +++ b/modules/api-manifest.adoc @@ -0,0 +1,4 @@ + += manifest +Manage the manifests of a repository. + diff --git a/modules/api-mirror-changeRepoMirrorConfig.adoc b/modules/api-mirror-changeRepoMirrorConfig.adoc new file mode 100644 index 000000000..08a608c46 --- /dev/null +++ b/modules/api-mirror-changeRepoMirrorConfig.adoc @@ -0,0 +1,88 @@ + += changeRepoMirrorConfig +Allow users to modifying the repository's mirroring configuration. + +[discrete] +== PUT /api/v1/repository/{repository}/mirror + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Update the repository mirroring configuration. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**is_enabled** + +_optional_|Used to enable or disable synchronizations.|boolean +|**external_reference** + +_optional_|Location of the external repository.|string +|**external_registry_username** + +_optional_|Username used to authenticate with external registry.| +|**external_registry_password** + +_optional_|Password used to authenticate with external registry.| +|**sync_start_date** + +_optional_|Determines the next time this repository is ready for synchronization.|string +|**sync_interval** + +_optional_|Number of seconds after next_start_date to begin synchronizing.|integer +|**robot_username** + +_optional_|Username of robot which will be used for image pushes.|string +|**root_rule** + +_optional_|A list of glob-patterns used to determine which tags should be synchronized.|object +|**external_registry_config** + +_optional_||object +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , <1> + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- +<1> Disables automatic synchronization. 
\ No newline at end of file diff --git a/modules/api-mirror-createRepoMirrorConfig.adoc b/modules/api-mirror-createRepoMirrorConfig.adoc new file mode 100644 index 000000000..b5a61b5d2 --- /dev/null +++ b/modules/api-mirror-createRepoMirrorConfig.adoc @@ -0,0 +1,87 @@ + += createRepoMirrorConfig +Create a RepoMirrorConfig for a given Repository. + +[discrete] +== POST /api/v1/repository/{repository}/mirror + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Create the repository mirroring configuration. + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**is_enabled** + +_optional_|Used to enable or disable synchronizations.|boolean +|**external_reference** + +_required_|Location of the external repository.|string +|**external_registry_username** + +_optional_|Username used to authenticate with external registry.| +|**external_registry_password** + +_optional_|Password used to authenticate with external registry.| +|**sync_start_date** + +_required_|Determines the next time this repository is ready for synchronization.|string +|**sync_interval** + +_required_|Number of seconds after next_start_date to begin synchronizing.|integer +|**robot_username** + +_required_|Username of robot which will be used for image pushes.|string +|**root_rule** + +_required_|A list of glob-patterns used to determine which tags should be synchronized.|object +|**external_registry_config** + +_optional_||object +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session 
required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- \ No newline at end of file diff --git a/modules/api-mirror-getRepoMirrorConfig.adoc b/modules/api-mirror-getRepoMirrorConfig.adoc new file mode 100644 index 000000000..6baf05bf4 --- /dev/null +++ b/modules/api-mirror-getRepoMirrorConfig.adoc @@ -0,0 +1,44 @@ + += getRepoMirrorConfig +Return the Mirror configuration for a given Repository. + +[discrete] +== GET /api/v1/repository/{repository}/mirror + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation|<<_viewmirrorconfig,ViewMirrorConfig>> +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-mirror-syncCancel.adoc b/modules/api-mirror-syncCancel.adoc new file mode 100644 index 000000000..8c4b879d1 --- /dev/null +++ b/modules/api-mirror-syncCancel.adoc @@ -0,0 +1,43 @@ + += syncCancel +Update the sync_status for a given Repository's mirroring configuration. + +[discrete] +== POST /api/v1/repository/{repository}/mirror/sync-cancel + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-cancel" +---- \ No newline at end of file diff --git a/modules/api-mirror-syncNow.adoc b/modules/api-mirror-syncNow.adoc new file mode 100644 index 000000000..b2d20c7dc --- /dev/null +++ b/modules/api-mirror-syncNow.adoc @@ -0,0 +1,44 @@ + += syncNow +Update the sync_status for a given Repository's mirroring configuration. + +[discrete] +== POST /api/v1/repository/{repository}/mirror/sync-now + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-now" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-mirror.adoc b/modules/api-mirror.adoc new file mode 100644 index 000000000..a3de07d6d --- /dev/null +++ b/modules/api-mirror.adoc @@ -0,0 +1,4 @@ + += mirror + + diff --git a/modules/api-namespacequota-changeOrganizationQuota.adoc b/modules/api-namespacequota-changeOrganizationQuota.adoc new file mode 100644 index 000000000..82fdc3476 --- /dev/null +++ b/modules/api-namespacequota-changeOrganizationQuota.adoc @@ -0,0 +1,68 @@ + += changeOrganizationQuota + + +[discrete] +== PUT /api/v1/organization/{orgname}/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**limit_bytes** + +_optional_|Number of bytes the organization is allowed|integer + +|**limits** + +_optional_|Human readable storage capacity of the organization. Accepts SI units like Mi, Gi, or Ti, as well as non-standard units like GB or MB. 
Must be mutually exclusive with `limit_bytes`.|string + +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc b/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc new file mode 100644 index 000000000..c75b4ba41 --- /dev/null +++ b/modules/api-namespacequota-changeOrganizationQuotaLimit.adoc @@ -0,0 +1,69 @@ + += changeOrganizationQuotaLimit + + +[discrete] +== PUT /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**limit_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of changing organization quota limit + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**type** + +_optional_|Type of quota limit: "Warning" or "Reject"|string +|**threshold_percent** + +_optional_|Quota threshold, in percent of quota|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> 
+|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "type": "", + "threshold_percent": + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-createOrganizationQuota.adoc b/modules/api-namespacequota-createOrganizationQuota.adoc new file mode 100644 index 000000000..f5c59bcc9 --- /dev/null +++ b/modules/api-namespacequota-createOrganizationQuota.adoc @@ -0,0 +1,66 @@ + += createOrganizationQuota +Create a new organization quota. + +[discrete] +== POST /api/v1/organization/{orgname}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**limit_bytes** + +_required_|Number of bytes the organization is allowed|integer + +|**limits** + +_optional_|Human readable storage capacity of the organization. Accepts SI units like Mi, Gi, or Ti, as well as non-standard units like GB or MB. 
Must be mutually exclusive with `limit_bytes`.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240, + "limits": "10 Gi" + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-createOrganizationQuotaLimit.adoc b/modules/api-namespacequota-createOrganizationQuotaLimit.adoc new file mode 100644 index 000000000..a3fccfd49 --- /dev/null +++ b/modules/api-namespacequota-createOrganizationQuotaLimit.adoc @@ -0,0 +1,68 @@ + += createOrganizationQuotaLimit + + +[discrete] +== POST /api/v1/organization/{orgname}/quota/{quota_id}/limit + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota limit + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**type** + +_required_|Type of quota limit: "Warning" or "Reject"|string +|**threshold_percent** + +_required_|Quota threshold, in percent of quota|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized 
access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "type": "Reject", + "threshold_percent": 90 + }' +---- \ No newline at end of file diff --git a/modules/api-namespacequota-deleteOrganizationQuota.adoc b/modules/api-namespacequota-deleteOrganizationQuota.adoc new file mode 100644 index 000000000..8198db994 --- /dev/null +++ b/modules/api-namespacequota-deleteOrganizationQuota.adoc @@ -0,0 +1,47 @@ + += deleteOrganizationQuota + + +[discrete] +== DELETE /api/v1/organization/{orgname}/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc b/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc new file mode 100644 index 000000000..a413f0f67 --- /dev/null +++ b/modules/api-namespacequota-deleteOrganizationQuotaLimit.adoc @@ -0,0 +1,49 @@ + += deleteOrganizationQuotaLimit + + +[discrete] +== DELETE /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id} + + + 
+**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**limit_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getOrganizationQuota.adoc b/modules/api-namespacequota-getOrganizationQuota.adoc new file mode 100644 index 000000000..3e5797264 --- /dev/null +++ b/modules/api-namespacequota-getOrganizationQuota.adoc @@ -0,0 +1,45 @@ + += getOrganizationQuota + + +[discrete] +== GET /api/v1/organization/{orgname}/quota/{quota_id} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git 
a/modules/api-namespacequota-getOrganizationQuotaLimit.adoc b/modules/api-namespacequota-getOrganizationQuotaLimit.adoc new file mode 100644 index 000000000..5dceece76 --- /dev/null +++ b/modules/api-namespacequota-getOrganizationQuotaLimit.adoc @@ -0,0 +1,47 @@ + += getOrganizationQuotaLimit + + +[discrete] +== GET /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**limit_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getUserQuota.adoc b/modules/api-namespacequota-getUserQuota.adoc new file mode 100644 index 000000000..f25452fdf --- /dev/null +++ b/modules/api-namespacequota-getUserQuota.adoc @@ -0,0 +1,45 @@ + += getUserQuota + + +[discrete] +== GET /api/v1/user/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> 
+|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-getUserQuotaLimit.adoc b/modules/api-namespacequota-getUserQuotaLimit.adoc new file mode 100644 index 000000000..43af0057c --- /dev/null +++ b/modules/api-namespacequota-getUserQuotaLimit.adoc @@ -0,0 +1,47 @@ + += getUserQuotaLimit + + +[discrete] +== GET /api/v1/user/quota/{quota_id}/limit/{limit_id} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**limit_id** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit/{limit_id}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listOrganizationQuota.adoc b/modules/api-namespacequota-listOrganizationQuota.adoc new file mode 100644 index 000000000..ad266dab5 --- /dev/null +++ b/modules/api-namespacequota-listOrganizationQuota.adoc @@ -0,0 +1,42 @@ + += listOrganizationQuota + + +[discrete] +== GET /api/v1/organization/{orgname}/quota + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_||string +|=== 
+ + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https:///api/v1/organization//quota +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listOrganizationQuotaLimit.adoc b/modules/api-namespacequota-listOrganizationQuotaLimit.adoc new file mode 100644 index 000000000..5fb7eacc1 --- /dev/null +++ b/modules/api-namespacequota-listOrganizationQuotaLimit.adoc @@ -0,0 +1,45 @@ + += listOrganizationQuotaLimit + + +[discrete] +== GET /api/v1/organization/{orgname}/quota/{quota_id}/limit + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listUserQuota.adoc b/modules/api-namespacequota-listUserQuota.adoc new file mode 100644 index 000000000..32e3a6314 --- /dev/null +++ b/modules/api-namespacequota-listUserQuota.adoc @@ -0,0 +1,34 @@ + += listUserQuota + + +[discrete] +== GET 
/api/v1/user/quota + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota-listUserQuotaLimit.adoc b/modules/api-namespacequota-listUserQuotaLimit.adoc new file mode 100644 index 000000000..257e5b0c6 --- /dev/null +++ b/modules/api-namespacequota-listUserQuotaLimit.adoc @@ -0,0 +1,45 @@ + += listUserQuotaLimit + + +[discrete] +== GET /api/v1/user/quota/{quota_id}/limit + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**quota_id** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-namespacequota.adoc b/modules/api-namespacequota.adoc new file mode 100644 index 000000000..631534eae --- /dev/null +++ b/modules/api-namespacequota.adoc @@ -0,0 +1,4 @@ + += namespacequota + + diff --git a/modules/api-organization-changeOrganizationDetails.adoc 
b/modules/api-organization-changeOrganizationDetails.adoc new file mode 100644 index 000000000..cae47e80e --- /dev/null +++ b/modules/api-organization-changeOrganizationDetails.adoc @@ -0,0 +1,63 @@ + += changeOrganizationDetails +Change the details for the specified organization. + +[discrete] +== PUT /api/v1/organization/{orgname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of updates for an existing organization + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**email** + +_optional_|Organization contact email|string +|**invoice_email** + +_optional_|Whether the organization desires to receive emails for invoices|boolean +|**invoice_email_address** + +_optional_|The email address at which to receive invoices| +|**tag_expiration_s** + +_optional_|The number of seconds for tag expiration|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- \ No newline at end of file diff --git a/modules/api-organization-createOrganization.adoc b/modules/api-organization-createOrganization.adoc new file mode 100644 index 000000000..19db938ca --- /dev/null +++ b/modules/api-organization-createOrganization.adoc @@ -0,0 +1,53 @@ + += createOrganization +Create a new organization. 
+ +[discrete] +== POST /api/v1/organization/ + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Request body schema (application/json) + +Description of a new organization. + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**name** + +_required_|Organization username|string +|**email** + +_optional_|Organization contact email|string +|**recaptcha_response** + +_optional_|The (may be disabled) recaptcha response code for verification|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "name": "" + }' "https:///api/v1/organization/" +---- diff --git a/modules/api-organization-createOrganizationApplication.adoc b/modules/api-organization-createOrganizationApplication.adoc new file mode 100644 index 000000000..2e4e2f258 --- /dev/null +++ b/modules/api-organization-createOrganizationApplication.adoc @@ -0,0 +1,72 @@ + += createOrganizationApplication +Creates a new application under this organization. + +[discrete] +== POST /api/v1/organization/{orgname}/applications + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization application. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**name** + +_required_|The name of the application|string +|**redirect_uri** + +_optional_|The URI for the application's OAuth redirect|string +|**application_uri** + +_optional_|The URI for the application's homepage|string +|**description** + +_optional_|The human-readable description for the application|string +|**avatar_email** + +_optional_|The e-mail address of the avatar to use for the application|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "redirect_uri": "", + "application_uri": "", + "description": "", + "avatar_email": "" + }' +---- \ No newline at end of file diff --git a/modules/api-organization-createProxyCacheConfig.adoc b/modules/api-organization-createProxyCacheConfig.adoc new file mode 100644 index 000000000..d9ce1787b --- /dev/null +++ b/modules/api-organization-createProxyCacheConfig.adoc @@ -0,0 +1,59 @@ + += createProxyCacheConfig +Creates proxy cache configuration for the organization. 
+ +[discrete] +== POST /api/v1/organization/{orgname}/proxycache + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Proxy cache configuration for an organization + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**upstream_registry** + +_required_|Name of the upstream registry that is to be cached|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//proxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' +---- \ No newline at end of file diff --git a/modules/api-organization-deleteAdminedOrganization.adoc b/modules/api-organization-deleteAdminedOrganization.adoc new file mode 100644 index 000000000..f761a074a --- /dev/null +++ b/modules/api-organization-deleteAdminedOrganization.adoc @@ -0,0 +1,45 @@ + += deleteAdminedOrganization +Deletes the specified organization. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- diff --git a/modules/api-organization-deleteOrganizationApplication.adoc b/modules/api-organization-deleteOrganizationApplication.adoc new file mode 100644 index 000000000..23bd6b348 --- /dev/null +++ b/modules/api-organization-deleteOrganizationApplication.adoc @@ -0,0 +1,45 @@ + += deleteOrganizationApplication +Deletes the application under this organization. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/applications/{client_id} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**client_id** + +_required_|The OAuth client ID|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/applications/{client_id}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-deleteProxyCacheConfig.adoc b/modules/api-organization-deleteProxyCacheConfig.adoc new file mode 100644 index 000000000..03ee88965 --- /dev/null +++ b/modules/api-organization-deleteProxyCacheConfig.adoc @@ -0,0 +1,41 @@ + += deleteProxyCacheConfig +Delete proxy cache configuration for the organization. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/proxycache + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getApplicationInformation.adoc b/modules/api-organization-getApplicationInformation.adoc new file mode 100644 index 000000000..355bd9621 --- /dev/null +++ b/modules/api-organization-getApplicationInformation.adoc @@ -0,0 +1,42 @@ + += getApplicationInformation +Get information on the specified application. 
+ +[discrete] +== GET /api/v1/app/{client_id} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**client_id** + +_required_|The OAuth client ID|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/app/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganization.adoc b/modules/api-organization-getOrganization.adoc new file mode 100644 index 000000000..e42557800 --- /dev/null +++ b/modules/api-organization-getOrganization.adoc @@ -0,0 +1,43 @@ + += getOrganization +Get the details for the specified organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- diff --git a/modules/api-organization-getOrganizationApplication.adoc b/modules/api-organization-getOrganizationApplication.adoc new file mode 100644 index 000000000..acbde91f2 --- /dev/null +++ b/modules/api-organization-getOrganizationApplication.adoc @@ -0,0 +1,45 @@ + += getOrganizationApplication +Retrieves the application with the specified client_id under the specified organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/applications/{client_id} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**client_id** + +_required_|The OAuth client ID|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationApplications.adoc b/modules/api-organization-getOrganizationApplications.adoc new file mode 100644 index 000000000..0fed27fd1 --- /dev/null +++ b/modules/api-organization-getOrganizationApplications.adoc @@ -0,0 +1,44 @@ + += getOrganizationApplications +List the applications for the specified organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/applications + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationCollaborators.adoc b/modules/api-organization-getOrganizationCollaborators.adoc new file mode 100644 index 000000000..8781212e7 --- /dev/null +++ b/modules/api-organization-getOrganizationCollaborators.adoc @@ -0,0 +1,43 @@ + += getOrganizationCollaborators +List outside collaborators of the specified organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/collaborators + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/collaborators" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationMember.adoc b/modules/api-organization-getOrganizationMember.adoc new file mode 100644 index 000000000..a18013b89 --- /dev/null +++ b/modules/api-organization-getOrganizationMember.adoc @@ -0,0 +1,46 @@ + += getOrganizationMember +Retrieves the details of a member of the organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/members/{membername} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**membername** + +_required_|The username of the organization member|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getOrganizationMembers.adoc b/modules/api-organization-getOrganizationMembers.adoc new file mode 100644 index 000000000..8064dcdfb --- /dev/null +++ b/modules/api-organization-getOrganizationMembers.adoc @@ -0,0 +1,43 @@ + += getOrganizationMembers +List the human members of the specified organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/members + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-getProxyCacheConfig.adoc b/modules/api-organization-getProxyCacheConfig.adoc new file mode 100644 index 000000000..b4538da59 --- /dev/null +++ b/modules/api-organization-getProxyCacheConfig.adoc @@ -0,0 +1,42 @@ + += getProxyCacheConfig +Retrieves the proxy cache configuration of the organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/proxycache + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-removeOrganizationMember.adoc b/modules/api-organization-removeOrganizationMember.adoc new file mode 100644 index 000000000..1da46ab3e --- /dev/null +++ b/modules/api-organization-removeOrganizationMember.adoc @@ -0,0 +1,47 @@ + += removeOrganizationMember +Removes a member from an organization, revoking all its repository privileges and removing + it from all teams in the organization. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/members/{membername} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**membername** + +_required_|The username of the organization member|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-organization-updateOrganizationApplication.adoc b/modules/api-organization-updateOrganizationApplication.adoc new file mode 100644 index 000000000..bd4eda0dd --- /dev/null +++ b/modules/api-organization-updateOrganizationApplication.adoc @@ -0,0 +1,74 @@ + += updateOrganizationApplication +Updates an application under this organization. + +[discrete] +== PUT /api/v1/organization/{orgname}/applications/{client_id} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**client_id** + +_required_|The OAuth client ID|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of an updated application. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**name** + +_required_|The name of the application|string +|**redirect_uri** + +_required_|The URI for the application's OAuth redirect|string +|**application_uri** + +_required_|The URI for the application's homepage|string +|**description** + +_optional_|The human-readable description for the application|string +|**avatar_email** + +_optional_|The e-mail address of the avatar to use for the application|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/organization/test/applications/12345" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Updated Application Name", + "redirect_uri": "https://example.com/oauth/callback", + "application_uri": "https://example.com", + "description": "Updated description for the application", + "avatar_email": "avatar@example.com" + }' +---- \ No newline at end of file diff --git a/modules/api-organization-validateProxyCacheConfig.adoc b/modules/api-organization-validateProxyCacheConfig.adoc new file mode 100644 index 000000000..fb1332f1e --- /dev/null +++ b/modules/api-organization-validateProxyCacheConfig.adoc @@ -0,0 +1,59 @@ + += validateProxyCacheConfig + + +[discrete] +== POST /api/v1/organization/{orgname}/validateproxycache + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + 
+Proxy cache configuration for an organization + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**upstream_registry** + +_required_|Name of the upstream registry that is to be cached|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|202|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization/{orgname}/validateproxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' + +---- \ No newline at end of file diff --git a/modules/api-organization.adoc b/modules/api-organization.adoc new file mode 100644 index 000000000..e47fd53b4 --- /dev/null +++ b/modules/api-organization.adoc @@ -0,0 +1,4 @@ + += organization +Manage organizations, members and OAuth applications. + diff --git a/modules/api-permission-changeTeamPermissions.adoc b/modules/api-permission-changeTeamPermissions.adoc new file mode 100644 index 000000000..5fb525b90 --- /dev/null +++ b/modules/api-permission-changeTeamPermissions.adoc @@ -0,0 +1,59 @@ + += changeTeamPermissions +Update the existing team permission. + +[discrete] +== PUT /api/v1/repository/{repository}/permissions/team/{teamname} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**teamname** + +_required_|The name of the team to which the permission applies|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a team permission. + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**role** + +_required_|Role to use for the team|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": ""}' \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-changeUserPermissions.adoc b/modules/api-permission-changeUserPermissions.adoc new file mode 100644 index 000000000..03b6a8736 --- /dev/null +++ b/modules/api-permission-changeUserPermissions.adoc @@ -0,0 +1,62 @@ + += changeUserPermissions +Update the permissions for an existing repository. + +[discrete] +== PUT /api/v1/repository/{repository}/permissions/user/{username} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**username** + +_required_|The username of the user to which the permission applies|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a user permission. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**role** + +_required_|Role to use for the user|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": "admin"}' \ + https:///api/v1/repository///permissions/user/ +---- \ No newline at end of file diff --git a/modules/api-permission-deleteTeamPermissions.adoc b/modules/api-permission-deleteTeamPermissions.adoc new file mode 100644 index 000000000..c6efaa201 --- /dev/null +++ b/modules/api-permission-deleteTeamPermissions.adoc @@ -0,0 +1,47 @@ + += deleteTeamPermissions +Delete the permission for the specified team. + +[discrete] +== DELETE /api/v1/repository/{repository}/permissions/team/{teamname} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**teamname** + +_required_|The name of the team to which the permission applies|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-deleteUserPermissions.adoc b/modules/api-permission-deleteUserPermissions.adoc new file mode 100644 index 000000000..7ecbc0d94 --- /dev/null +++ b/modules/api-permission-deleteUserPermissions.adoc @@ -0,0 +1,47 @@ + += deleteUserPermissions +Delete the permission for the user. + +[discrete] +== DELETE /api/v1/repository/{repository}/permissions/user/{username} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**username** + +_required_|The username of the user to which the permission applies|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user/ +---- \ No newline at end of file diff --git a/modules/api-permission-getTeamPermissions.adoc b/modules/api-permission-getTeamPermissions.adoc new file mode 100644 index 000000000..7f27a1301 --- /dev/null +++ b/modules/api-permission-getTeamPermissions.adoc @@ -0,0 +1,47 @@ + += getTeamPermissions +Fetch the permission for the specified team. + +[discrete] +== GET /api/v1/repository/{repository}/permissions/team/{teamname} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**teamname** + +_required_|The name of the team to which the permission applies|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-getUserPermissions.adoc b/modules/api-permission-getUserPermissions.adoc new file mode 100644 index 000000000..190e135d1 --- /dev/null +++ b/modules/api-permission-getUserPermissions.adoc @@ -0,0 +1,47 @@ + += getUserPermissions +Get the permission for the specified user. + +[discrete] +== GET /api/v1/repository/{repository}/permissions/user/{username} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**username** + +_required_|The username of the user to which the permission applies|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user/" +---- \ No newline at end of file diff --git a/modules/api-permission-getUserTransitivePermission.adoc b/modules/api-permission-getUserTransitivePermission.adoc new file mode 100644 index 000000000..88a504de7 --- /dev/null +++ b/modules/api-permission-getUserTransitivePermission.adoc @@ -0,0 +1,47 @@ + += getUserTransitivePermission +Fetch the permission for the specified user. + +[discrete] +== GET /api/v1/repository/{repository}/permissions/user/{username}/transitive + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**username** + +_required_|The username of the user to which the permissions apply|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user//transitive" +---- \ No newline at end of file diff --git a/modules/api-permission-listRepoTeamPermissions.adoc b/modules/api-permission-listRepoTeamPermissions.adoc new file mode 100644 index 000000000..6d08c69b4 --- /dev/null +++ b/modules/api-permission-listRepoTeamPermissions.adoc @@ -0,0 +1,44 @@ + += listRepoTeamPermissions +List all team permissions. + +[discrete] +== GET /api/v1/repository/{repository}/permissions/team/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- \ No newline at end of file diff --git a/modules/api-permission-listRepoUserPermissions.adoc b/modules/api-permission-listRepoUserPermissions.adoc new file mode 100644 index 000000000..941b4a76a --- /dev/null +++ b/modules/api-permission-listRepoUserPermissions.adoc @@ -0,0 +1,46 @@ + += listRepoUserPermissions +List all user permissions. + +[discrete] +== GET /api/v1/repository/{repository}/permissions/user/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///permissions/user// +---- \ No newline at end of file diff --git a/modules/api-permission.adoc b/modules/api-permission.adoc new file mode 100644 index 000000000..a17174a3e --- /dev/null +++ b/modules/api-permission.adoc @@ -0,0 +1,4 @@ + += permission +Manage repository permissions. + diff --git a/modules/api-policy-createOrganizationAutoPrunePolicy.adoc b/modules/api-policy-createOrganizationAutoPrunePolicy.adoc new file mode 100644 index 000000000..77d269fd1 --- /dev/null +++ b/modules/api-policy-createOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,62 @@ + += createOrganizationAutoPrunePolicy +Creates an auto-prune policy for the organization + +[discrete] +== POST /api/v1/organization/{orgname}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/organization//autoprunepolicy/ +---- \ No newline at end of file diff --git a/modules/api-policy-createRepositoryAutoPrunePolicy.adoc b/modules/api-policy-createRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..790a488e5 --- /dev/null +++ b/modules/api-policy-createRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,62 @@ + += createRepositoryAutoPrunePolicy +Creates an auto-prune policy for the repository + +[discrete] +== POST /api/v1/repository/{repository}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' http:///api/v1/repository///autoprunepolicy/ +---- \ No newline at end of file diff --git a/modules/api-policy-createUserAutoPrunePolicy.adoc b/modules/api-policy-createUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..da859db8e --- /dev/null +++ b/modules/api-policy-createUserAutoPrunePolicy.adoc @@ -0,0 +1,60 @@ + += createUserAutoPrunePolicy +Creates the auto-prune policy for the currently logged in user + +[discrete] +== POST /api/v1/user/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": 10, + "tagPattern": "v*", + "tagPatternMatches": true + }' +---- \ No newline at end of file diff --git a/modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc b/modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc new file mode 100644 index 000000000..baa6f7802 --- /dev/null +++ b/modules/api-policy-deleteOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,46 @@ + += deleteOrganizationAutoPrunePolicy +Deletes the auto-prune policy for the organization + +[discrete] +== DELETE /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not 
found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/organization/example_org/autoprunepolicy/example_policy_uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc b/modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..326fcda93 --- /dev/null +++ b/modules/api-policy-deleteRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,46 @@ + += deleteRepositoryAutoPrunePolicy +Deletes the auto-prune policy for the repository + +[discrete] +== DELETE /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/autoprunepolicy/123e4567-e89b-12d3-a456-426614174000" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-deleteUserAutoPrunePolicy.adoc b/modules/api-policy-deleteUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..2d3f5c3af --- /dev/null +++ b/modules/api-policy-deleteUserAutoPrunePolicy.adoc @@ -0,0 +1,44 @@ + += deleteUserAutoPrunePolicy +Deletes the auto-prune policy for the currently logged in user + +[discrete] +== DELETE /api/v1/user/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-getOrganizationAutoPrunePolicy.adoc b/modules/api-policy-getOrganizationAutoPrunePolicy.adoc new file mode 100644 index 
000000000..ea4db2a8a --- /dev/null +++ b/modules/api-policy-getOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,45 @@ + += getOrganizationAutoPrunePolicy +Fetches the auto-prune policy for the organization + +[discrete] +== GET /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/ +---- \ No newline at end of file diff --git a/modules/api-policy-getRepositoryAutoPrunePolicy.adoc b/modules/api-policy-getRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..4a4dc6a8b --- /dev/null +++ b/modules/api-policy-getRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,46 @@ + += getRepositoryAutoPrunePolicy +Fetches the auto-prune policy for the repository + +[discrete] +== GET /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/autoprunepolicy/123e4567-e89b-12d3-a456-426614174000" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-getUserAutoPrunePolicy.adoc b/modules/api-policy-getUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..d7bc517af --- /dev/null +++ b/modules/api-policy-getUserAutoPrunePolicy.adoc @@ -0,0 +1,44 @@ + += getUserAutoPrunePolicy +Fetches the auto-prune policy for the currently logged in user + +[discrete] +== GET /api/v1/user/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/autoprunepolicy/{policy_uuid}" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-listOrganizationAutoPrunePolicies.adoc b/modules/api-policy-listOrganizationAutoPrunePolicies.adoc new 
file mode 100644 index 000000000..ee4603864 --- /dev/null +++ b/modules/api-policy-listOrganizationAutoPrunePolicies.adoc @@ -0,0 +1,44 @@ + += listOrganizationAutoPrunePolicies +Lists the auto-prune policies for the organization + +[discrete] +== GET /api/v1/organization/{orgname}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/organization/example_org/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-listRepositoryAutoPrunePolicies.adoc b/modules/api-policy-listRepositoryAutoPrunePolicies.adoc new file mode 100644 index 000000000..7bc2470e5 --- /dev/null +++ b/modules/api-policy-listRepositoryAutoPrunePolicies.adoc @@ -0,0 +1,44 @@ + += listRepositoryAutoPrunePolicies +Lists the auto-prune policies for the repository + +[discrete] +== GET /api/v1/repository/{repository}/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-listUserAutoPrunePolicies.adoc b/modules/api-policy-listUserAutoPrunePolicies.adoc new file mode 100644 index 000000000..2e1cadf35 --- /dev/null +++ b/modules/api-policy-listUserAutoPrunePolicies.adoc @@ -0,0 +1,34 @@ + += listUserAutoPrunePolicies +Lists the auto-prune policies for the currently logged in user + +[discrete] +== GET /api/v1/user/autoprunepolicy/ + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-policy-updateOrganizationAutoPrunePolicy.adoc b/modules/api-policy-updateOrganizationAutoPrunePolicy.adoc new file mode 100644 index 000000000..f9f7dd659 --- /dev/null +++ b/modules/api-policy-updateOrganizationAutoPrunePolicy.adoc @@ -0,0 +1,69 @@ + += updateOrganizationAutoPrunePolicy +Updates the auto-prune policy for the organization + +[discrete] +== PUT 
/api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' "/api/v1/organization//autoprunepolicy/" +---- \ No newline at end of file diff --git a/modules/api-policy-updateRepositoryAutoPrunePolicy.adoc b/modules/api-policy-updateRepositoryAutoPrunePolicy.adoc new file mode 100644 index 000000000..b50a407f2 --- /dev/null +++ b/modules/api-policy-updateRepositoryAutoPrunePolicy.adoc @@ -0,0 +1,73 @@ + += 
updateRepositoryAutoPrunePolicy +Updates the auto-prune policy for the repository + +[discrete] +== PUT /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "5", + "tagPattern": "^test.*", + "tagPatternMatches": true + }' \ + "https://quay-server.example.com/api/v1/repository///autoprunepolicy/" +---- \ No newline at end of file diff --git a/modules/api-policy-updateUserAutoPrunePolicy.adoc 
b/modules/api-policy-updateUserAutoPrunePolicy.adoc new file mode 100644 index 000000000..41a9622e4 --- /dev/null +++ b/modules/api-policy-updateUserAutoPrunePolicy.adoc @@ -0,0 +1,70 @@ + += updateUserAutoPrunePolicy +Updates the auto-prune policy for the currently logged in user + +[discrete] +== PUT /api/v1/user/autoprunepolicy/{policy_uuid} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**policy_uuid** + +_required_|The unique ID of the policy|string +|=== + + +[discrete] +== Request body schema (application/json) + +The policy configuration that is to be applied to the user namespace + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**method** + +_required_|The method to use for pruning tags (number_of_tags, creation_date)|string +|**value** + +_required_|The value to use for the pruning method (number of tags e.g. 10, time delta e.g. 
7d (7 days))| +|**tagPattern** + +_optional_|Tags only matching this pattern will be pruned|string +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/user/autoprunepolicy/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "10", + "tagPattern": ".*-old", + "tagPatternMatches": true + }' +---- \ No newline at end of file diff --git a/modules/api-policy.adoc b/modules/api-policy.adoc new file mode 100644 index 000000000..5b2896375 --- /dev/null +++ b/modules/api-policy.adoc @@ -0,0 +1,4 @@ + += policy + + diff --git a/modules/api-prototype-createOrganizationPrototypePermission.adoc b/modules/api-prototype-createOrganizationPrototypePermission.adoc new file mode 100644 index 000000000..d482be834 --- /dev/null +++ b/modules/api-prototype-createOrganizationPrototypePermission.adoc @@ -0,0 +1,69 @@ + += createOrganizationPrototypePermission +Create a new permission prototype. 
+ +[discrete] +== POST /api/v1/organization/{orgname}/prototypes + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new prototype + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**role** + +_required_|Role that should be applied to the delegate|string +|**activating_user** + +_optional_|Repository creating user to whom the rule should apply|object +|**delegate** + +_required_|Information about the user or team to which the rule grants access|object +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" --data '{ + "role": "", + "delegate": { + "name": "", + "kind": "user" + }, + "activating_user": { + "name": "" + } + }' https:///api/v1/organization//prototypes +---- \ No newline at end of file diff --git a/modules/api-prototype-deleteOrganizationPrototypePermission.adoc b/modules/api-prototype-deleteOrganizationPrototypePermission.adoc new file mode 100644 index 000000000..73a0ebc3b --- /dev/null +++ b/modules/api-prototype-deleteOrganizationPrototypePermission.adoc @@ -0,0 +1,47 @@ + += deleteOrganizationPrototypePermission +Delete an existing permission prototype. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/prototypes/{prototypeid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**prototypeid** + +_required_|The ID of the prototype|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes/ +---- \ No newline at end of file diff --git a/modules/api-prototype-getOrganizationPrototypePermissions.adoc b/modules/api-prototype-getOrganizationPrototypePermissions.adoc new file mode 100644 index 000000000..b1cd84dde --- /dev/null +++ b/modules/api-prototype-getOrganizationPrototypePermissions.adoc @@ -0,0 +1,46 @@ + += getOrganizationPrototypePermissions +List the existing prototypes for this organization. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/prototypes + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes +---- \ No newline at end of file diff --git a/modules/api-prototype-updateOrganizationPrototypePermission.adoc b/modules/api-prototype-updateOrganizationPrototypePermission.adoc new file mode 100644 index 000000000..66823707f --- /dev/null +++ b/modules/api-prototype-updateOrganizationPrototypePermission.adoc @@ -0,0 +1,63 @@ + += updateOrganizationPrototypePermission +Update the role of an existing permission prototype. 
+ +[discrete] +== PUT /api/v1/organization/{orgname}/prototypes/{prototypeid} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**prototypeid** + +_required_|The ID of the prototype|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a the new prototype role + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**role** + +_optional_|Role that should be applied to the permission|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "role": "write" + }' \ + https:///api/v1/organization//prototypes/ +---- diff --git a/modules/api-prototype.adoc b/modules/api-prototype.adoc new file mode 100644 index 000000000..1842d00ae --- /dev/null +++ b/modules/api-prototype.adoc @@ -0,0 +1,4 @@ + += prototype +Manage default permissions added to repositories. + diff --git a/modules/api-referrers-getReferrers.adoc b/modules/api-referrers-getReferrers.adoc new file mode 100644 index 000000000..657d07ebc --- /dev/null +++ b/modules/api-referrers-getReferrers.adoc @@ -0,0 +1,23 @@ + += getReferrers +List v2 API referrers of an image digest. + +[discrete] +== GET /v2/{organization_name}/{repository_name}/referrers/{digest} + +[discrete] +== Request body schema (application/json) + +Referrers of an image digest. 
+ +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**referrers** + +_required_| Looks up the OCI referrers of a manifest under a repository.|string +|**manifest_digest** + +_required_|The digest of the manifest|string +|=== \ No newline at end of file diff --git a/modules/api-referrers.adoc b/modules/api-referrers.adoc new file mode 100644 index 000000000..b745bcddb --- /dev/null +++ b/modules/api-referrers.adoc @@ -0,0 +1,3 @@ + += referrers +List v2 API referrers \ No newline at end of file diff --git a/modules/api-repository-changeRepoState.adoc b/modules/api-repository-changeRepoState.adoc new file mode 100644 index 000000000..fbe6f32f2 --- /dev/null +++ b/modules/api-repository-changeRepoState.adoc @@ -0,0 +1,52 @@ + += changeRepoState +Change the state of a repository. + +[discrete] +== PUT /api/v1/repository/{repository}/changestate + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Change the state of the repository. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**state** + +_required_|Determines whether pushes are allowed.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + diff --git a/modules/api-repository-changeRepoVisibility.adoc b/modules/api-repository-changeRepoVisibility.adoc new file mode 100644 index 000000000..4611ebffd --- /dev/null +++ b/modules/api-repository-changeRepoVisibility.adoc @@ -0,0 +1,62 @@ + += changeRepoVisibility +Change the visibility of a repository. + +[discrete] +== POST /api/v1/repository/{repository}/changevisibility + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Change the visibility for the repository. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**visibility** + +_required_|Visibility which the repository will start with|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example Command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository///changevisibility" +---- \ No newline at end of file diff --git a/modules/api-repository-createRepo.adoc b/modules/api-repository-createRepo.adoc new file mode 100644 index 000000000..a4915773f --- /dev/null +++ b/modules/api-repository-createRepo.adoc @@ -0,0 +1,62 @@ + += createRepo +Create a new repository. + +[discrete] +== POST /api/v1/repository + + + +**Authorizations: **oauth2_implicit (**repo:create**) + + + +[discrete] +== Request body schema (application/json) + +Description of a new repository + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**repository** + +_required_|Repository name|string +|**visibility** + +_required_|Visibility which the repository will start with|string +|**namespace** + +_optional_|Namespace in which the repository should be created. 
If omitted, the username of the caller is used|string +|**description** + +_required_|Markdown encoded description for the repository|string +|**repo_kind** + +_optional_|The kind of repository| +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." + }' \ + "https://quay-server.example.com/api/v1/repository" +---- \ No newline at end of file diff --git a/modules/api-repository-deleteRepository.adoc b/modules/api-repository-deleteRepository.adoc new file mode 100644 index 000000000..55a682a1a --- /dev/null +++ b/modules/api-repository-deleteRepository.adoc @@ -0,0 +1,43 @@ + += deleteRepository +Delete a repository. + +[discrete] +== DELETE /api/v1/repository/{repository} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- \ No newline at end of file diff --git a/modules/api-repository-getRepo.adoc b/modules/api-repository-getRepo.adoc new file mode 100644 index 000000000..e38283cb4 --- /dev/null +++ b/modules/api-repository-getRepo.adoc @@ -0,0 +1,56 @@ + += getRepo +Fetch the specified repository. + +[discrete] +== GET /api/v1/repository/{repository} + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**includeTags** + +_optional_|Whether to include repository tags|boolean +|query|**includeStats** + +_optional_|Whether to include action statistics|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- \ No newline at end of file diff --git a/modules/api-repository-listRepos.adoc b/modules/api-repository-listRepos.adoc new file mode 100644 index 000000000..6b27cc238 --- /dev/null +++ b/modules/api-repository-listRepos.adoc @@ -0,0 +1,58 @@ + += listRepos +Fetch the list of repositories visible to the current user under a variety of situations. 
+ +[discrete] +== GET /api/v1/repository + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**repo_kind** + +_optional_|The kind of repositories to return|string +|query|**popularity** + +_optional_|Whether to include the repository's popularity metric.|boolean +|query|**last_modified** + +_optional_|Whether to include when the repository was last modified.|boolean +|query|**public** + +_required_|Adds any repositories visible to the user by virtue of being public|boolean +|query|**starred** + +_required_|Filters the repositories returned to those starred by the user|boolean +|query|**namespace** + +_required_|Filters the repositories returned to this namespace|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository?public=true&starred=false&namespace=" +---- \ No newline at end of file diff --git a/modules/api-repository-updateRepo.adoc b/modules/api-repository-updateRepo.adoc new file mode 100644 index 000000000..3ed1af06b --- /dev/null +++ b/modules/api-repository-updateRepo.adoc @@ -0,0 +1,61 @@ + += updateRepo +Update the description in the specified repository. 
+ +[discrete] +== PUT /api/v1/repository/{repository} + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Fields which can be updated in a repository. + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**description** + +_required_|Markdown encoded description for the repository|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "description": "This is an updated description for the repository." + }' \ + "https://quay-server.example.com/api/v1/repository//" +---- \ No newline at end of file diff --git a/modules/api-repository.adoc b/modules/api-repository.adoc new file mode 100644 index 000000000..ee8a198e1 --- /dev/null +++ b/modules/api-repository.adoc @@ -0,0 +1,4 @@ + += repository +List, create and manage repositories. 
+ diff --git a/modules/api-repositorynotification-createRepoNotification.adoc b/modules/api-repositorynotification-createRepoNotification.adoc new file mode 100644 index 000000000..050a1649b --- /dev/null +++ b/modules/api-repositorynotification-createRepoNotification.adoc @@ -0,0 +1,76 @@ + += createRepoNotification + + +[discrete] +== POST /api/v1/repository/{repository}/notification/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Information for creating a notification on a repository + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**event** + +_required_|The event on which the notification will respond|string +|**method** + +_required_|The method of notification (such as email or web callback)|string +|**config** + +_required_|JSON config information for the specific method of notification|object +|**eventConfig** + +_required_|JSON config information for the specific event of notification|object +|**title** + +_optional_|The human-readable title of the notification|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "event": "", + "method": "", + "config": { + "": "" + }, + "eventConfig": { + "": "" + } + }' \ + https:///api/v1/repository///notification/ +---- \ 
No newline at end of file diff --git a/modules/api-repositorynotification-deleteRepoNotification.adoc b/modules/api-repositorynotification-deleteRepoNotification.adoc new file mode 100644 index 000000000..85ec967ab --- /dev/null +++ b/modules/api-repositorynotification-deleteRepoNotification.adoc @@ -0,0 +1,46 @@ + += deleteRepoNotification +Deletes the specified notification. + +[discrete] +== DELETE /api/v1/repository/{repository}/notification/{uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository///notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-getRepoNotification.adoc b/modules/api-repositorynotification-getRepoNotification.adoc new file mode 100644 index 000000000..9a8ea167c --- /dev/null +++ b/modules/api-repositorynotification-getRepoNotification.adoc @@ -0,0 +1,46 @@ + += getRepoNotification +Get information for the specified notification. 
+ +[discrete] +== GET /api/v1/repository/{repository}/notification/{uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command
[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-listRepoNotifications.adoc b/modules/api-repositorynotification-listRepoNotifications.adoc new file mode 100644 index 000000000..7b9c26085 --- /dev/null +++ b/modules/api-repositorynotification-listRepoNotifications.adoc @@ -0,0 +1,43 @@ + += listRepoNotifications +List the notifications for the specified repository. + +[discrete] +== GET /api/v1/repository/{repository}/notification/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" https:///api/v1/repository///notification +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc b/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc new file mode 100644 index 000000000..96c4da3cb --- /dev/null +++ b/modules/api-repositorynotification-resetRepositoryNotificationFailures.adoc @@ -0,0 +1,47 @@ + += resetRepositoryNotificationFailures +Resets repository notification to 0 failures. + +[discrete] +== POST /api/v1/repository/{repository}/notification/{uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification/ +---- \ No newline at end of file diff --git a/modules/api-repositorynotification-testRepoNotification.adoc b/modules/api-repositorynotification-testRepoNotification.adoc new file mode 100644 index 000000000..b03145fae --- /dev/null +++ b/modules/api-repositorynotification-testRepoNotification.adoc @@ -0,0 +1,46 @@ + += testRepoNotification +Queues a test notification for this repository. + +[discrete] +== POST /api/v1/repository/{repository}/notification/{uuid}/test + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**uuid** + +_required_|The UUID of the notification|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification//test +---- \ No newline at end of file diff --git a/modules/api-repositorynotification.adoc b/modules/api-repositorynotification.adoc new file mode 100644 index 000000000..f37016266 --- /dev/null +++ b/modules/api-repositorynotification.adoc @@ -0,0 +1,4 @@ + += repositorynotification +List, create and manage repository events/notifications. + diff --git a/modules/api-repotoken-changeToken.adoc b/modules/api-repotoken-changeToken.adoc new file mode 100644 index 000000000..80fd33677 --- /dev/null +++ b/modules/api-repotoken-changeToken.adoc @@ -0,0 +1,50 @@ + += changeToken +Update the permissions for the specified repository token. + +[discrete] +== PUT /api/v1/repository/{repository}/tokens/{code} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**code** + +_required_|The token code|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a token permission + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**role** + +_optional_|Role to use for the token|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-repotoken-createToken.adoc b/modules/api-repotoken-createToken.adoc new file mode 100644 index 000000000..2997cb0a1 --- /dev/null +++ b/modules/api-repotoken-createToken.adoc @@ -0,0 +1,48 @@ + += createToken +Create a new repository token. + +[discrete] +== POST /api/v1/repository/{repository}/tokens/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new token. 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**friendlyName** + +_required_|Friendly name to help identify the token|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-repotoken-deleteToken.adoc b/modules/api-repotoken-deleteToken.adoc new file mode 100644 index 000000000..de59b8169 --- /dev/null +++ b/modules/api-repotoken-deleteToken.adoc @@ -0,0 +1,37 @@ + += deleteToken +Delete the repository token. + +[discrete] +== DELETE /api/v1/repository/{repository}/tokens/{code} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**code** + +_required_|The token code|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-repotoken-getTokens.adoc b/modules/api-repotoken-getTokens.adoc new file mode 100644 index 000000000..7ab3a6f54 --- /dev/null +++ b/modules/api-repotoken-getTokens.adoc @@ -0,0 +1,37 @@ + += getTokens +Fetch the specified repository token information. 
+ +[discrete] +== GET /api/v1/repository/{repository}/tokens/{code} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**code** + +_required_|The token code|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-repotoken-listRepoTokens.adoc b/modules/api-repotoken-listRepoTokens.adoc new file mode 100644 index 000000000..2c4b124d6 --- /dev/null +++ b/modules/api-repotoken-listRepoTokens.adoc @@ -0,0 +1,35 @@ + += listRepoTokens +List the tokens for the specified repository. + +[discrete] +== GET /api/v1/repository/{repository}/tokens/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-repotoken.adoc b/modules/api-repotoken.adoc new file mode 100644 index 000000000..f5dba68e3 --- /dev/null +++ b/modules/api-repotoken.adoc @@ -0,0 +1,4 @@ + += repotoken +Manage repository access tokens (DEPRECATED). 
+ diff --git a/modules/api-robot-createOrgRobot.adoc b/modules/api-robot-createOrgRobot.adoc new file mode 100644 index 000000000..e0a84277a --- /dev/null +++ b/modules/api-robot-createOrgRobot.adoc @@ -0,0 +1,60 @@ + += createOrgRobot +Create a new robot in the organization. + +[discrete] +== PUT /api/v1/organization/{orgname}/robots/{robot_shortname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Optional data for creating a robot + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**description** + +_optional_|Optional text description for the robot|string +|**unstructured_metadata** + +_optional_|Optional unstructured metadata for the robot|object +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/organization//robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-createOrgRobotFederation.adoc b/modules/api-robot-createOrgRobotFederation.adoc new file mode 100644 index 000000000..4a209df26 --- /dev/null +++ b/modules/api-robot-createOrgRobotFederation.adoc @@ -0,0 +1,42 @@ += createOrgRobotFederation + +Create a federation configuration for the specified organization robot. 
 + +[discrete] +== POST /api/v1/organization/{orgname}/robots/{robot_shortname}/federation + +Create a federation configuration for the specified organization robot. + +**Authorizations: **oauth2_implicit (**user:admin**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|*orgname* + *robot_shortname* +_required_|The name of the organization and the short name for the robot, without any user or organization prefix|string +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful invocation | +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/organization/{orgname}/robots/{robot_shortname}/federation" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" +---- diff --git a/modules/api-robot-createUserRobot.adoc b/modules/api-robot-createUserRobot.adoc new file mode 100644 index 000000000..bdd603fce --- /dev/null +++ b/modules/api-robot-createUserRobot.adoc @@ -0,0 +1,58 @@ + += createUserRobot +Create a new user robot with the specified name. 
+ +[discrete] +== PUT /api/v1/user/robots/{robot_shortname} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|=== + + +[discrete] +== Request body schema (application/json) + +Optional data for creating a robot + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**description** + +_optional_|Optional text description for the robot|string +|**unstructured_metadata** + +_optional_|Optional unstructured metadata for the robot|object +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/user/robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-deleteOrgRobot.adoc b/modules/api-robot-deleteOrgRobot.adoc new file mode 100644 index 000000000..36c01dffa --- /dev/null +++ b/modules/api-robot-deleteOrgRobot.adoc @@ -0,0 +1,47 @@ + += deleteOrgRobot +Delete an existing organization robot. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/robots/{robot_shortname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-deleteOrgRobotFederation.adoc b/modules/api-robot-deleteOrgRobotFederation.adoc new file mode 100644 index 000000000..5d0f48dfc --- /dev/null +++ b/modules/api-robot-deleteOrgRobotFederation.adoc @@ -0,0 +1,31 @@ += deleteOrgRobotFederation + +Delete a federation configuration for the specified organization robot. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/robots/{robot_shortname}/federation + +**Authorizations: **oauth2_implicit (org) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|orgname + +_required_|The name of the organization and the short name for the robot, without any user or organization prefix|string +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== \ No newline at end of file diff --git a/modules/api-robot-deleteUserRobot.adoc b/modules/api-robot-deleteUserRobot.adoc new file mode 100644 index 000000000..43d62542a --- /dev/null +++ b/modules/api-robot-deleteUserRobot.adoc @@ -0,0 +1,44 @@ + += deleteUserRobot +Delete an existing robot. 
+ +[discrete] +== DELETE /api/v1/user/robots/{robot_shortname} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-getOrgRobot.adoc b/modules/api-robot-getOrgRobot.adoc new file mode 100644 index 000000000..83470ac3a --- /dev/null +++ b/modules/api-robot-getOrgRobot.adoc @@ -0,0 +1,47 @@ + += getOrgRobot +Returns the organization's robot with the specified name. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/robots/{robot_shortname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-getOrgRobotFederation.adoc b/modules/api-robot-getOrgRobotFederation.adoc new file mode 100644 index 000000000..5b5bf1575 --- /dev/null +++ b/modules/api-robot-getOrgRobotFederation.adoc @@ -0,0 +1,72 @@ +//// += getOrgRobotFederation + +Manage federation configuration for a robot account within an organization. + +[discrete] +== GET /api/v1/organization/{orgname}/robots/{robot_shortname}/federation + +Retrieve the federation configuration for the specified organization robot. 
+ +**Authorizations: **oauth2_implicit (**user:admin**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|*orgname* + *robot_shortname* +_required_|The name of the organization and the short name for the robot, without any user or organization prefix|string +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== +//// + += Auth Federated Robot Token +Return an expiring robot token using the robot identity federation mechanism. + +[discrete] +== GET oauth2/federation/robot/token + +**Authorizations:** oauth2_implicit (**robot:auth**) + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful authentication and token generation|{ "token": "string" } +|401|Unauthorized: missing or invalid authentication|{ "error": "string" } +|=== + +[discrete] +== Request Body + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|body|**auth_result** + +_required_|The result of the authentication process, containing information about the robot identity.|{ "missing": "boolean", "error_message": "string", "context": { "robot": "RobotObject" } } +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/oauth2/federation/robot/token" \ + -H "Authorization: Bearer " +---- diff --git a/modules/api-robot-getOrgRobotPermissions.adoc b/modules/api-robot-getOrgRobotPermissions.adoc new file mode 100644 index 000000000..e2c965363 --- /dev/null +++ b/modules/api-robot-getOrgRobotPermissions.adoc @@ -0,0 +1,47 @@ + += getOrgRobotPermissions +Returns 
the list of repository permissions for the org's robot. + +[discrete] +== GET /api/v1/organization/{orgname}/robots/{robot_shortname}/permissions + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots//permissions" +---- \ No newline at end of file diff --git a/modules/api-robot-getOrgRobots.adoc b/modules/api-robot-getOrgRobots.adoc new file mode 100644 index 000000000..09b2dfab0 --- /dev/null +++ b/modules/api-robot-getOrgRobots.adoc @@ -0,0 +1,58 @@ + += getOrgRobots +List the organization's robots. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/robots + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**limit** + +_optional_|If specified, the number of robots to return.|integer +|query|**token** + +_optional_|If false, the robot's token is not returned.|boolean +|query|**permissions** + +_optional_|Whether to include repositories and teams in which the robots have permission.|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/organization//robots" +---- \ No newline at end of file diff --git a/modules/api-robot-getUserRobot.adoc b/modules/api-robot-getUserRobot.adoc new file mode 100644 index 000000000..5266691f1 --- /dev/null +++ b/modules/api-robot-getUserRobot.adoc @@ -0,0 +1,45 @@ + += getUserRobot +Returns the user's robot with the specified name. 
+ +[discrete] +== GET /api/v1/user/robots/{robot_shortname} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- \ No newline at end of file diff --git a/modules/api-robot-getUserRobotPermissions.adoc b/modules/api-robot-getUserRobotPermissions.adoc new file mode 100644 index 000000000..9e4ee24cf --- /dev/null +++ b/modules/api-robot-getUserRobotPermissions.adoc @@ -0,0 +1,45 @@ + += getUserRobotPermissions +Returns the list of repository permissions for the user's robot. 
+ +[discrete] +== GET /api/v1/user/robots/{robot_shortname}/permissions + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/user/robots//permissions" +---- \ No newline at end of file diff --git a/modules/api-robot-getUserRobots.adoc b/modules/api-robot-getUserRobots.adoc new file mode 100644 index 000000000..73adeb051 --- /dev/null +++ b/modules/api-robot-getUserRobots.adoc @@ -0,0 +1,49 @@ + += getUserRobots +List the available robots for the user. 
+ +[discrete] +== GET /api/v1/user/robots + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**limit** + +_optional_|If specified, the number of robots to return.|integer +|query|**token** + +_optional_|If false, the robot's token is not returned.|boolean +|query|**permissions** + +_optional_|Whether to include repositories and teams in which the robots have permission.|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/robots?limit=10&token=false&permissions=true" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-robot-regenerateOrgRobotToken.adoc b/modules/api-robot-regenerateOrgRobotToken.adoc new file mode 100644 index 000000000..b9a91f01c --- /dev/null +++ b/modules/api-robot-regenerateOrgRobotToken.adoc @@ -0,0 +1,47 @@ + += regenerateOrgRobotToken +Regenerates the token for an organization robot. 
+ +[discrete] +== POST /api/v1/organization/{orgname}/robots/{robot_shortname}/regenerate + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots//regenerate" +---- \ No newline at end of file diff --git a/modules/api-robot-regenerateUserRobotToken.adoc b/modules/api-robot-regenerateUserRobotToken.adoc new file mode 100644 index 000000000..49cb140dc --- /dev/null +++ b/modules/api-robot-regenerateUserRobotToken.adoc @@ -0,0 +1,44 @@ + += regenerateUserRobotToken +Regenerates the token for a user's robot. 
+ +[discrete] +== POST /api/v1/user/robots/{robot_shortname}/regenerate + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**robot_shortname** + +_required_|The short name for the robot, without any user or organization prefix|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots//regenerate" +---- \ No newline at end of file diff --git a/modules/api-robot.adoc b/modules/api-robot.adoc new file mode 100644 index 000000000..459252f46 --- /dev/null +++ b/modules/api-robot.adoc @@ -0,0 +1,4 @@ + += robot +Manage user and organization robot accounts. + diff --git a/modules/api-search-conductRepoSearch.adoc b/modules/api-search-conductRepoSearch.adoc new file mode 100644 index 000000000..cf502e94c --- /dev/null +++ b/modules/api-search-conductRepoSearch.adoc @@ -0,0 +1,46 @@ + += conductRepoSearch +Get a list of apps and repositories that match the specified query. 
+ +[discrete] +== GET /api/v1/find/repositories + + + +**Authorizations: ** + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**includeUsage** + +_optional_|Whether to include usage metadata|boolean +|query|**page** + +_optional_|The page.|integer +|query|**query** + +_optional_|The search query.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/repositories?query=&page=1&includeUsage=true" \ + -H "Authorization: Bearer " +---- diff --git a/modules/api-search-conductSearch.adoc b/modules/api-search-conductSearch.adoc new file mode 100644 index 000000000..d5a01015f --- /dev/null +++ b/modules/api-search-conductSearch.adoc @@ -0,0 +1,44 @@ + += conductSearch +Get a list of entities and resources that match the specified query. 
+ +[discrete] +== GET /api/v1/find/all + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**query** + +_optional_|The search query.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/all?query=" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-search-getMatchingEntities.adoc b/modules/api-search-getMatchingEntities.adoc new file mode 100644 index 000000000..d0921434c --- /dev/null +++ b/modules/api-search-getMatchingEntities.adoc @@ -0,0 +1,56 @@ + += getMatchingEntities +Get a list of entities that match the specified prefix. 
+ +[discrete] +== GET /api/v1/entities/{prefix} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**prefix** + +_required_||string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**includeOrgs** + +_optional_|Whether to include orgs names.|boolean +|query|**includeTeams** + +_optional_|Whether to include team names.|boolean +|query|**namespace** + +_optional_|Namespace to use when querying for org entities.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/entities/?includeOrgs=&includeTeams=&namespace=" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-search.adoc b/modules/api-search.adoc new file mode 100644 index 000000000..fdf8d1aa9 --- /dev/null +++ b/modules/api-search.adoc @@ -0,0 +1,4 @@ + += search +Conduct searches against all registry context. 
+ diff --git a/modules/api-secscan-getRepoManifestSecurity.adoc b/modules/api-secscan-getRepoManifestSecurity.adoc new file mode 100644 index 000000000..03b96004b --- /dev/null +++ b/modules/api-secscan-getRepoManifestSecurity.adoc @@ -0,0 +1,58 @@ + += getRepoManifestSecurity + + +[discrete] +== GET /api/v1/repository/{repository}/manifest/{manifestref}/security + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**manifestref** + +_required_|The digest of the manifest|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**vulnerabilities** + +_optional_|Include vulnerability information|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https://quay-server.example.com/api/v1/repository///manifest//security?vulnerabilities=" +---- \ No newline at end of file diff --git a/modules/api-secscan.adoc b/modules/api-secscan.adoc new file mode 100644 index 000000000..06d2b7b98 --- /dev/null +++ b/modules/api-secscan.adoc @@ -0,0 +1,4 @@ + += secscan +List and manage repository vulnerabilities and other security information. 
+ diff --git a/modules/api-superuser-approveServiceKey.adoc b/modules/api-superuser-approveServiceKey.adoc new file mode 100644 index 000000000..f1378e460 --- /dev/null +++ b/modules/api-superuser-approveServiceKey.adoc @@ -0,0 +1,62 @@ + += approveServiceKey + + +[discrete] +== POST /api/v1/superuser/approvedkeys/{kid} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**kid** + +_required_|The unique identifier for a service key|string +|=== + + +[discrete] +== Request body schema (application/json) + +Information for approving service keys + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**notes** + +_optional_|Optional approval notes|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "notes": "" + }' \ + "https:///api/v1/superuser/approvedkeys/" +---- \ No newline at end of file diff --git a/modules/api-superuser-changeOrganization.adoc b/modules/api-superuser-changeOrganization.adoc new file mode 100644 index 000000000..828cc8b20 --- /dev/null +++ b/modules/api-superuser-changeOrganization.adoc @@ -0,0 +1,71 @@ + += changeOrganization +Updates information about the specified organization. 
+ +[discrete] +== PUT /api/v1/superuser/organizations/{name} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**name** + +_required_|The name of the organization being managed|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of updates for an existing organization + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**email** + +_optional_|Organization contact email|string +|**invoice_email** + +_optional_|Whether the organization desires to receive emails for invoices|boolean +|**invoice_email_address** + +_optional_|The email address at which to receive invoices| +|**tag_expiration_s** + +_optional_|The number of seconds for tag expiration|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "email": "", + "invoice_email": , + "invoice_email_address": "", + "tag_expiration_s": + }' \ + "https:///api/v1/superuser/organizations/" +---- \ No newline at end of file diff --git a/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc b/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc new file mode 100644 index 000000000..e11cb8ec5 --- /dev/null +++ b/modules/api-superuser-changeOrganizationQuotaSuperUser.adoc @@ -0,0 +1,63 @@ + += changeOrganizationQuotaSuperUser + + +[discrete] +== PUT /api/v1/superuser/users/{namespace}/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit 
(**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|path|**quota_id** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**limit_bytes** + +_optional_|Number of bytes the organization is allowed|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-changeUserQuotaSuperUser.adoc b/modules/api-superuser-changeUserQuotaSuperUser.adoc new file mode 100644 index 000000000..0517f8a3d --- /dev/null +++ b/modules/api-superuser-changeUserQuotaSuperUser.adoc @@ -0,0 +1,63 @@ + += changeUserQuotaSuperUser + + +[discrete] +== PUT /api/v1/superuser/organization/{namespace}/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|path|**quota_id** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema 
+|**limit_bytes** + +_optional_|Number of bytes the organization is allowed|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-createInstallUser.adoc b/modules/api-superuser-createInstallUser.adoc new file mode 100644 index 000000000..b2100618e --- /dev/null +++ b/modules/api-superuser-createInstallUser.adoc @@ -0,0 +1,51 @@ + += createInstallUser +Creates a new user. + +[discrete] +== POST /api/v1/superuser/users/ + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Request body schema (application/json) + +Data for creating a user + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**username** + +_required_|The username of the user being created|string +|**email** + +_optional_|The email address of the user being created|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "username": "newuser", + "email": "newuser@example.com" +}' 
"https:///api/v1/superuser/users/" +---- \ No newline at end of file diff --git a/modules/api-superuser-createOrganizationQuotaSuperUser.adoc b/modules/api-superuser-createOrganizationQuotaSuperUser.adoc new file mode 100644 index 000000000..861aff96a --- /dev/null +++ b/modules/api-superuser-createOrganizationQuotaSuperUser.adoc @@ -0,0 +1,61 @@ + += createOrganizationQuotaSuperUser + + +[discrete] +== POST /api/v1/superuser/users/{namespace}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**limit_bytes** + +_optional_|Number of bytes the organization is allowed|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-createServiceKey.adoc b/modules/api-superuser-createServiceKey.adoc new file mode 100644 index 000000000..96fe1b1cc --- /dev/null +++ b/modules/api-superuser-createServiceKey.adoc @@ -0,0 +1,61 @@ + += createServiceKey + + +[discrete] +== POST /api/v1/superuser/keys + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Request body schema 
(application/json) + +Description of creation of a service key + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**service** + +_required_|The service authenticating with this key|string +|**name** + +_optional_|The friendly name of a service key|string +|**metadata** + +_optional_|The key/value pairs of this key's metadata|object +|**notes** + +_optional_|If specified, the extra notes for the key|string +|**expiration** + +_required_|The expiration date as a unix timestamp| +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "service": "", + "expiration": + }' \ + "/api/v1/superuser/keys" +---- \ No newline at end of file diff --git a/modules/api-superuser-createUserQuotaSuperUser.adoc b/modules/api-superuser-createUserQuotaSuperUser.adoc new file mode 100644 index 000000000..cf7f93ffc --- /dev/null +++ b/modules/api-superuser-createUserQuotaSuperUser.adoc @@ -0,0 +1,61 @@ + += createUserQuotaSuperUser + + +[discrete] +== POST /api/v1/superuser/organization/{namespace}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a new organization quota + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**limit_bytes** + +_required_|Number of bytes the organization is allowed|integer 
+|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240 + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteInstallUser.adoc b/modules/api-superuser-deleteInstallUser.adoc new file mode 100644 index 000000000..15774f8e3 --- /dev/null +++ b/modules/api-superuser-deleteInstallUser.adoc @@ -0,0 +1,46 @@ + += deleteInstallUser +Deletes a user. + +[discrete] +== DELETE /api/v1/superuser/users/{username} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Request body schema (application/json) + +Data for deleting a user + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**username** + +_required_|The username of the user being deleted|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "https:///api/v1/superuser/users/{username}" +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteOrganization.adoc b/modules/api-superuser-deleteOrganization.adoc new file mode 100644 index 000000000..4d0604844 --- /dev/null +++ 
b/modules/api-superuser-deleteOrganization.adoc @@ -0,0 +1,45 @@ + += deleteOrganization +Deletes the specified organization. + +[discrete] +== DELETE /api/v1/superuser/organizations/{name} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**name** + +_required_|The name of the organization being managed|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/organizations/" +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc b/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc new file mode 100644 index 000000000..7eba4b703 --- /dev/null +++ b/modules/api-superuser-deleteOrganizationQuotaSuperUser.adoc @@ -0,0 +1,46 @@ + += deleteOrganizationQuotaSuperUser + + +[discrete] +== DELETE /api/v1/superuser/users/{namespace}/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|path|**quota_id** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + 
+[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteServiceKey.adoc b/modules/api-superuser-deleteServiceKey.adoc new file mode 100644 index 000000000..e3a3ef0ac --- /dev/null +++ b/modules/api-superuser-deleteServiceKey.adoc @@ -0,0 +1,45 @@ + += deleteServiceKey + + +[discrete] +== DELETE /api/v1/superuser/keys/{kid} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**kid** + +_required_|The unique identifier for a service key|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- \ No newline at end of file diff --git a/modules/api-superuser-deleteUserQuotaSuperUser.adoc b/modules/api-superuser-deleteUserQuotaSuperUser.adoc new file mode 100644 index 000000000..00317eb84 --- /dev/null +++ b/modules/api-superuser-deleteUserQuotaSuperUser.adoc @@ -0,0 +1,46 @@ + += deleteUserQuotaSuperUser + + +[discrete] +== DELETE /api/v1/superuser/organization/{namespace}/quota/{quota_id} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|path|**quota_id** + +_required_||string +|=== + + +[discrete] +== Responses + 
+[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getRegistrySize.adoc b/modules/api-superuser-getRegistrySize.adoc new file mode 100644 index 000000000..966022e3c --- /dev/null +++ b/modules/api-superuser-getRegistrySize.adoc @@ -0,0 +1,56 @@ + += getRegistrySize + + +[discrete] +== GET /api/v1/superuser/registrysize/ + +**Authorizations: **oauth2_implicit (**super:user**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + +Description of a image registry size + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|*size_bytes** + +_optional_|Number of bytes the organization is allowed|integer + +|*last_ran* | |integer + +|*queued* | |boolean + +|*running* | |boolean +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|CREATED| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/registrysize/" +---- \ No newline at end of file diff --git a/modules/api-superuser-getRepoBuildLogsSuperUser.adoc b/modules/api-superuser-getRepoBuildLogsSuperUser.adoc new file mode 100644 index 
000000000..1494a9613 --- /dev/null +++ b/modules/api-superuser-getRepoBuildLogsSuperUser.adoc @@ -0,0 +1,44 @@ + += getRepoBuildLogsSuperUser +Return the build logs for the build specified by the build uuid. + +[discrete] +== GET /api/v1/superuser/{build_uuid}/logs + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//logs" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getRepoBuildStatusSuperUser.adoc b/modules/api-superuser-getRepoBuildStatusSuperUser.adoc new file mode 100644 index 000000000..5202c4238 --- /dev/null +++ b/modules/api-superuser-getRepoBuildStatusSuperUser.adoc @@ -0,0 +1,44 @@ + += getRepoBuildStatusSuperUser +Return the status for the builds specified by the build uuids. 
+ +[discrete] +== GET /api/v1/superuser/{build_uuid}/status + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//status" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getRepoBuildSuperUser.adoc b/modules/api-superuser-getRepoBuildSuperUser.adoc new file mode 100644 index 000000000..c5e1a1be0 --- /dev/null +++ b/modules/api-superuser-getRepoBuildSuperUser.adoc @@ -0,0 +1,44 @@ + += getRepoBuildSuperUser +Returns information about a build. 
+ +[discrete] +== GET /api/v1/superuser/{build_uuid}/build + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**build_uuid** + +_required_|The UUID of the build|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//build" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-getServiceKey.adoc b/modules/api-superuser-getServiceKey.adoc new file mode 100644 index 000000000..496250c9f --- /dev/null +++ b/modules/api-superuser-getServiceKey.adoc @@ -0,0 +1,45 @@ + += getServiceKey + + +[discrete] +== GET /api/v1/superuser/keys/{kid} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**kid** + +_required_|The unique identifier for a service key|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- \ No newline at end of file diff --git a/modules/api-superuser-listAllLogs.adoc 
b/modules/api-superuser-listAllLogs.adoc new file mode 100644 index 000000000..498022c4c --- /dev/null +++ b/modules/api-superuser-listAllLogs.adoc @@ -0,0 +1,52 @@ + += listAllLogs +List the usage logs for the current system. + +[discrete] +== GET /api/v1/superuser/logs + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**page** + +_optional_|The page number for the logs|integer +|query|**endtime** + +_optional_|Latest time to which to get logs (%m/%d/%Y %Z)|string +|query|**starttime** + +_optional_|Earliest time from which to get logs (%m/%d/%Y %Z)|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/logs?starttime=&endtime=&page=&next_page=" +---- \ No newline at end of file diff --git a/modules/api-superuser-listAllOrganizations.adoc b/modules/api-superuser-listAllOrganizations.adoc new file mode 100644 index 000000000..ebebfb951 --- /dev/null +++ b/modules/api-superuser-listAllOrganizations.adoc @@ -0,0 +1,43 @@ + += listAllOrganizations +List the organizations for the current system. 
+ +[discrete] +== GET /api/v1/superuser/organizations + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|name + +required|The name of the organization being managed|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/organizations/" +---- \ No newline at end of file diff --git a/modules/api-superuser-listAllUsers.adoc b/modules/api-superuser-listAllUsers.adoc new file mode 100644 index 000000000..ff0086bf3 --- /dev/null +++ b/modules/api-superuser-listAllUsers.adoc @@ -0,0 +1,48 @@ + += listAllUsers +Returns a list of all users in the system. + +[discrete] +== GET /api/v1/superuser/users/ + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|query|**limit** + +_optional_|Limit to the number of results to return per page. 
Max 100.|integer +|query|**disabled** + +_optional_|If false, only enabled users will be returned.|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/users/" +---- \ No newline at end of file diff --git a/modules/api-superuser-listOrganizationQuotaSuperUser.adoc b/modules/api-superuser-listOrganizationQuotaSuperUser.adoc new file mode 100644 index 000000000..835b94e04 --- /dev/null +++ b/modules/api-superuser-listOrganizationQuotaSuperUser.adoc @@ -0,0 +1,44 @@ + += listOrganizationQuotaSuperUser + + +[discrete] +== GET /api/v1/superuser/users/{namespace}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-listServiceKeys.adoc b/modules/api-superuser-listServiceKeys.adoc new file mode 100644 index 000000000..e442bde9f --- /dev/null +++ b/modules/api-superuser-listServiceKeys.adoc @@ 
-0,0 +1,35 @@ + += listServiceKeys + + +[discrete] +== GET /api/v1/superuser/keys + + + +**Authorizations: **oauth2_implicit (**super:user**) + + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys" +---- \ No newline at end of file diff --git a/modules/api-superuser-listUserQuotaSuperUser.adoc b/modules/api-superuser-listUserQuotaSuperUser.adoc new file mode 100644 index 000000000..5edbee31d --- /dev/null +++ b/modules/api-superuser-listUserQuotaSuperUser.adoc @@ -0,0 +1,44 @@ + += listUserQuotaSuperUser + + +[discrete] +== GET /api/v1/superuser/organization/{namespace}/quota + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-superuser-postRegistrySize.adoc b/modules/api-superuser-postRegistrySize.adoc new file mode 100644 index 000000000..c37260c47 --- /dev/null +++ 
b/modules/api-superuser-postRegistrySize.adoc @@ -0,0 +1,64 @@ + += postRegistrySize + + +[discrete] +== POST /api/v1/superuser/registrysize/ + +**Authorizations: **oauth2_implicit (**super:user**) + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**namespace** + +_required_||string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of an image registry size + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema + +|*last_ran* | |integer + +|*queued* | |boolean + +|*running* | |boolean +|=== + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|CREATED| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/registrysize/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "last_ran": 1700000000, + "queued": true, + "running": false + }' +---- \ No newline at end of file diff --git a/modules/api-superuser-updateServiceKey.adoc b/modules/api-superuser-updateServiceKey.adoc new file mode 100644 index 000000000..691667dd0 --- /dev/null +++ b/modules/api-superuser-updateServiceKey.adoc @@ -0,0 +1,68 @@ + += updateServiceKey + + +[discrete] +== PUT /api/v1/superuser/keys/{kid} + + + +**Authorizations: **oauth2_implicit (**super:user**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**kid** + +_required_|The unique identifier for a service key|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of updates for a service key 
+ +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**name** + +_optional_|The friendly name of a service key|string +|**metadata** + +_optional_|The key/value pairs of this key's metadata|object +|**expiration** + +_optional_|The expiration date as a unix timestamp| +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "metadata": {"": ""}, + "expiration": + }' \ + "https:///api/v1/superuser/keys/" +---- \ No newline at end of file diff --git a/modules/api-superuser.adoc b/modules/api-superuser.adoc new file mode 100644 index 000000000..7a91154b3 --- /dev/null +++ b/modules/api-superuser.adoc @@ -0,0 +1,4 @@ + += superuser +Superuser API. + diff --git a/modules/api-tag-changeTag.adoc b/modules/api-tag-changeTag.adoc new file mode 100644 index 000000000..ecb9bb2b9 --- /dev/null +++ b/modules/api-tag-changeTag.adoc @@ -0,0 +1,66 @@ + += changeTag +Change which image a tag points to or create a new tag. + +[discrete] +== PUT /api/v1/repository/{repository}/tag/{tag} + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**tag** + +_required_|The name of the tag|string +|=== + + +[discrete] +== Request body schema (application/json) + +Makes changes to a specific tag + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**manifest_digest** + +_optional_|(If specified) The manifest digest to which the tag should point| +|**expiration** + +_optional_|(If specified) The expiration for the image| +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": "" + }' \ + https:///api/v1/repository///tag/ +---- \ No newline at end of file diff --git a/modules/api-tag-deleteFullTag.adoc b/modules/api-tag-deleteFullTag.adoc new file mode 100644 index 000000000..a4655f51e --- /dev/null +++ b/modules/api-tag-deleteFullTag.adoc @@ -0,0 +1,46 @@ + += deleteFullTag +Delete the specified repository tag. + +[discrete] +== DELETE /api/v1/repository/{repository}/tag/{tag} + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**tag** + +_required_|The name of the tag|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-tag-listRepoTags.adoc b/modules/api-tag-listRepoTags.adoc new file mode 100644 index 000000000..f8f253d8a --- /dev/null +++ b/modules/api-tag-listRepoTags.adoc @@ -0,0 +1,65 @@ + += listRepoTags + + +[discrete] +== GET /api/v1/repository/{repository}/tag/ + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**onlyActiveTags** + +_optional_|Filter to only active tags.|boolean +|query|**page** + +_optional_|Page index for the results. Default 1.|integer +|query|**limit** + +_optional_|Limit to the number of results to return per page. 
Max 100.|integer +|query|**filter_tag_name** + +_optional_|Syntax: <op>:<name> Filters the tag names based on the operation. <op> can be 'like' or 'eq'.|string +|query|**specificTag** + +_optional_|Filters the tags to the specific tag.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag/ +---- \ No newline at end of file diff --git a/modules/api-tag-removeTagFromTimemachine.adoc b/modules/api-tag-removeTagFromTimemachine.adoc new file mode 100644 index 000000000..b3cb4c216 --- /dev/null +++ b/modules/api-tag-removeTagFromTimemachine.adoc @@ -0,0 +1,54 @@ + += removeTagFromTimemachine +Updates any expired tags with the matching name and manifest with an expiry outside the time machine window. + +[discrete] +== POST /api/v1/repository/{repository}/tag/{tag}/expire + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**tag** + +_required_|The name of the tag|string +|=== + + +[discrete] +== Request body schema (application/json) + +Removes tag from the time machine window + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**manifest_digest** + +_optional_|Required if is_alive set to false. If specified, the manifest digest that should be used. 
Ignored when setting alive to true.|string +|**include_submanifests** + +_optional_|If set to true, expire the sub-manifests as well|boolean +|**is_alive** + +_optional_|If true, set the expiry of the matching alive tag outside the time machine window. If false set the expiry of any expired tags with the same tag and manifest outside the time machine window.|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== diff --git a/modules/api-tag-restoreTag.adoc b/modules/api-tag-restoreTag.adoc new file mode 100644 index 000000000..48d281ae0 --- /dev/null +++ b/modules/api-tag-restoreTag.adoc @@ -0,0 +1,64 @@ + += restoreTag +Restores a repository tag back to a previous image in the repository. + +[discrete] +== POST /api/v1/repository/{repository}/tag/{tag}/restore + + + +**Authorizations: **oauth2_implicit (**repo:write**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|path|**tag** + +_required_|The name of the tag|string +|=== + + +[discrete] +== Request body schema (application/json) + +Restores a tag to a specific image + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**manifest_digest** + +_required_|If specified, the manifest digest that should be used|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": + }' \ + quay-server.example.com/api/v1/repository/quayadmin/busybox/tag/test/restore +---- \ No newline at end of file diff --git a/modules/api-tag.adoc b/modules/api-tag.adoc new file mode 100644 index 000000000..dbd8dc68f --- /dev/null +++ b/modules/api-tag.adoc @@ -0,0 +1,4 @@ + += tag +Manage the tags of a repository. + diff --git a/modules/api-team-deleteOrganizationTeam.adoc b/modules/api-team-deleteOrganizationTeam.adoc new file mode 100644 index 000000000..ac787ee2e --- /dev/null +++ b/modules/api-team-deleteOrganizationTeam.adoc @@ -0,0 +1,46 @@ + += deleteOrganizationTeam +Delete the specified team. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/team/{teamname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**teamname** + +_required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team/" +---- \ No newline at end of file diff --git a/modules/api-team-deleteOrganizationTeamMember.adoc b/modules/api-team-deleteOrganizationTeamMember.adoc new file mode 100644 index 000000000..4edf232b4 --- /dev/null +++ b/modules/api-team-deleteOrganizationTeamMember.adoc @@ -0,0 +1,51 @@ + += deleteOrganizationTeamMember +Delete a member of a team. + + If the user is merely invited to join the team, then the invite is removed instead. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/team/{teamname}/members/{membername} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**teamname** + +_required_|The name of the team|string +|path|**membername** + +_required_|The username of the team member|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- \ No newline at end of file diff --git a/modules/api-team-deleteTeamMemberEmailInvite.adoc b/modules/api-team-deleteTeamMemberEmailInvite.adoc new file mode 100644 index 000000000..7999012e5 --- /dev/null +++ b/modules/api-team-deleteTeamMemberEmailInvite.adoc @@ -0,0 +1,49 @@ + += deleteTeamMemberEmailInvite +Delete an invite of an email address to join a team. 
+ +[discrete] +== DELETE /api/v1/organization/{orgname}/team/{teamname}/invite/{email} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**email** + +_required_||string +|path|**teamname** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- \ No newline at end of file diff --git a/modules/api-team-getOrganizationTeamMembers.adoc b/modules/api-team-getOrganizationTeamMembers.adoc new file mode 100644 index 000000000..f7c74a055 --- /dev/null +++ b/modules/api-team-getOrganizationTeamMembers.adoc @@ -0,0 +1,58 @@ + += getOrganizationTeamMembers +Retrieve the list of members for the specified team. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/team/{teamname}/members + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**teamname** + +_required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**includePending** + +_optional_|Whether to include pending members|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members" +---- \ No newline at end of file diff --git a/modules/api-team-getOrganizationTeamPermissions.adoc b/modules/api-team-getOrganizationTeamPermissions.adoc new file mode 100644 index 000000000..03349ed04 --- /dev/null +++ b/modules/api-team-getOrganizationTeamPermissions.adoc @@ -0,0 +1,45 @@ + += getOrganizationTeamPermissions +Returns the list of repository permissions for the org's team. 
+ +[discrete] +== GET /api/v1/organization/{orgname}/team/{teamname}/permissions + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**teamname** + +_required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//permissions" +---- \ No newline at end of file diff --git a/modules/api-team-inviteTeamMemberEmail.adoc b/modules/api-team-inviteTeamMemberEmail.adoc new file mode 100644 index 000000000..1bb1ae03a --- /dev/null +++ b/modules/api-team-inviteTeamMemberEmail.adoc @@ -0,0 +1,49 @@ + += inviteTeamMemberEmail +Invites an email address to an existing team. 
+ +[discrete] +== PUT /api/v1/organization/{orgname}/team/{teamname}/invite/{email} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**email** + +_required_||string +|path|**teamname** + +_required_||string +|path|**orgname** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- \ No newline at end of file diff --git a/modules/api-team-updateOrganizationTeam.adoc b/modules/api-team-updateOrganizationTeam.adoc new file mode 100644 index 000000000..0c5b7255a --- /dev/null +++ b/modules/api-team-updateOrganizationTeam.adoc @@ -0,0 +1,65 @@ + += updateOrganizationTeam +Update the org-wide permission for the specified team. + +[NOTE] +==== +This API is also used to create a team. 
+==== + +[discrete] +== PUT /api/v1/organization/{orgname}/team/{teamname} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**teamname** + +_required_|The name of the team|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Request body schema (application/json) + +Description of a team + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**role** + +_required_|Org wide permissions that should apply to the team|string +|**description** + +_optional_|Markdown description for the team|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -k -X PUT -H 'Accept: application/json' -H 'Content-Type: application/json' -H "Authorization: Bearer " --data '{"role": "creator"}' https:///api/v1/organization//team/ +---- \ No newline at end of file diff --git a/modules/api-team-updateOrganizationTeamMember.adoc b/modules/api-team-updateOrganizationTeamMember.adoc new file mode 100644 index 000000000..956e06874 --- /dev/null +++ b/modules/api-team-updateOrganizationTeamMember.adoc @@ -0,0 +1,49 @@ + += updateOrganizationTeamMember +Adds or invites a member to an existing team. 
+ +[discrete] +== PUT /api/v1/organization/{orgname}/team/{teamname}/members/{membername} + + + +**Authorizations: **oauth2_implicit (**org:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**teamname** + +_required_|The name of the team|string +|path|**membername** + +_required_|The username of the team member|string +|path|**orgname** + +_required_|The name of the organization|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- \ No newline at end of file diff --git a/modules/api-team.adoc b/modules/api-team.adoc new file mode 100644 index 000000000..98a7ef721 --- /dev/null +++ b/modules/api-team.adoc @@ -0,0 +1,4 @@ + += team +Create, list and manage an organization's teams. + diff --git a/modules/api-trigger-activateBuildTrigger.adoc b/modules/api-trigger-activateBuildTrigger.adoc new file mode 100644 index 000000000..795a22e81 --- /dev/null +++ b/modules/api-trigger-activateBuildTrigger.adoc @@ -0,0 +1,68 @@ + += activateBuildTrigger +Activate the specified build trigger. + +[discrete] +== POST /api/v1/repository/{repository}/trigger/{trigger_uuid}/activate + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + + + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**config** + +_required_|Arbitrary json.|object +|**pull_robot** + +_optional_|The name of the robot that will be used to pull images.|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/activate" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "config": { + "branch": "main" + }, + "pull_robot": "example+robot" + }' +---- \ No newline at end of file diff --git a/modules/api-trigger-deleteBuildTrigger.adoc b/modules/api-trigger-deleteBuildTrigger.adoc new file mode 100644 index 000000000..becc47dd3 --- /dev/null +++ b/modules/api-trigger-deleteBuildTrigger.adoc @@ -0,0 +1,46 @@ + += deleteBuildTrigger +Delete the specified build trigger. + +[discrete] +== DELETE /api/v1/repository/{repository}/trigger/{trigger_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-getBuildTrigger.adoc b/modules/api-trigger-getBuildTrigger.adoc new file mode 100644 index 000000000..c1253d879 --- /dev/null +++ b/modules/api-trigger-getBuildTrigger.adoc @@ -0,0 +1,46 @@ + += getBuildTrigger +Get information for the specified build trigger. + +[discrete] +== GET /api/v1/repository/{repository}/trigger/{trigger_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-listBuildTriggers.adoc b/modules/api-trigger-listBuildTriggers.adoc new file mode 100644 index 000000000..46ab2ebb0 --- /dev/null +++ b/modules/api-trigger-listBuildTriggers.adoc @@ -0,0 +1,44 @@ + += listBuildTriggers +List the triggers for the specified repository. + +[discrete] +== GET /api/v1/repository/{repository}/trigger/ + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-listTriggerRecentBuilds.adoc b/modules/api-trigger-listTriggerRecentBuilds.adoc new file mode 100644 index 000000000..4ec3f6506 --- /dev/null +++ b/modules/api-trigger-listTriggerRecentBuilds.adoc @@ -0,0 +1,57 @@ + += listTriggerRecentBuilds +List the builds started by the specified trigger. + +[discrete] +== GET /api/v1/repository/{repository}/trigger/{trigger_uuid}/builds + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**limit** + +_optional_|The maximum number of builds to return|integer +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/builds?limit=10" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-trigger-manuallyStartBuildTrigger.adoc b/modules/api-trigger-manuallyStartBuildTrigger.adoc new file mode 100644 index 000000000..4a3cd8624 --- /dev/null +++ b/modules/api-trigger-manuallyStartBuildTrigger.adoc @@ -0,0 +1,69 @@ + += manuallyStartBuildTrigger +Manually start a build from the specified trigger. + +[discrete] +== POST /api/v1/repository/{repository}/trigger/{trigger_uuid}/start + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Optional run parameters for activating the build trigger + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**branch_name** + +_optional_|(SCM only) If specified, the name of the branch to build.|string +|**commit_sha** + +_optional_|(Custom Only) If specified, the ref/SHA1 used to checkout a git repository.|string +|**refs** + +_optional_|(SCM Only) If specified, the ref to build.| +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/start" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "branch_name": "main", + "commit_sha": "abcdef1234567890", + "refs": "refs/heads/main" + }' +---- \ No newline at end of file diff --git a/modules/api-trigger-updateBuildTrigger.adoc b/modules/api-trigger-updateBuildTrigger.adoc new file mode 100644 index 000000000..92aa6e0c1 --- /dev/null +++ b/modules/api-trigger-updateBuildTrigger.adoc @@ -0,0 +1,61 @@ + += updateBuildTrigger +Updates the specified build trigger. + +[discrete] +== PUT /api/v1/repository/{repository}/trigger/{trigger_uuid} + + + +**Authorizations: **oauth2_implicit (**repo:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**trigger_uuid** + +_required_|The UUID of the build trigger|string +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Request body schema (application/json) + +Options for updating a build trigger + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**enabled** + +_required_|Whether the build trigger is enabled|boolean +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"enabled": true}' +---- \ No newline at end of file diff --git a/modules/api-trigger.adoc b/modules/api-trigger.adoc new file mode 100644 index 000000000..69d8dd810 --- /dev/null +++ b/modules/api-trigger.adoc @@ -0,0 +1,4 @@ + += trigger +Create, list and manage build triggers. + diff --git a/modules/api-user-createStar.adoc b/modules/api-user-createStar.adoc new file mode 100644 index 000000000..741ae5b9f --- /dev/null +++ b/modules/api-user-createStar.adoc @@ -0,0 +1,54 @@ + += createStar +Star a repository. 
+ +[discrete] +== POST /api/v1/user/starred + + + +**Authorizations: **oauth2_implicit (**repo:read**) + + + +[discrete] +== Request body schema (application/json) + + + +[options="header", width=100%, cols=".^3a,.^9a,.^4a"] +|=== +|Name|Description|Schema +|**namespace** + +_required_|Namespace in which the repository belongs|string +|**repository** + +_required_|Repository name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|201|Successful creation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/user/starred" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "repository": "" + }' +---- \ No newline at end of file diff --git a/modules/api-user-deleteStar.adoc b/modules/api-user-deleteStar.adoc new file mode 100644 index 000000000..10ff5a061 --- /dev/null +++ b/modules/api-user-deleteStar.adoc @@ -0,0 +1,44 @@ + += deleteStar +Removes a star from a repository. + +[discrete] +== DELETE /api/v1/user/starred/{repository} + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**repository** + +_required_|The full path of the repository. e.g. 
namespace/name|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|204|Deleted| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/user/starred/namespace/repository-name" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user-getLoggedInUser.adoc b/modules/api-user-getLoggedInUser.adoc new file mode 100644 index 000000000..1e12e6b18 --- /dev/null +++ b/modules/api-user-getLoggedInUser.adoc @@ -0,0 +1,34 @@ + += getLoggedInUser +Get user information for the authenticated user. + +[discrete] +== GET /api/v1/user/ + + + +**Authorizations: **oauth2_implicit (**user:read**) + + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation|<<_userview,UserView>> +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user-getUserInformation.adoc b/modules/api-user-getUserInformation.adoc new file mode 100644 index 000000000..fdcdaa6f3 --- /dev/null +++ b/modules/api-user-getUserInformation.adoc @@ -0,0 +1,42 @@ + += getUserInformation +Get user information for the specified user. 
+ +[discrete] +== GET /api/v1/users/{username} + + + +**Authorizations: ** +[discrete] +== Path parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|path|**username** + +_required_||string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/users/example_user" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user-listStarredRepos.adoc b/modules/api-user-listStarredRepos.adoc new file mode 100644 index 000000000..87030d47e --- /dev/null +++ b/modules/api-user-listStarredRepos.adoc @@ -0,0 +1,45 @@ + += listStarredRepos +List all starred repositories. 
+ +[discrete] +== GET /api/v1/user/starred + + + +**Authorizations: **oauth2_implicit (**user:admin**) + + + +[discrete] +== Query parameters + +[options="header", width=100%, cols=".^2a,.^3a,.^9a,.^4a"] +|=== +|Type|Name|Description|Schema +|query|**next_page** + +_optional_|The page token for the next page|string +|=== + + +[discrete] +== Responses + +[options="header", width=100%, cols=".^2a,.^14a,.^4a"] +|=== +|HTTP Code|Description|Schema +|200|Successful invocation| +|400|Bad Request|<<_apierror,ApiError>> +|401|Session required|<<_apierror,ApiError>> +|403|Unauthorized access|<<_apierror,ApiError>> +|404|Not found|<<_apierror,ApiError>> +|=== + +[discrete] +== Example command + +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/starred?next_page=" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/api-user.adoc b/modules/api-user.adoc new file mode 100644 index 000000000..778285d56 --- /dev/null +++ b/modules/api-user.adoc @@ -0,0 +1,4 @@ + += user +Manage the current user. + diff --git a/modules/arch-georpl-features.adoc b/modules/arch-georpl-features.adoc new file mode 100644 index 000000000..98372e342 --- /dev/null +++ b/modules/arch-georpl-features.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="arch-georpl-features"] += Geo-replication features + +* When geo-replication is configured, container image pushes will be written to the preferred storage engine for that {productname} instance. This is typically the nearest storage backend within the region. + +* After the initial push, image data will be replicated in the background to other storage engines. + +* The list of replication locations is configurable and those can be different storage backends. + +* An image pull will always use the closest available storage engine, to maximize pull performance. + +* If replication has not been completed yet, the pull will use the source storage backend instead. 
\ No newline at end of file diff --git a/modules/arch-intro-access-control.adoc b/modules/arch-intro-access-control.adoc new file mode 100644 index 000000000..408ded38e --- /dev/null +++ b/modules/arch-intro-access-control.adoc @@ -0,0 +1,4 @@ +[[arch-intro-access-control]] += Access control + +{productname} provides both role-based access control (RBAC) and fine-grained access control, and has team features that allow for limited access control of repositories, organizations, and user privileges. {productname} access control features also provide support for dispersed organizations. \ No newline at end of file diff --git a/modules/arch-intro-build-automation.adoc b/modules/arch-intro-build-automation.adoc new file mode 100644 index 000000000..00e2de9f9 --- /dev/null +++ b/modules/arch-intro-build-automation.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="arch-intro-build-automation"] += Build automation + +{productname} supports building Dockerfiles using a set of worker nodes on {ocp} or Kubernetes platforms. Build triggers, such as GitHub webhooks, can be configured to automatically build new versions of your repositories when new code is committed. + +Prior to {productname} 3.7, {productname} ran Podman commands in virtual machines launched by pods. Running builds on virtual platforms requires enabling nested virtualization, which is not featured in {rhel} or {ocp}. As a result, builds had to run on bare metal clusters, which is an inefficient use of resources. With {productname} 3.7, this requirement was removed and builds could be run on {ocp} clusters running on virtualized or bare metal platforms. 
\ No newline at end of file diff --git a/modules/arch-intro-content-distribution.adoc b/modules/arch-intro-content-distribution.adoc new file mode 100644 index 000000000..33650b98b --- /dev/null +++ b/modules/arch-intro-content-distribution.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="arch-intro-content-distribution"] += Content distribution + +Content distribution features in {productname} include the following: + +Repository mirroring:: {productname} repository mirroring lets you mirror images from {productname} and other container registries, like JFrog Artifactory, Harbor, or Sonatype Nexus Repository, into your {productname} cluster. Using repository mirroring, you can synchronize images to {productname} based on repository names and tags. + +Geo-replication:: {productname} geo-replication allows multiple, geographically distributed {productname} deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed {productname} setup. Image data is asynchronously replicated in the background with transparent failover and redirection for clients. + +Deployment in disconnected or air-gapped environments:: {productname} is deployable in a disconnected environment in one of two ways: ++ +* {productname} and Clair connected to the internet, with an air-gapped {ocp} cluster accessing the {productname} registry through an explicit, allowlisted hole in the firewall. +* Using two independent {productname} and Clair installations. One installation is connected to the internet and another within a disconnected, or firewalled, environment. Image and vulnerability data is manually transferred from the connected environment to the disconnected environment using offline media. 
\ No newline at end of file diff --git a/modules/arch-intro-integration.adoc b/modules/arch-intro-integration.adoc new file mode 100644 index 000000000..24026c89a --- /dev/null +++ b/modules/arch-intro-integration.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="arch-intro-integration"] += Integration + +{productname} can integrate with almost all Git-compatible systems. {productname} offers automatic configuration for GitHub, GitLab, or BitBucket, which allows users to continuously build and serve their containerized software. + +[id="arch-rest-api"] +== REST API + +{productname} provides a full OAuth 2, RESTful API. RESTful API offers the following benefits: + +* Availability from endpoints of each {productname} instance from the URL, for example, `\https://quay-server.example.com/api/v1` +* Allow users to connect to endpoints through a browser, to `GET`, `DELETE`, `POST`, and `PUT` {productname} settings provided by a discovery endpoint that is usable by Swagger. +* The API can be invoked by the URL, for example, `\https://quay-server.example.com/api/v1`, and uses JSON objects as payload. 
\ No newline at end of file diff --git a/modules/arch-intro-other-features.adoc b/modules/arch-intro-other-features.adoc new file mode 100644 index 000000000..12e694d56 --- /dev/null +++ b/modules/arch-intro-other-features.adoc @@ -0,0 +1,8 @@ +[[arch-intro-other-features]] += Other features + +* Full standards / spec support (Docker v2-2) +* Long-term protocol support +* OCI compatibility through test suite compliance +* Enterprise grade support +* Regular updates \ No newline at end of file diff --git a/modules/arch-intro-recent-features.adoc b/modules/arch-intro-recent-features.adoc new file mode 100644 index 000000000..cfba552d1 --- /dev/null +++ b/modules/arch-intro-recent-features.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="arch-intro-recent-features"] += Recently added features + +See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/red_hat_quay_release_notes/index[{productname} Release Notes] for information about the latest features, enhancements, deprecations, and known issues. \ No newline at end of file diff --git a/modules/arch-intro-scalability.adoc b/modules/arch-intro-scalability.adoc new file mode 100644 index 000000000..724b2b078 --- /dev/null +++ b/modules/arch-intro-scalability.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="arch-intro-scalability"] += Scalability and high availability (HA) + +The code base used for {productname} is the same as the code base used for link:https://quay.io[Quay.io], which is the highly available container image registry hosted by Red Hat. Quay.io and {productname} offer a multitenant SaaS solution. As a result, users can be confident that their deployment can deliver at scale with high availability, whether their deployment is on-prem or on a public cloud. 
\ No newline at end of file diff --git a/modules/arch-intro-security.adoc b/modules/arch-intro-security.adoc new file mode 100644 index 000000000..1e92be54c --- /dev/null +++ b/modules/arch-intro-security.adoc @@ -0,0 +1,41 @@ +:_content-type: CONCEPT +[id="arch-intro-security"] += Security + +{productname} is built for real enterprise use cases where content governance and security are two major focus areas. + +{productname} content governance and security includes built-in vulnerability scanning through Clair. + +[id="arch-tls-ssl-config"] +== TLS/SSL configuration + +You can configure SSL/TLS for the {productname} registry in the configuration tool UI or in the configuration bundle. SSL/TLS connections to the database, to image storage, and to Redis can also be specified through the configuration tool. + +Sensitive fields in the database and at run time are automatically encrypted. You can also require HTTPS and verify certificates for the {productname} registry during mirror operations. + +[id="arch-intro-clair"] +== Clair + +Clair is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. + +[id="arch-operator-security"] +== {productname} Operator security + +When {productname} is deployed using the {productname} Operator, the `tls` component is set to `managed` by default and the {ocp}'s Certificate Authority is used to create HTTPS endpoints and to rotate TLS certificates. + +If you set the `tls` component to `unmanaged`, you can provide custom certificates to the pass-through Routes, however you are responsible for certificate rotation. 
+ +[id="arch-builders"] +== Fully isolated builds + +{productname} now supports building Dockerfiles that uses both bare metal and virtual builders. + +By using bare-metal worker nodes, each build is done in an ephemeral virtual machine to ensure isolation and security while the build is running. This provides the best protection against rogue payloads. + +Running builds directly in a container does not have the same isolation as when using virtual machines, but it still provides good protection. + + +[id="arch-rbac"] +== Role-based access controls + +{productname} provides full isolation of registry content by organization and team with fine-grained entitlements for read, write, and administrative access by users and automated tools. diff --git a/modules/arch-intro.adoc b/modules/arch-intro.adoc new file mode 100644 index 000000000..ca2445f31 --- /dev/null +++ b/modules/arch-intro.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="arch-intro"] += {productname} overview + +{productname} is a distributed and highly available container image registry for your enterprise. + +{productname} container registry platform provides secure storage, distribution, access controls, geo-replications, repository mirroring, and governance of containers and cloud-native artifacts on any infrastructure. It is available as a standalone component or as an Operator for {ocp}, and is deployable on-prem or on a public cloud. + +image:178_Quay_architecture_0821_features.png[Quay features] + +This guide provides an insight into architectural patterns to use when deploying {productname}. This guide also offers sizing guidance and deployment prerequisites, along with best practices for ensuring high availability for your {productname} registry. 
diff --git a/modules/arch-mirror-registry.adoc b/modules/arch-mirror-registry.adoc new file mode 100644 index 000000000..68e328c0c --- /dev/null +++ b/modules/arch-mirror-registry.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="arch-mirror-registry"] += Mirror registry for Red Hat OpenShift + +The _mirror registry for Red Hat OpenShift_ is a small-scale version of {productname} that you can use as a target for mirroring the required container images of {ocp} for disconnected installations. + +For disconnected deployments of {ocp}, a container registry is required to carry out the installation of the clusters. To run a production-grade registry service on such a cluster, you must create a separate registry deployment to install the first cluster. The _mirror registry for Red Hat OpenShift_ addresses this need and is included in every {ocp} subscription. It is available for download on the link:https://console.redhat.com/openshift/downloads#tool-mirror-registry[OpenShift console *Downloads*] page. + +The _mirror registry for Red Hat OpenShift_ allows users to install a small-scale version of {productname} and its required components using the `mirror-registry` command line interface (CLI) tool. The _mirror registry for Red Hat OpenShift_ is deployed automatically with pre-configured local storage and a local database. It also includes auto-generated user credentials and access permissions with a single set of inputs and no additional configuration choices to get started. + +The _mirror registry for Red Hat OpenShift_ provides a pre-determined network configuration and reports deployed component credentials and access URLs upon success. A limited set of optional configuration inputs like fully qualified domain name (FQDN) services, superuser name and password, and custom TLS certificates are also provided. 
This provides users with a container registry so that they can easily create an offline mirror of all {ocp} release content when running {ocp} in restricted network environments. + +The _mirror registry for Red Hat OpenShift_ is limited to hosting images that are required to install a disconnected {ocp} cluster, such as release images or Operator images. It uses local storage. Content built by customers should not be hosted by the _mirror registry for Red Hat OpenShift_. + +Unlike {productname}, the _mirror registry for Red Hat OpenShift_ is not a highly-available registry. Only local file system storage is supported. Using the _mirror registry for Red Hat OpenShift_ with more than one cluster is discouraged, because multiple clusters can create a single point of failure when updating your cluster fleet. It is advised to use the _mirror registry for Red Hat OpenShift_ to install a cluster that can host a production-grade, highly available registry such as {productname}, which can serve {ocp} content to other clusters. + +More information is available at link:https://docs.openshift.com/container-platform/{ocp-y}/installing/disconnected_install/installing-mirroring-creating-registry.html[Creating a mirror registry with _mirror registry for Red Hat OpenShift_]. \ No newline at end of file diff --git a/modules/arch-prereqs.adoc b/modules/arch-prereqs.adoc new file mode 100644 index 000000000..c3e5ac0d4 --- /dev/null +++ b/modules/arch-prereqs.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="arch-prereqs"] += {productname} prerequisites + +Before deploying {productname}, you must provision image storage, a database, and Redis. 
diff --git a/modules/attributes.adoc b/modules/attributes.adoc index 11d3e30a4..6aefa202a 100644 --- a/modules/attributes.adoc +++ b/modules/attributes.adoc @@ -1,11 +1,46 @@ :productname: Red Hat Quay +:productname-ocp: Red Hat Quay on OpenShift Container Platform +:quayio: Quay.io :productshortname: Quay -:productversion: 3 -:productmin: 3.1.1 +:imagesdir: ../images +:ocp: OpenShift Container Platform +:odf: Red Hat OpenShift Data Foundation +:qbo: Quay Bridge Operator +:rhel: Red Hat Enterprise Linux (RHEL) +:rhel-short: RHEL +:ocp-y: 4.17 + ifeval::["{productname}" == "Project Quay"] -:imagesdir: images +:upstream: +:productname: Project Quay +:productversion: 3 +:producty: 3.14 +:productminv: v3.14.0 +:productrepo: quay.io/projectquay +:quayimage: quay +:clairimage: clair +:clairproductminv: 4.8 +:builderimage: quay-builder +:builderqemuimage: quay-builder-qemu:main +:postgresimage: centos/postgresql-10-centos7@sha256:de1560cb35e5ec643e7b3a772ebaac8e3a7a2a8e8271d9e91ff023539b4dfb33 +:redisimage: centos/redis-32-centos7@sha256:06dbb609484330ec6be6090109f1fa16e936afcf975d1cbc5fff3e6c7cae7542 endif::[] ifeval::["{productname}" == "Red Hat Quay"] -:imagesdir: ../images +:downstream: +:productname: Red Hat Quay +:productversion: 3 +:producty: 3.14 +:producty-n1: 3.13 +:productmin: 3.14.0 +:productminv: v3.14.0 +:productrepo: registry.redhat.io/quay +:clairnewver: v3.14 +:quayimage: quay-rhel8 +:clairimage: clair-rhel8 +:clairproductminv: 4.8 +:builderimage: quay-builder-rhel8 +:builderqemuimage: quay-builder-qemu-rhcos +:postgresimage: registry.redhat.io/rhel8/postgresql-13 +:redisimage: registry.redhat.io/rhel8/redis-6:1-110 endif::[] diff --git a/modules/authentication-troubleshooting-issues.adoc b/modules/authentication-troubleshooting-issues.adoc new file mode 100644 index 000000000..c8b8a2c03 --- /dev/null +++ b/modules/authentication-troubleshooting-issues.adoc @@ -0,0 +1,50 @@ +:_content-type: CONCEPT +[id="authentication-troubleshooting-issues"] += 
Troubleshooting {productname} authentication and authorization issues for specific users + +Use the following procedure to troubleshoot authentication and authorization issues for specific users. + +.Procedure + +. Exec into the {productname} pod or container. For more information, see "Interacting with the {productname} database". + +. Enter the following command to show all users for external authentication: ++ +[source,terminal] +---- +quay=# select * from federatedlogin; +---- ++ +.Example output ++ +[source,terminal] +---- +id | user_id | service_id | service_ident | metadata_json +----+---------+------------+---------------------------------------------+------------------------------------------- +1 | 1 | 3 | testuser0 | {} +2 | 1 | 8 | PK7Zpg2Yu2AnfUKG15hKNXqOXirqUog6G-oE7OgzSWc | {"service_username": "live.com#testuser0"} +3 | 2 | 3 | testuser1 | {} +4 | 2 | 4 | 110875797246250333431 | {"service_username": "testuser1"} +5 | 3 | 3 | testuser2 | {} +6 | 3 | 1 | 26310880 | {"service_username": "testuser2"} +(6 rows) +---- + +. Verify that the users are inserted into the `user` table: ++ +[source,terminal] +---- +quay=# select username, email from "user"; +---- ++ +.Example output ++ +[source,terminal] +---- +username | email +-----------+---------------------- +testuser0 | testuser0@outlook.com +testuser1 | testuser1@gmail.com +testuser2 | testuser2@redhat.com +(3 rows) +---- \ No newline at end of file diff --git a/modules/authentication-troubleshooting.adoc b/modules/authentication-troubleshooting.adoc new file mode 100644 index 000000000..ca4b997f8 --- /dev/null +++ b/modules/authentication-troubleshooting.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="authentication-troubleshooting"] += Troubleshooting {productname} authentication + +Authentication and authorization is crucial for secure access to {productname}. 
Together, they safeguard sensitive container images, verify user identities, enforce access controls, facilitate auditing and accountability, and enable seamless integration with external identity providers. By prioritizing authentication, organizations can bolster the overall security and integrity of their container registry environment. + +The following authentication methods are supported by {productname}: + +* *Username and password*. Users can authenticate by providing their username and password, which are validated against the user database configured in {productname}. This traditional method requires users to enter their credentials to gain access. + +* *OAuth*. {productname} supports OAuth authentication, which allows users to authenticate using their credentials from third party services like Google, GitHub, or Keycloak. OAuth enables a seamless and federated login experience, eliminating the need for separate account creation and simplifying user management. + +* *OIDC*. OpenID Connect enables single sign-on (SSO) capabilities and integration with enterprise identity providers. With OpenID Connect, users can authenticate using their existing organizational credentials, providing a unified authentication experience across various systems and applications. + +* *Token-based authentication*. Users can obtain unique tokens that grant access to specific resources within {productname}. Tokens can be obtained through various means, such as OAuth or by generating API tokens within the {productname} user interface. Token-based authentication is often used for automated or programmatic access to the registry. + +* *External identity provider*. {productname} can integrate with external identity providers, such as LDAP or AzureAD, for authentication purposes. This integration allows organizations to use their existing identity management infrastructure, enabling centralized user authentication and reducing the need for separate user databases. 
\ No newline at end of file diff --git a/modules/automating-quay-using-the-api.adoc b/modules/automating-quay-using-the-api.adoc new file mode 100644 index 000000000..eccf374d6 --- /dev/null +++ b/modules/automating-quay-using-the-api.adoc @@ -0,0 +1,112 @@ +:_content-type: REFERENCE +[id="automating-quay-using-the-api"] += Automating {productname} processes by using the API + +With the API, {productname} administrators and users with access to the API can automate repetitive tasks such as repository management or image pruning. + +The following example shows you how you might use a Python script and a cron job to automate the deletion of OAuth 2 applications _except_ the administrator's token. This might be useful if you want to ensure an application associated with an OAuth 2 access token is cycled after a certain period of time. + +.Prerequisites + +* You have access to the {productname} API, which entails having already created an OAuth 2 access token. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have installed the Python `requests` library. +* You have enabled cron jobs on your machine. +* You have created several organization applications, including one that will not be deleted. + +.Procedure + +. Create a Python script that executes an API command. The following example is used to delete organization applications using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationapplication[`DELETE /api/v1/organization/{orgname}/applications/{client_id}`] API endpoint. 
++ +.example.py file +[source,python] +---- +import requests <1> + +# Hard-coded values +API_BASE_URL = "http:///api/v1" <2> +ACCESS_TOKEN = "" <3> +ORG_NAME = "" <4> + +def get_all_organization_applications(): + url = f"{API_BASE_URL}/organization/{ORG_NAME}/applications" + headers = { + "Authorization": f"Bearer {ACCESS_TOKEN}" + } + + response = requests.get(url, headers=headers) + + if response.status_code == 200: + try: + applications = response.json() + # Print the raw response for debugging + print("Raw response:", applications) + + # Adjust parsing logic based on the response structure + if isinstance(applications, dict) and 'applications' in applications: + applications = applications['applications'] + + if isinstance(applications, list): + print("Organization applications retrieved successfully:") + for app in applications: + # Updated key from 'title' to 'name' + print(f"Name: {app['name']}, Client ID: {app['client_id']}") + return applications + else: + print("Unexpected response format.") + return [] + except requests.exceptions.JSONDecodeError: + print("Error decoding JSON response:", response.text) + return [] + else: + print(f"Failed to retrieve applications. Status code: {response.status_code}, Response: {response.text}") + return [] + +def delete_organization_application(client_id): + url = f"{API_BASE_URL}/organization/{ORG_NAME}/applications/{client_id}" + headers = { + "Authorization": f"Bearer {ACCESS_TOKEN}" + } + + response = requests.delete(url, headers=headers) + + if response.status_code == 204: + print(f"Application {client_id} deleted successfully.") + else: + print(f"Failed to delete application {client_id}. 
Status code: {response.status_code}, Response: {response.text}") + +def main(): + applications = get_all_organization_applications() + for app in applications: + if app['name'] != "": <5> # Skip the "admin-token-app" + delete_organization_application(app['client_id']) + else: + print(f"Skipping deletion of application: {app['name']}") + +# Execute the main function +main() +---- +<1> Includes the `import` library in your Python code. +<2> The URL of your registry appended with `/api/v1`. +<3> Your OAuth 2 access token. +<4> The organization that holds the application. +<5> The name of the application token to remain. + +. Save the script as `prune_applications.py`. + +. Create a cron job that automatically runs the script: + +.. Open the crontab editor by running the following command: ++ +[source,terminal] +---- +$ crontab -e +---- + +.. In the editor, add the cron job for running the script. The following example runs the script once per month: ++ +[source,text] +---- +0 0 1 * * sudo python /path/to/prune_images.py >> /var/log/prune_images.log 2>&1 +---- + diff --git a/modules/backing-up-and-restoring-intro.adoc b/modules/backing-up-and-restoring-intro.adoc new file mode 100644 index 000000000..a1eacfae7 --- /dev/null +++ b/modules/backing-up-and-restoring-intro.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="backing-up-and-restoring-intro"] += Backing up and restoring {productname} managed by the {productname} Operator + +Use the content within this section to back up and restore {productname} when managed by the {productname} Operator on {ocp} diff --git a/modules/backing-up-red-hat-quay-operator.adoc b/modules/backing-up-red-hat-quay-operator.adoc new file mode 100644 index 000000000..409bf7809 --- /dev/null +++ b/modules/backing-up-red-hat-quay-operator.adoc @@ -0,0 +1,345 @@ +:_content-type: PROCEDURE +[id="backing-up-red-hat-quay-operator"] += Backing up {productname} + +Database backups should be performed regularly using either the supplied tools on 
the PostgreSQL image or your own backup infrastructure. The {productname} Operator does not ensure that the PostgreSQL database is backed up. + +[NOTE] +==== +This procedure covers backing up your {productname} PostgreSQL database. It does not cover backing up the Clair PostgreSQL database. Strictly speaking, backing up the Clair PostgreSQL database is not needed because it can be recreated. If you opt to recreate it from scratch, you will wait for the information to be repopulated after all images inside of your {productname} deployment are scanned. During this downtime, security reports are unavailable. + +If you are considering backing up the Clair PostgreSQL database, you must consider that its size is dependent upon the number of images stored inside of {productname}. As a result, the database can be extremely large. +==== + +This procedure describes how to create a backup of {productname-ocp} using the Operator. + +.Prerequisites + +* A healthy {productname} deployment on {ocp} using the {productname} Operator. The status condition `Available` is set to `true`. +* The components `quay`, `postgres` and `objectstorage` are set to `managed: true` +* If the component `clair` is set to `managed: true` the component `clairpostgres` is also set to `managed: true` (starting with {productname} v3.7 or later) + +[NOTE] +==== +If your deployment contains partially unmanaged database or storage components and you are using external services for PostgreSQL or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to create a backup of the data. +You can refer to the tools described in this guide as a starting point on how to backup your external PostgreSQL database or object storage. +==== + +[id="quay-configuration-backup"] +== {productname} configuration backup + +Use the following procedure to back up your {productname} configuration. + +.Procedure + +. 
To back up the `QuayRegistry` custom resource by exporting it, enter the following command: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml > quay-registry.yaml +---- + +. Edit the resulting `quay-registry.yaml` and remove the status section and the following metadata fields: ++ +[source,yaml] +---- + metadata.creationTimestamp + metadata.finalizers + metadata.generation + metadata.resourceVersion + metadata.uid +---- + +. Backup the managed keys secret by entering the following command: ++ +[NOTE] +==== +If you are running a version older than {productname} 3.7.0, this step can be skipped. Some secrets are automatically generated while deploying {productname} for the first time. These are stored in a secret called `-quay_registry_managed_secret_keys` in the namespace of the `QuayRegistry` resource. +==== ++ +[source,terminal] +---- +$ oc get secret -n _quay_registry_managed_secret_keys -o yaml > managed_secret_keys.yaml +---- + +. Edit the resulting `managed_secret_keys.yaml` file and remove the entry `metadata.ownerReferences`. Your `managed_secret_keys.yaml` file should look similar to the following: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: _quay_registry_managed_secret_keys> + namespace: +data: + CONFIG_EDITOR_PW: + DATABASE_SECRET_KEY: + DB_ROOT_PW: + DB_URI: + SECRET_KEY: + SECURITY_SCANNER_V4_PSK: +---- ++ +All information under the `data` property should remain the same. + +. Redirect the current `Quay` configuration file by entering the following command: ++ +[source,terminal] +---- +$ oc get secret -n $(oc get quayregistry -n -o jsonpath='{.spec.configBundleSecret}') -o yaml > config-bundle.yaml +---- + +.
Backup the `/conf/stack/config.yaml` file mounted inside of the `Quay` pods: ++ +[source,terminal] +---- +$ oc exec -it quay_pod_name -- cat /conf/stack/config.yaml > quay_config.yaml +---- + +[id="scaling-down-quay-deployment"] +== Scaling down your {productname} deployment + +Use the following procedure to scale down your {productname} deployment. + +[IMPORTANT] +==== +This step is needed to create a consistent backup of the state of your {productname} deployment. Do not omit this step, including in setups where PostgreSQL databases and/or S3-compatible object storage are provided by external services (unmanaged by the {productname} Operator). +==== + +.Procedure + +. Depending on the version of your {productname} deployment, scale down your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for {productname}, mirror workers, and Clair (if managed). Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: false <1> + - kind: quay + managed: true + overrides: <2> + replicas: 0 + - kind: clair + managed: true + overrides: + replicas: 0 + - kind: mirror + managed: true + overrides: + replicas: 0 + … +---- +<1> Disable auto scaling of Quay, Clair and Mirroring workers +<2> Set the replica count to 0 for components accessing the database and objectstorage + +.. 
*For Operator version 3.6 and earlier*: Scale down the {productname} deployment by scaling down the {productname} registry first and then the managed {productname} resources: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/^quay-operator/ {print $1}') -n +---- ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-app/ {print $1}') -n +---- ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-mirror/ {print $1}') -n +---- ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/clair-app/ {print $1}') -n +---- + +. Wait for the `registry-quay-app`, `registry-quay-mirror` and `registry-clair-app` pods (depending on which components you set to be managed by the {productname} Operator) to disappear. You can check their status by running the following command: ++ +[source,terminal] +---- +$ oc get pods -n +---- ++ +Example output: ++ +[source,terminal] +---- +$ oc get pod +---- ++ +.Example output ++ +[source,terminal] +---- +quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m +quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m +quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m +quayregistry-quay-database-859d5445ff-cqthr 1/1 Running 0 12m +quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m +---- + +[id="backing-up-managed-database"] +== Backing up the {productname} managed database + +Use the following procedure to back up the {productname} managed database. + +[NOTE] +==== +If your {productname} deployment is configured with external, or unmanged, PostgreSQL database(s), refer to your vendor's documentation on how to create a consistent backup of these databases. +==== + +.Procedure + +. 
Identify the Quay PostgreSQL pod name: ++ +[source,terminal] +---- +$ oc get pod -l quay-component=postgres -n -o jsonpath='{.items[0].metadata.name}' +---- ++ +Example output: ++ +[source,terminal] +---- +quayregistry-quay-database-59f54bb7-58xs7 +---- + +. Obtain the Quay database name: ++ +[source,terminal] +---- +$ oc -n rsh $(oc get pod -l app=quay -o NAME -n |head -n 1) cat /conf/stack/config.yaml|awk -F"/" '/^DB_URI/ {print $4}' +quayregistry-quay-database +---- + +. Download a backup database: ++ +[source,terminal] +---- +$ oc -n exec quayregistry-quay-database-59f54bb7-58xs7 -- /usr/bin/pg_dump -C quayregistry-quay-database > backup.sql +---- + +[id="backing-up-managed-object-storage"] +=== Backing up the {productname} managed object storage + +Use the following procedure to back up the {productname} managed object storage. The instructions in this section apply to the following configurations: + +* Standalone, multi-cloud object gateway configurations +* OpenShift Data Foundations storage requires that the {productname} Operator provisioned an S3 object storage bucket from, through the ObjectStorageBucketClaim API + +[NOTE] +==== +If your {productname} deployment is configured with external (unmanged) object storage, refer to your vendor's documentation on how to create a copy of the content of Quay's storage bucket. +==== + +.Procedure + +. Decode and export the `AWS_ACCESS_KEY_ID` by entering the following command: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_ACCESS_KEY_ID}' |base64 -d) +---- + +. Decode and export the `AWS_SECRET_ACCESS_KEY_ID` by entering the following command: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_SECRET_ACCESS_KEY}' |base64 -d) +---- + +. 
Create a new directory: ++ +[source,terminal] +---- +$ mkdir blobs +---- + +[NOTE] +==== +You can also use link:https://rclone.org/[rclone] or link:https://s3tools.org/s3cmd[s3cmd] instead of the AWS command line utility. +==== + +. Copy all blobs to the directory by entering the following command: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift-storage -o jsonpath='{.spec.host}') s3://$(oc get cm -l app=noobaa -n -o jsonpath='{.items[0].data.BUCKET_NAME}') ./blobs +---- + +[id="scaling-up-quay-deployment"] +== Scale the {productname} deployment back up + +. Depending on the version of your {productname} deployment, scale up your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: true <1> + - kind: quay <2> + managed: true + - kind: clair + managed: true + - kind: mirror + managed: true + … +---- +<1> Re-enables auto scaling of Quay, Clair and Mirroring workers again (if desired) +<2> Replica overrides are removed again to scale the Quay components back up + +.. *For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} registry: ++ +[source,terminal] +---- +$ oc scale --replicas=1 deployment $(oc get deployment -n | awk '/^quay-operator/ {print $1}') -n +---- + +.
Check the status of the {productname} deployment by entering the following command: ++ +[source,terminal] +---- +$ oc wait quayregistry registry --for=condition=Available=true -n +---- ++ +Example output: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + ... + name: registry + namespace: + ... +spec: + ... +status: + - lastTransitionTime: '2022-06-20T05:31:17Z' + lastUpdateTime: '2022-06-20T17:31:13Z' + message: All components reporting as healthy + reason: HealthChecksPassing + status: 'True' + type: Available +---- diff --git a/modules/backing-up-red-hat-quay-standalone.adoc b/modules/backing-up-red-hat-quay-standalone.adoc new file mode 100644 index 000000000..89d413e99 --- /dev/null +++ b/modules/backing-up-red-hat-quay-standalone.adoc @@ -0,0 +1,138 @@ +:_content-type: PROCEDURE +[[backing-up-red-hat-quay-standalone]] += Backing up {productname} on standalone deployments + +This procedure describes how to create a backup of {productname} on standalone deployments. + +.Prerequisites + +.Procedure + +. Create a temporary backup directory, for example, `quay-backup`: ++ +[source,terminal] +---- +$ mkdir /tmp/quay-backup +---- + +. The following example command denotes the local directory that the {productname} was started in, for example, `/opt/quay-install`: ++ +[subs="verbatim,attributes"] +---- +$ podman run --name quay-app \ + -v /opt/quay-install/config:/conf/stack:Z \ + -v /opt/quay-install/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- ++ +Change into the directory that bind-mounts to `/conf/stack` inside of the container, for example, `/opt/quay-install`, by running the following command: ++ +[source,terminal] +---- +$ cd /opt/quay-install +---- + +. 
Compress the contents of your {productname} deployment into an archive in the `quay-backup` directory by entering the following command: ++ +[source,terminal] +---- +$ tar cvf /tmp/quay-backup/quay-backup.tar.gz * +---- ++ +Example output: ++ +[source,terminal] +---- +config.yaml +config.yaml.bak +extra_ca_certs/ +extra_ca_certs/ca.crt +ssl.cert +ssl.key +---- + +. Back up the Quay container service by entering the following command: ++ +[subs="verbatim,attributes"] +---- +$ podman inspect quay-app | jq -r '.[0].Config.CreateCommand | .[]' | paste -s -d ' ' - + + /usr/bin/podman run --name quay-app \ + -v /opt/quay-install/config:/conf/stack:Z \ + -v /opt/quay-install/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- + +. Redirect the contents of your `conf/stack/config.yaml` file to your temporary `quay-config.yaml` file by entering the following command: ++ +[source,terminal] +---- +$ podman exec -it quay cat /conf/stack/config.yaml > /tmp/quay-backup/quay-config.yaml +---- + +. Obtain the `DB_URI` located in your temporary `quay-config.yaml` by entering the following command: ++ +[source,terminal] +---- +$ grep DB_URI /tmp/quay-backup/quay-config.yaml +---- ++ +Example output: ++ +---- +$ postgresql://:test123@172.24.10.50/quay +---- + +. Extract the PostgreSQL contents to your temporary backup directory in a backup .sql file by entering the following command: ++ +[source,terminal] +---- +$ pg_dump -h 172.24.10.50 -p 5432 -d quay -U -W -O > /tmp/quay-backup/quay-backup.sql +---- + +. Print the contents of your `DISTRIBUTED_STORAGE_CONFIG` by entering the following command: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - S3Storage + - s3_bucket: + storage_path: /registry + s3_access_key: + s3_secret_key: + host: + s3_region: +---- + +. Export the `AWS_ACCESS_KEY_ID` by using the `access_key` credential obtained in Step 7: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID= +---- + +. 
Export the `AWS_SECRET_ACCESS_KEY` by using the `secret_key` obtained in Step 7: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY= +---- + +. Sync the `quay` bucket to the `/tmp/quay-backup/blob-backup/` directory from the `hostname` of your `DISTRIBUTED_STORAGE_CONFIG`: ++ +[source,terminal] +---- +$ aws s3 sync s3:// /tmp/quay-backup/blob-backup/ --source-region us-east-2 +---- ++ +Example output: ++ +---- +download: s3:///registry/sha256/9c/9c3181779a868e09698b567a3c42f3744584ddb1398efe2c4ba569a99b823f7a to registry/sha256/9c/9c3181779a868e09698b567a3c42f3744584ddb1398efe2c4ba569a99b823f7a +download: s3:///registry/sha256/e9/e9c5463f15f0fd62df3898b36ace8d15386a6813ffb470f332698ecb34af5b0d to registry/sha256/e9/e9c5463f15f0fd62df3898b36ace8d15386a6813ffb470f332698ecb34af5b0d +---- +[NOTE] +==== +It is recommended that you delete the `quay-config.yaml` file after syncing the `quay` bucket because it contains sensitive information. The `quay-config.yaml` file will not be lost because it is backed up in the `quay-backup.tar.gz` file. +==== diff --git a/modules/branding-quay-deployment.adoc b/modules/branding-quay-deployment.adoc new file mode 100644 index 000000000..d5cc433dd --- /dev/null +++ b/modules/branding-quay-deployment.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="branding-quay-deployment"] += Branding a {productname} deployment on the legacy UI + +You can brand the UI of your {productname} deployment by changing the registry title, logo, footer image, and by directing users to a website embedded in the footer image. + +.Procedure + +. Update your {productname} `config.yaml` file to add the following parameters: ++ +[source,yaml] +---- +BRANDING: + logo: <1> + footer_img: <2> + footer_url: <3> +--- +REGISTRY_TITLE: <4> +REGISTRY_TITLE_SHORT: <5> +---- +<1> The URL of the image that will appear at the top of your {productname} deployment. +<2> The URL of the image that will appear at the bottom of your {productname} deployment. 
+<3> The URL of the website that users will be directed to when clicking the footer image. +<4> The long-form title for the registry. This is displayed in frontend of your {productname} deployment, for example, at the sign in page of your organization. +<5> The short-form title for the registry. The title is displayed on various pages of your organization, for example, as the title of the tutorial on your organization's *Tutorial* page. + +. Restart your {productname} deployment. After restarting, your {productname} deployment is updated with a new logo, footer image, and footer image URL. \ No newline at end of file diff --git a/modules/build-automation-intro.adoc b/modules/build-automation-intro.adoc new file mode 100644 index 000000000..ac7f1c652 --- /dev/null +++ b/modules/build-automation-intro.adoc @@ -0,0 +1,7 @@ +[[build-automation-intro]] += Build automation + + +* Seamless Git integration +* Build workers +* Webhooks \ No newline at end of file diff --git a/modules/build-enhanced-arch.adoc b/modules/build-enhanced-arch.adoc new file mode 100644 index 000000000..f123f588a --- /dev/null +++ b/modules/build-enhanced-arch.adoc @@ -0,0 +1,9 @@ +:_content-type: PROCEDURE +[id="red-hat-quay-builds-architecture"] += {productname} enhanced build architecture + +The following image shows the expected design flow and architecture of the enhanced build features: + +image:quay-builds-architecture.png[Enhanced Quay builds architecture] + +With this enhancement, the build manager first creates the `Job Object`. Then, the `Job Object` then creates a pod using the `quay-builder-image`. The `quay-builder-image` will contain the `quay-builder binary` and the `Podman` service. The created pod runs as `unprivileged`. The `quay-builder binary` then builds the image while communicating status and retrieving build information from the Build Manager. 
diff --git a/modules/build-enhancements.adoc b/modules/build-enhancements.adoc new file mode 100644 index 000000000..ed740c5e4 --- /dev/null +++ b/modules/build-enhancements.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="red-hat-quay-builders-enhancement"] += Virtual builds with {productname-ocp} + +ifeval::["{context}" == "use-quay"] +Documentation for the _builds_ feature has been moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/builders_and_image_automation/index[Builders and image automation]. This chapter will be removed in a future version of {productname}. +endif::[] + +ifeval::["{context}" == "operator-features"] +Documentation for the _builds_ feature has been moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/builders_and_image_automation/index[Builders and image automation]. This chapter will be removed in a future version of {productname}. +endif::[] + +ifeval::["{context}" == "quay-builders-image-automation"] +The procedures in this section explain how to create an environment for _virtual builds_ for {productname-ocp}. + + +_Virtual builds_ can be run on virtualized machines with {productname-ocp}. With this method, the _build manager_ first creates the `Job Object` resource. Then, the `Job Object` creates a pod using the `quay-builder-image`. The `quay-builder-image` contains the `quay-builder` binary and the Podman service. The created pod runs as `unprivileged`. The `quay-builder` binary then builds the image while communicating status and retrieving build information from the _build manager_. + +[id="quay-builds-limitations"] +== Virtual builds limitations + +The following limitations apply to the _virtual builds_ feature: + +* Running _virtual builds_ with {productname-ocp} in an unprivileged context might cause some commands that were working under the previous build strategy to fail.
Attempts to change the build strategy could potentially cause performance issues and reliability with the build. + +* Running _virtual builds_ directly in a container does not have the same isolation as using virtual machines. Changing the build environment might also cause builds that were previously working to fail. +endif::[] diff --git a/modules/build-limitations.adoc b/modules/build-limitations.adoc new file mode 100644 index 000000000..c1b6d8f02 --- /dev/null +++ b/modules/build-limitations.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="red-hat-quay-build-limitations"] += {productname} build limitations + +Running builds in {productname} in an unprivileged context might cause some commands that were working under the previous build strategy to fail. Attempts to change the build strategy could potentially cause performance issues and reliability with the build. + +Running builds directly in a container does not have the same isolation as using virtual machines. Changing the build environment might also cause builds that were previously working to fail. diff --git a/modules/build-logs-not-loading.adoc b/modules/build-logs-not-loading.adoc new file mode 100644 index 000000000..34774312f --- /dev/null +++ b/modules/build-logs-not-loading.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="build-logs-not-loading"] += Build logs are not loading + +In some cases, attempting to load logs for a repository build results in only a throbber icon, and no logs are displayed. This typically occurs when you are using a browser equipped with one of the following extensions: AdBlock, uBlock, or Privacy Badger. These browser extensions can cause the loading of build logs to be cancelled. To resolve this issue, disable the browser extension and reload the page.
\ No newline at end of file diff --git a/modules/build-pre-configuration.adoc b/modules/build-pre-configuration.adoc new file mode 100644 index 000000000..258a88ec4 --- /dev/null +++ b/modules/build-pre-configuration.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="build-pre-configuration"] += Setting up {productname} builders with {ocp} + +You must pre-configure {productname-ocp} to allow the use of the _builder_ workers before using the _builds_ feature. \ No newline at end of file diff --git a/modules/build-trigger-error.adoc b/modules/build-trigger-error.adoc new file mode 100644 index 000000000..2a7b5b3fc --- /dev/null +++ b/modules/build-trigger-error.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="build-trigger-error"] += Unable to add a build trigger + +In some cases, attempting to add a build trigger results in the following error message: `You are not admin on the SCM repository`. In order for {productname} to add the webhook callback necessary for Build Triggers, the user granting {productname} access to the SCM repository must have administrative access on that repository. \ No newline at end of file diff --git a/modules/build-trigger-overview.adoc b/modules/build-trigger-overview.adoc new file mode 100644 index 000000000..bda4be983 --- /dev/null +++ b/modules/build-trigger-overview.adoc @@ -0,0 +1,7 @@ +:_content-type: PROCEDURE +[id="build-trigger-overview"] += Build triggers + +_Build triggers_ are automated mechanisms that start a container image build when specific conditions are met, such as changes to source code, updates to dependencies, or link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/#webhook[creating a webhook call]. These triggers help automate the image-building process and ensure that the container images are always up-to-date without manual intervention. 
+ +The following sections cover content related to creating a build trigger, tag naming conventions, how to skip a source control-triggered build, starting a _build_, or manually triggering a _build_. diff --git a/modules/builders-virtual-environment.adoc b/modules/builders-virtual-environment.adoc new file mode 100644 index 000000000..8934a3086 --- /dev/null +++ b/modules/builders-virtual-environment.adoc @@ -0,0 +1,247 @@ +:_content-type: PROCEDURE +[id="builders-virtual-environment"] += Configuring virtual builds for {productname-ocp} + +The procedures in this section explain how to create an environment for _virtual builds_ for {productname-ocp}. + +[NOTE] +==== +* If you are using Amazon Web Service (AWS) S3 storage, you must modify your storage bucket in the AWS console, prior to running builders. See "Modifying your AWS S3 storage bucket" in the following section for the required parameters. +* If you are using a Google Cloud Platform (GCP) object bucket, you must configure cross-origin resource sharing (CORS) to enable _virtual builds_. +==== + +.Prerequisites + +* You have an {ocp} cluster provisioned with the {productname} Operator running. +* You have set the `tls` component to `unmanaged` and uploaded custom SSL/TLS certificates to the {productname} Operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. +* You have configured the {ocp} TLS component for builds. +* You are logged into {ocp} as a cluster administrator. + +.Procedure + +. Create a new project where your virtual builders will be run, for example, `virtual-builders`, by running the following command: ++ +[source,terminal] +---- +$ oc new-project virtual-builders +---- + +. 
Create a `ServiceAccount` in the project that will be used to run _builds_ by entering the following command: ++ +[source,terminal] +---- +$ oc create sa -n virtual-builders quay-builder +---- ++ +.Example output ++ +[source,terminal] +---- +serviceaccount/quay-builder created +---- + +. Provide the created service account with editing permissions so that it can run a _build_: ++ +[source,terminal] +---- +$ oc adm policy -n virtual-builders add-role-to-user edit system:serviceaccount:virtual-builders:quay-builder +---- ++ +.Example output ++ +[source,terminal] +---- +clusterrole.rbac.authorization.k8s.io/edit added: "system:serviceaccount:virtual-builders:quay-builder" +---- + +. Grant the _builder_ worker `anyuid scc` permissions by entering the following command. This requires cluster administrator privileges, which is required because _builders_ must run as the Podman user for unprivileged or rootless builds to work. ++ +[source,terminal] +---- +$ oc adm policy -n virtual-builders add-scc-to-user anyuid -z quay-builder +---- ++ +.Example output ++ +[source,terminal] +---- +clusterrole.rbac.authorization.k8s.io/system:openshift:scc:anyuid added: "quay-builder" +---- + +. Obtain the token for the _builder_ service account by entering the following command: ++ +[source,terminal] +---- +$ oc create token quay-builder -n virtual-builders +---- ++ +[NOTE] +==== +When the token expires you will need to request a new token. Optionally, you can also add a custom expiration. For example, specify `--duration 20160m` to retain the token for two weeks. +==== ++ +.Example output ++ +[source,terminal] +---- +eyJhbGciOiJSUzI1NiIsImtpZCI6IldfQUJkaDVmb3ltTHZ0dGZMYjhIWnYxZTQzN2dJVEJxcDJscldSdEUtYWsifQ... +---- + +. 
Determine the _builder_ route by entering the following command: ++ +[source,terminal] +---- +$ oc get route -n quay-enterprise +---- ++ +.Example output +[source,terminal] +---- +NAME: example-registry-quay-builder +HOST/PORT: example-registry-quay-builder-quay-enterprise.apps.stevsmit-cluster-new.gcp.quaydev.org +PATH: +SERVICES: example-registry-quay-app +PORT: grpc +TERMINATION: passthrough/Redirect +WILDCARD: None +---- + +. Generate a self-signed SSL/TlS certificate with the `.crt` extension by entering the following command: ++ +[source,terminal] +---- +$ oc extract cm/kube-root-ca.crt -n openshift-apiserver +---- ++ +.Example output ++ +[source,terminal] +---- +ca.crt +---- + +. Rename the `ca.crt` file to `build-cluster.crt` by entering the following command: ++ +[source,terminal] +---- +$ mv ca.crt build-cluster.crt +---- + +. Update the `config.yaml` file of your {productname-ocp} deployment to include an appropriate _virtual builds_ configuration by using the {ocp} web console. + +.. Click *Operators* -> *Installed Operators* -> *Red Hat Quay* -> *Quay Registry*. + +.. Click the name of your registry, for example, *example-registry*. + +.. Under *Config Bundle Secret*, click the name of your configuration bundle, for example, *extra-ca-certificate-config-bundle-secret*. + +.. Click *Actions* -> *Edit Secret*. + +.. 
Add an appropriate _virtual builds_ configuration using the following as a reference: ++ +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- +FEATURE_USER_CREATION: false +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_BUILD_SUPPORT: True +BUILDMAN_HOSTNAME: <1> +BUILD_MANAGER: + - ephemeral + - ALLOWED_WORKER_COUNT: 1 + ORCHESTRATOR_PREFIX: buildman/production/ + JOB_REGISTRATION_TIMEOUT: 3600 <2> + ORCHESTRATOR: + REDIS_HOST: <3> + REDIS_PASSWORD: "" + REDIS_SSL: false + REDIS_SKIP_KEYSPACE_EVENT_SETUP: false + EXECUTORS: + - EXECUTOR: kubernetesPodman + NAME: openshift + BUILDER_NAMESPACE: <4> + SETUP_TIME: 180 + MINIMUM_RETRY_THRESHOLD: 0 + BUILDER_CONTAINER_IMAGE: quay.io/projectquay/quay-builder:{producty} + # Kubernetes resource options + K8S_API_SERVER: <5> + K8S_API_TLS_CA: <6> + VOLUME_SIZE: 8G + KUBERNETES_DISTRIBUTION: openshift + CONTAINER_MEMORY_LIMITS: 1G <7> + CONTAINER_CPU_LIMITS: 300m <8> + CONTAINER_MEMORY_REQUEST: 1G <9> + CONTAINER_CPU_REQUEST: 300m <10> + NODE_SELECTOR_LABEL_KEY: "" + NODE_SELECTOR_LABEL_VALUE: "" + SERVICE_ACCOUNT_NAME: + SERVICE_ACCOUNT_TOKEN: <11> + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- ++ +<1> The build route is obtained by running `$ oc get route -n` with the namespace of your {productname-ocp} deployment. A port must be provided at the end of the route, and it should use the following format: `[quayregistry-cr-name]-quay-builder-[ocp-namespace].[ocp-domain-name]:443`. +<2> If the `JOB_REGISTRATION_TIMEOUT` parameter is set too low, you might receive the following error: `failed to register job to build manager: rpc error: code = Unauthenticated desc = Invalid build token: Signature has expired`. This parameter should be set to at least `240`. +<3> If your Redis host has a password or SSL/TLS certificates, you must update this field accordingly. +<4> Set to match the name of your _virtual builds_ namespace. This example used `virtual-builders`. 
+<5> The `K8S_API_SERVER` is obtained by running `$ oc cluster-info`. +<6> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build-cluster.crt`. +<7> Defaults to `5120Mi` if left unspecified. +<8> For _virtual builds_, you must ensure that there are enough resources in your cluster. Defaults to `1000m` if left unspecified. +<9> Defaults to `3968Mi` if left unspecified. +<10> Defaults to `500m` if left unspecified. +<11> Obtained when running `$ oc create sa`. ++ +.Example _virtual builds_ configuration +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- quayadmin +FEATURE_USER_CREATION: false +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_BUILD_SUPPORT: True +BUILDMAN_HOSTNAME: example-registry-quay-builder-quay-enterprise.apps.docs.quayteam.org:443 +BUILD_MANAGER: + - ephemeral + - ALLOWED_WORKER_COUNT: 1 + ORCHESTRATOR_PREFIX: buildman/production/ + JOB_REGISTRATION_TIMEOUT: 3600 + ORCHESTRATOR: + REDIS_HOST: example-registry-quay-redis + REDIS_PASSWORD: "" + REDIS_SSL: false + REDIS_SKIP_KEYSPACE_EVENT_SETUP: false + EXECUTORS: + - EXECUTOR: kubernetesPodman + NAME: openshift + BUILDER_NAMESPACE: virtual-builders + SETUP_TIME: 180 + MINIMUM_RETRY_THRESHOLD: 0 + BUILDER_CONTAINER_IMAGE: quay.io/projectquay/quay-builder:{producty} + # Kubernetes resource options + K8S_API_SERVER: api.docs.quayteam.org:6443 + K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build-cluster.crt + VOLUME_SIZE: 8G + KUBERNETES_DISTRIBUTION: openshift + CONTAINER_MEMORY_LIMITS: 1G + CONTAINER_CPU_LIMITS: 300m + CONTAINER_MEMORY_REQUEST: 1G + CONTAINER_CPU_REQUEST: 300m + NODE_SELECTOR_LABEL_KEY: "" + NODE_SELECTOR_LABEL_VALUE: "" + SERVICE_ACCOUNT_NAME: quay-builder + SERVICE_ACCOUNT_TOKEN: "eyJhbGciOiJSUzI1NiIsImtpZCI6IldfQUJkaDVmb3ltTHZ0dGZMYjhIWnYxZTQzN2dJVEJxcDJscldSdEUtYWsifQ" + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- + +.. Click *Save* on the *Edit Secret* page. + +. 
Restart your {productname-ocp} registry with the new configuration. \ No newline at end of file diff --git a/modules/builds-overview.adoc b/modules/builds-overview.adoc new file mode 100644 index 000000000..8a6a89ada --- /dev/null +++ b/modules/builds-overview.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="builds-overview"] += {productname} builds overview + +_{productname} builds_, or just _builds_, are a feature that enable the automation of container image builds. The _builds_ feature uses worker nodes to build images from Dockerfiles or other build specifications. These builds can be triggered manually or automatically via webhooks from repositories like GitHub, allowing users to integrate continuous integration (CI) and continuous delivery (CD) pipelines into their workflow. + +The _builds_ feature is supported on {productname-ocp} and Kubernetes clusters. For Operator-based deployments and Kubernetes clusters, _builds_ are created by using a _build manager_ that coordinates and handles the build jobs. _Builds_ support building Dockerfile on both bare metal platforms and on virtualized platforms with _virtual builders_. This versatility allows organizations to adapt to existing infrastructure while leveraging {productname}'s container image build capabilities. + +The key features of _{productname} builds_ feature include: + +* Automated builds triggered by code commits or version control events +* Support for Docker and Podman container images +* Fine-grained control over build environments and resources +* Integration with Kubernetes and {ocp} for scalable builds +* Compatibility with bare metal and virtualized infrastructure + +[NOTE] +==== +Running _builds_ directly in a container on bare metal platforms does not have the same isolation as when using virtual machines, however, it still provides good protection. 
+==== + +_Builds_ are highly complex, and administrators are encouraged to review the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_architecture/index#arch-intro-build-automation[Build automation] architecture guide before continuing. \ No newline at end of file diff --git a/modules/cannot-access-private-repo.adoc b/modules/cannot-access-private-repo.adoc new file mode 100644 index 000000000..35b20d351 --- /dev/null +++ b/modules/cannot-access-private-repo.adoc @@ -0,0 +1,26 @@ +:_content-type: CONCEPT +[id="cannot-access-private-repo"] += Unable to access private repositories using Amazon EC2 Container Service + +In some cases, authentication fails while attempting to use Amazon Elastic Container Service (ECS). This error occurs when the authentication configuration in the `ecs.config` file is missing. + +In order for ECS to pull down Docker images, the following information must be included in the ECS configuration file that is located in the `/etc/ecs/ecs.config` file: + +[source,yaml] +---- +ECS_ENGINE_AUTH_TYPE=dockercfg +ECS_ENGINE_AUTH_DATA={"https://quay.io": {"auth": "YOURAUTHTOKENFROMDOCKERCFG", "email": "user@example.com"}} +---- + +If you are using a robot account, you must include the username: + +[source,terminal] +---- +ECS_ENGINE_AUTH_TYPE=dockercfg <1> +ECS_ENGINE_AUTH_DATA={"https://quay.io": {"auth": "YOURAUTHTOKENFROMDOCKERCFG", "email": ".", "username": "USERNAME"}} +---- +<1> This field is the contents of the `auths` attribute in `.docker/config.json` starting at Docker version 1.7.0, or the contents of `.dockercfg` before that. + +After you have updated the configuration file, restart the ECS service. + +For more information about ECS, see link:https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html[Private registry authentication for tasks]. 
\ No newline at end of file diff --git a/modules/cannot-locate-dockerfile.adoc b/modules/cannot-locate-dockerfile.adoc new file mode 100644 index 000000000..b57f663bf --- /dev/null +++ b/modules/cannot-locate-dockerfile.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="cannot-locate-dockerfile"] += Unable to locate specified Dockerfile + +When building an image, the following error is returned: `A build step failed: API error (500): Cannot locate specified Dockerfile: Dockerfile`. This occurs for one of two reasons: + +* *The `.dockerignore` file contains the Dockerfile.* Unlike Docker Hub, the Dockerfile is part of the Build Context on {productname}. The Dockerfile must not appear in the `.dockerignore` file. Remove the Dockerfile from the `.dockerignore` file to resolve the issue. + +* *The build trigger is incorrect.* Verify the Dockerfile location and the branch or tag value specified in the build trigger. \ No newline at end of file diff --git a/modules/cannot-reach-registry-endpoint.adoc b/modules/cannot-reach-registry-endpoint.adoc new file mode 100644 index 000000000..870cfc140 --- /dev/null +++ b/modules/cannot-reach-registry-endpoint.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="cannot-reach-registry-endpoint"] += Unable to reach registry endpoint + +In some cases, trying to pull a Docker image returns the following error: `Could not reach any registry endpoint`. This usually occurs because you are attempting to pull a non-existent tag. If you do not specify a tag, newer versions of Docker attempt to pull the "latest" tag, regardless of whether it actually exists. 
\ No newline at end of file diff --git a/modules/changing-storage-solution.adoc b/modules/changing-storage-solution.adoc new file mode 100644 index 000000000..924c071d3 --- /dev/null +++ b/modules/changing-storage-solution.adoc @@ -0,0 +1,12 @@ +:_content-type: PROCEDURE +[id="changing-storage-solution"] += Unable to change storage solution for Quay pods + +In some cases, there are 2 persistent volume claims (PVCs) present in the `Quay` namespace, and the `Quay` pod is binding to the incorrect PVC instead of the expected one. When attempting to manually change the PVC to the desired storage solution, it might revert back to the incorrect storage solution. Because the storage class used by the local PVC is automatically set as the local PVC, your local PVC is selected over the {productname} PVC. + +As a workaround for this issue, you can change the default storage class to one that controls the desired persistent volume claim. Afterwards, the {productname} Operator, by default, refers to the PVC created by the default storage class. See the {ocp} documentation for link:https://docs.openshift.com/container-platform/{ocp-y}/storage/dynamic-provisioning.html#change-default-storage-class_dynamic-provisioning[Changing the default storage class] to resolve this issue. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6202532[Can't change Storage Solution for Quay pods]. \ No newline at end of file diff --git a/modules/clair-add-info.adoc b/modules/clair-add-info.adoc new file mode 100644 index 000000000..69a94a163 --- /dev/null +++ b/modules/clair-add-info.adoc @@ -0,0 +1,4 @@ +[[clair-add-info]] += Additional Information + +For detailed documentation on the internals of Clair, including how the microservices are structured, please see the link:https://quay.github.io/clair[Upstream Clair] and link:https://quay.github.io/claircore[ClairCore] documentation. 
diff --git a/modules/clair-advanced-configuration-overview.adoc b/modules/clair-advanced-configuration-overview.adoc new file mode 100644 index 000000000..e036fd434 --- /dev/null +++ b/modules/clair-advanced-configuration-overview.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="clair-advanced-configuration-overview"] += Advanced Clair configuration + +Use the procedures in the following sections to configure advanced Clair settings. \ No newline at end of file diff --git a/modules/clair-airgap.adoc b/modules/clair-airgap.adoc new file mode 100644 index 000000000..553b79a7e --- /dev/null +++ b/modules/clair-airgap.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="clair-airgap"] +== Air gapped Clair + +For flexibility, Clair supports running updaters in a separate environment and importing the results. This is aimed at supporting installations that prevent the Clair cluster from communicating with the internet directly. diff --git a/modules/clair-analyses.adoc b/modules/clair-analyses.adoc new file mode 100644 index 000000000..fd368ddf9 --- /dev/null +++ b/modules/clair-analyses.adoc @@ -0,0 +1,28 @@ +[[clair-analyses]] += Understanding Clair analyses + +Clair analyses can be broken down into three distinct parts: + +- **Indexing**: Indexing starts with submitting a `Manifest` to Clair. On receipt, Clair will fetch layers, scan their contents, and return an intermediate representation called an `IndexReport`. ++ +Manifests are Clair's representation of a container image. Clair leverages the fact `OCI Manifests` and `Layers` are content-addressed to reduce duplicated work. ++ +Once a `Manifest` is indexed, the `IndexReport` is persisted for later retrieval. + +- **Matching**: Matching is taking an `IndexReport` and correlating vulnerabilities affecting the `Manifest` the report represents. ++ +Clair continuously ingests new security data and a request to the matcher will always provide users with the most up to date vulnerability analysis of an `IndexReport`. 
+ +- **Notifications**: Clair implements a notification service. When new vulnerabilities are discovered, the notifier service will determine if these vulnerabilities affect any indexed `Manifests`. The notifier will then take action according to its configuration. + +== Notifications for vulnerabilities found by Clair + +Since {productname} 3.4, different notifications are triggered for various repository events. These notifications vary based on enabled features. + +[NOTE] +==== +This includes the event type `Package Vulnerability Found` +==== + +`Additional Filter` can be applied for `Security Level`, and there are various notification methods. Custom notification titles are also optional. + diff --git a/modules/clair-authentication.adoc b/modules/clair-authentication.adoc new file mode 100644 index 000000000..a2f3a6276 --- /dev/null +++ b/modules/clair-authentication.adoc @@ -0,0 +1,31 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-authentication"] += Clair authentication + +In its current iteration, Clair v4 (Clair) handles authentication internally. + +[NOTE] +==== +Previous versions of Clair used JWT Proxy to gate authentication. +==== + +Authentication is configured by specifying configuration objects underneath the `auth` key of the configuration. Multiple authentication configurations might be present, but they are used preferentially in the following order: + +. PSK. With this authentication configuration, Clair implements JWT-based authentication using a pre-shared key. + +. Configuration. For example: ++ +[source,yaml] +---- +auth: + psk: + key: >- + MDQ4ODBlNDAtNDc0ZC00MWUxLThhMzAtOTk0MzEwMGQwYTMxCg== + iss: 'issuer' +---- ++ +In this configuration the `auth` field requires two parameters: `iss`, which is the issuer to validate all incoming requests, and `key`, which is a base64 coded symmetric key for validating the requests. 
\ No newline at end of file diff --git a/modules/clair-clairctl-standalone.adoc b/modules/clair-clairctl-standalone.adoc new file mode 100644 index 000000000..501e5057f --- /dev/null +++ b/modules/clair-clairctl-standalone.adoc @@ -0,0 +1,30 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-disconnected-standalone-configuration"] += Setting up a self-managed deployment of Clair for a disconnected {ocp} cluster + +Use the following procedures to set up a self-managed deployment of Clair for a disconnected {ocp} cluster. + +[id="clair-clairctl-standalone"] +== Installing the clairctl command line utility tool for a self-managed Clair deployment on {ocp} + +Use the following procedure to install the `clairctl` CLI tool for self-managed Clair deployments on {ocp}. + +.Procedure + +. Install the `clairctl` program for a self-managed Clair deployment by using the `podman cp` command, for example: ++ +[source,terminal] +---- +$ sudo podman cp clairv4:/usr/bin/clairctl ./clairctl +---- + +. Set the permissions of the `clairctl` file so that it can be executed and run by the user, for example: ++ +[source,terminal] +---- +$ chmod u+x ./clairctl +---- \ No newline at end of file diff --git a/modules/clair-clairctl.adoc b/modules/clair-clairctl.adoc new file mode 100644 index 000000000..a76527c47 --- /dev/null +++ b/modules/clair-clairctl.adoc @@ -0,0 +1,35 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-disconnected-ocp-configuration"] += Setting up Clair in a disconnected {ocp} cluster + +Use the following procedures to set up an {ocp} provisioned Clair pod in a disconnected {ocp} cluster. + +[id="clair-clairctl-ocp"] +== Installing the clairctl command line utility tool for {ocp} deployments + +Use the following procedure to install the `clairctl` CLI tool for {ocp} deployments. + +.Procedure + +. 
Install the `clairctl` program for a Clair deployment in an {ocp} cluster by entering the following command: ++ +[source,terminal] +---- +$ oc -n quay-enterprise exec example-registry-clair-app-64dd48f866-6ptgw -- cat /usr/bin/clairctl > clairctl +---- ++ +[NOTE] +==== +Unofficially, the `clairctl` tool can be downloaded +==== + +. Set the permissions of the `clairctl` file so that it can be executed and run by the user, for example: ++ +[source,terminal] +---- +$ chmod u+x ./clairctl +---- \ No newline at end of file diff --git a/modules/clair-concepts.adoc b/modules/clair-concepts.adoc new file mode 100644 index 000000000..fecc969db --- /dev/null +++ b/modules/clair-concepts.adoc @@ -0,0 +1,181 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-concepts"] += Clair concepts + +The following sections provide a conceptual overview of how Clair works. + +[id="clair-practice"] +== Clair in practice + +A Clair analysis is broken down into three distinct parts: indexing, matching, and notification. + +[id="clair-indexing-concept"] +=== Indexing + +Clair's indexer service plays a crucial role in understanding the makeup of a container image. In Clair, container image representations are called "manifests." Manifests are used to comprehend the contents of the image's layers. To streamline this process, Clair takes advantage of the fact that Open Container Initiative (OCI) manifests and layers are designed for content addressing, reducing repetitive tasks. + +During indexing, a manifest that represents a container image is taken and broken down into its essential components. The indexer's job is to uncover the image's contained packages, its origin distribution, and the package repositories it relies on. This valuable information is then recorded and stored within Clair's database. The insights gathered during indexing serve as the basis for generating a comprehensive vulnerability report. 
This report can be seamlessly transferred to a matcher node for further analysis and action, helping users make informed decisions about their container images' security. + +ifeval::["{context}" == "quay-io"] +The `IndexReport` is stored in Clair's database. In {quayio}, it is automatically fed to a `matcher` node to compute the vulnerability report after an image is pushed to a repository. +endif::[] +ifeval::["{context}" == "clair"] +The `IndexReport` is stored in Clair's database. It can be fed to a `matcher` node to compute the vulnerability report. +endif::[] + +//// +[id="content-addressability"] +==== Content addressability + +Clair treats all manifests and layers as _content addressable_. In the context of Clair, content addressable means that when a specific manifest is indexed, it is not indexed again unless it is required; this is the same for individual layers. + +For example, consider how many images in a registry might use `ubuntu:artful` as a base layer. If the developers prefer basing their images off of Ubuntu, it could be a large majority of images. Treating the layers and manifests as content addressable means that Clair only fetches and analyzes the base layer one time. + +In some cases, Clair should re-index a manifest. For example, when an internal component such as a package scanner is updated, Clair performs the analysis with the new package scanner. Clair has enough information to determine that a component has changed and that the `IndexReport` might be different the second time, and as a result it re-indexes the manifest. + +ifeval::["{context}" == "clair"] +A client can track Clair's `index_state` endpoint to understand when an internal component has changed, and can subsequently issue re-indexes. See the Clair API guide to learn how to view Clair's API specification. +//// + +[id="clair-matching-concept"] +=== Matching + +With Clair, a matcher node is responsible for matching vulnerabilities to a provided index report. 
+ +Matchers are responsible for keeping the database of vulnerabilities up to date. Matchers run a set of updaters, which periodically probe their data sources for new content. New vulnerabilities are stored in the database when they are discovered. + +The matcher API is designed to always provide the most recent vulnerability report when queried. The vulnerability report summarizes both a manifest's content and any vulnerabilities affecting the content. + +ifeval::["{context}" == "quay-io"] +On {quayio}, this interval is set to 6 hours. +endif::[] +New vulnerabilities are stored in the database when they are discovered. + +ifeval::["{context}" == "clair"] +The matcher API is designed to be used often. It is designed to always provide the most recent `VulnerabilityReport` when queried. The `VulnerabilityReport` summarizes both a manifest's content and any vulnerabilities affecting the content. +endif::[] + +// See. . . to learn more about how to view the Clair API specification and to work with the matcher API. + +//// +[id="remote-matching"] +==== Remote matching + +A remote matcher acts similar to a matcher, however remote matchers use API calls to fetch vulnerability data for a provided `IndexReport`. Remote matchers are useful when it is impossible to persist data from a given source into the database. + +The CRDA remote matcher is responsible for fetching vulnerabilities from Red Hat Code Ready Dependency Analytics (CRDA). By default, this matcher serves 100 requests per minute. The rate limiting can be lifted by requesting a dedicated API key, which is done by submitting link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form]. + +To enable CRDA remote matching, see "Enabling CRDA for Clair". +//// + +[id="clair-notifications-concept"] +=== Notifier service + +ifeval::["{context}" == "quay-io"] +By default, the notifier service on {quayio} is disabled. 
As a result, repository owners cannot set up notifications when new CVEs are reported. However, when CVE databases are updated, which is every 6 hours on {quayio}, new vulnerabilities affecting previously indexed manifests are automatically updated. As a result, manual re-scans are not required, and users can view new CVEs directly on {quayio}. See "Viewing Clair security scans" for more information. +endif::[] + +ifeval::["{context}" == "clair"] +Clair uses a notifier service that keeps track of new security database updates and informs users if new or removed vulnerabilities affect an indexed manifest. + +When the notifier becomes aware of new vulnerabilities affecting a previously indexed manifest, it uses the configured methods in your `config.yaml` file to issue notifications about the new changes. Returned notifications express the most severe vulnerability discovered because of the change. This avoids creating excessive notifications for the same security database update. + +When a user receives a notification, it issues a new request against the matcher to receive an up-to-date vulnerability report. 
+//// +The notification schema is the JSON marshalled form of the following types: + +[source,json] +---- +// Reason indicates the catalyst for a notification +type Reason string +const ( + Added Reason = "added" + Removed Reason = "removed" + Changed Reason = "changed" +) +type Notification struct { + ID uuid.UUID `json:"id"` + Manifest claircore.Digest `json:"manifest"` + Reason Reason `json:"reason"` + Vulnerability VulnSummary `json:"vulnerability"` +} +type VulnSummary struct { + Name string `json:"name"` + Description string `json:"description"` + Package *claircore.Package `json:"package,omitempty"` + Distribution *claircore.Distribution `json:"distribution,omitempty"` + Repo *claircore.Repository `json:"repo,omitempty"` + Severity string `json:"severity"` + FixedInVersion string `json:"fixed_in_version"` + Links string `json:"links"` +} +---- +//// + +You can subscribe to notifications through the following mechanics: + +* Webhook delivery +* AMQP delivery +* STOMP delivery + +Configuring the notifier is done through the Clair YAML configuration file. + +//// +[id=webhook-delivery] +==== Webhook delivery + +When you configure the notifier for webhook delivery, you provide the service with the following pieces of information: + +* A target URL where the webhook will fire. +* The callback URL where the notifier might be reached, including its API path. For example, `http://clair-notifier/notifier/api/v1/notifications`. + +When the notifier has determined an updated security database has been changed the affected status of an indexed manifest, it delivers the following JSON body to the configured target: + +[source,json] +---- +{ + "notification_id": {uuid_string}, + "callback": {url_to_notifications} +} +---- + +On receipt, the server can browse to the URL provided in the callback field. + +[id="amqp-delivery"] +==== AMQP delivery + +The Clair notifier also supports delivering notifications to an AMQP broker. 
With AMQP delivery, you can control whether a callback is delivered to the broker or whether notifications are directly delivered to the queue. This allows the developer of the AMQP consumer to determine the logic of notification processing. + +[NOTE] +==== +AMQP delivery only supports AMQP 0.x protocol (for example, RabbitMQ). If you need to publish notifications to AMQP 1.x message queue (for example, ActiveMQ), you can use STOMP delivery. +==== + +[id="amqp-direct-delivery"] +===== AMQP direct delivery + +If the Clair notifier's configuration specifies `direct: true` for AMQP delivery, notifications are delivered directly to the configured exchange. + +When `direct` is set, the `rollup` property might be set to instruct the notifier to send a maximum number of notifications in a single AMQP. This provides balance between the size of the message and the number of messages delivered to the queue. + +[id="notifier-testing-development"] +==== Notifier testing and development mode + +The notifier has a testing and development mode that can be enabled with the `NOTIFIER_TEST_MODE` parameter. This parameter can be set to any value. + +When the `NOTIFIER_TEST_MODE` parameter is set, the notifier begins sending fake notifications to the configured delivery mechanism every `poll_interval` interval. This provides an easy way to implement and test new or existing deliverers. + +The notifier runs in `NOTIFIER_TEST_MODE` until the environment variable is cleared and the service is restarted. + +[id="deleting-notifications"] +==== Deleting notifications + +To delete the notification, you can use the `DELETE` API call. Deleting a notification ID manually cleans up resources in the notifier. If you do not use the `DELETE` API call, the notifier waits a predetermined length of time before clearing delivered notifications from its database. +endif::[] + +// For more information on the `DELETE` API call, see. . . 
+//// \ No newline at end of file diff --git a/modules/clair-crda-configuration.adoc b/modules/clair-crda-configuration.adoc new file mode 100644 index 000000000..975ca1997 --- /dev/null +++ b/modules/clair-crda-configuration.adoc @@ -0,0 +1,34 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-crda-configuration"] += Enabling Clair CRDA + +Java scanning depends on a public, Red Hat provided API service called Code Ready Dependency Analytics (CRDA). CRDA is only available with internet access and is not enabled by default. + +Use the following procedure to integrate the CRDA service with a custom API key and enable CRDA for Java and Python scanning. + +.Prerequisites + +* {productname} 3.7 or greater + +.Procedure + +. Submit link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form] to obtain the Quay-specific CRDA remote matcher. + +. Set the CRDA configuration in your `clair-config.yaml` file: ++ +[source,terminal] +---- +matchers: + config: + crda: + url: https://gw.api.openshift.io/api/v2/ + key: <1> + source: <2> +---- ++ +<1> Insert the Quay-specific CRDA remote matcher from link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form] here. +<2> The hostname of your Quay server. \ No newline at end of file diff --git a/modules/clair-cve.adoc b/modules/clair-cve.adoc new file mode 100644 index 000000000..5c9142bae --- /dev/null +++ b/modules/clair-cve.adoc @@ -0,0 +1,17 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-cve"] += CVE ratings from the National Vulnerability Database + +As of Clair v4.2, Common Vulnerability Scoring System (CVSS) enrichment data is now viewable in the {productname} UI. Additionally, Clair v4.2 adds CVSS scores from the National Vulnerability Database for detected vulnerabilities. 
+ +With this change, if the vulnerability has a CVSS score that is within 2 levels of the distribution score, the {productname} UI presents the distribution's score by default. For example: + +image:clair-4-2-enrichment-data.png[Clair v4.2 data display] + +This differs from the previous interface, which would only display the following information: + +image:clair-4-0-cve-report.png[Clair v4 data display] diff --git a/modules/clair-disconnected.adoc b/modules/clair-disconnected.adoc new file mode 100644 index 000000000..4241fe660 --- /dev/null +++ b/modules/clair-disconnected.adoc @@ -0,0 +1,18 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-disconnected-environments"] += Clair in disconnected environments + +Clair uses a set of components called _updaters_ to handle the fetching and parsing of data from various vulnerability databases. Updaters are set up by default to pull vulnerability data directly from the internet and work for immediate use. However, some users might require {productname} to run in a disconnected environment, or an environment without direct access to the internet. Clair supports disconnected environments by working with different types of update workflows that take network isolation into consideration. This works by using the `clairctl` command line interface tool, which obtains updater data from the internet by using an open host, securely transferring the data to an isolated host, and then importing the updater data on the isolated host into Clair. + +Use this guide to deploy Clair in a disconnected environment. + +[NOTE] +==== +Currently, Clair enrichment data is CVSS data. Enrichment data is currently unsupported in disconnected environments. +==== + +For more information about Clair updaters, see "Clair updaters". 
\ No newline at end of file diff --git a/modules/clair-distroless-container-images.adoc b/modules/clair-distroless-container-images.adoc new file mode 100644 index 000000000..b728a2c4d --- /dev/null +++ b/modules/clair-distroless-container-images.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="clair-distroless-container-images"] += Does Clair support scanning of distro-less container images? + +Support for scanning distro-less containers was added in Clair 4.6.1. This feature is not present in earlier versions. For Clair on the {productname} Operator, this feature was released with {productname} 3.8.7. \ No newline at end of file diff --git a/modules/clair-export-bundle-standalone.adoc b/modules/clair-export-bundle-standalone.adoc new file mode 100644 index 000000000..bacdc3636 --- /dev/null +++ b/modules/clair-export-bundle-standalone.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-export-bundle-standalone"] += Exporting the updaters bundle from a connected Clair instance + +Use the following procedure to export the updaters bundle from a Clair instance that has access to the internet. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have deployed Clair. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. + +.Procedure + +* From a Clair instance that has access to the internet, use the `clairctl` CLI tool with your configuration file to export the updaters bundle. 
For example: ++ +[source,terminal] +---- +$ ./clairctl --config ./config.yaml export-updaters updates.gz +---- \ No newline at end of file diff --git a/modules/clair-export-bundle.adoc b/modules/clair-export-bundle.adoc new file mode 100644 index 000000000..0c11ce7c2 --- /dev/null +++ b/modules/clair-export-bundle.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-export-bundle"] += Exporting the updaters bundle from a connected Clair instance + +Use the following procedure to export the updaters bundle from a Clair instance that has access to the internet. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. + +.Procedure + +* From a Clair instance that has access to the internet, use the `clairctl` CLI tool with your configuration file to export the updaters bundle. For example: ++ +[source,terminal] +---- +$ ./clairctl --config ./config.yaml export-updaters updates.gz +---- \ No newline at end of file diff --git a/modules/clair-intro.adoc b/modules/clair-intro.adoc new file mode 100644 index 000000000..3338ac0fb --- /dev/null +++ b/modules/clair-intro.adoc @@ -0,0 +1,14 @@ +[[clair-intro]] += {productname} vulnerability scanning using Clair + +Clair is equipped with three types of scanners, and a matcher and an updater: + +- **Distribution Scanner**: This scanner discovers `Distribution` information, which is typically the base operating system the layer demonstrates features of. + +- **Package Scanner**: This scanner performs a package scan on the selected layer and returns all of the found packages. + +- **Repository Scanner**: This scanner discovers any package repositories that are present in the layers. 
+ +- **Matcher**: Matcher implementation is responsible for telling ClairCore which packages to query, how to query the security advisory database, and whether the discovered `Vulnerability` from the security advisory database affects the provided package. + +- **Updater**: The updater is responsible for fetching a security advisory database and parsing the contents. diff --git a/modules/clair-notifications.adoc b/modules/clair-notifications.adoc new file mode 100644 index 000000000..8bf726c6e --- /dev/null +++ b/modules/clair-notifications.adoc @@ -0,0 +1,6 @@ +[[clair-notifications]] += Clair Notifications + +When Clair receives a new vulnerability affecting a previously indexed manifest, it will notify {productname} so that a new scan can be requested. Only the most severe vulnerabilities trigger a notification to avoid excessive scan requests. This notification mechanism is automatically set up when Clair is configured in {productname}'s configuration. + +Clair notifications can also be set up for external consumption via AMQP and STOMP protocols. For details on how to set this up please consult the link:https://quay.github.io/clair/concepts/notifications.html[upstream Clair documentation]. diff --git a/modules/clair-openshift-airgap-database-standalone.adoc b/modules/clair-openshift-airgap-database-standalone.adoc new file mode 100644 index 000000000..535e5c59d --- /dev/null +++ b/modules/clair-openshift-airgap-database-standalone.adoc @@ -0,0 +1,64 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-openshift-airgap-database-standalone"] += Configuring access to the Clair database in the disconnected {ocp} cluster + +Use the following procedure to configure access to the Clair database in your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have deployed Clair. 
+* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. + +.Procedure + +. Determine your Clair database service by using the `oc` CLI tool, for example: +[source,terminal] ++ +---- +$ oc get svc -n quay-enterprise +---- ++ +.Example output ++ +[source,terminal] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +example-registry-clair-app ClusterIP 172.30.224.93 80/TCP,8089/TCP 4d21h +example-registry-clair-postgres ClusterIP 172.30.246.88 5432/TCP 4d21h +... +---- + +. Forward the Clair database port so that it is accessible from the local machine. For example: ++ +[source,terminal] +---- +$ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432 +---- + +. Update your Clair `config.yaml` file, for example: ++ +[source,yaml] +---- +indexer: + connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1> + layer_scan_concurrency: 5 + migrations: true + scanlock_retry: 10 + airgap: true + scanner: + repo: + rhel-repository-scanner: <2> + repo2cpe_mapping_file: /data/repository-to-cpe.json + package: + rhel_containerscanner: <3> + name2repos_mapping_file: /data/container-name-repos-map.json +---- +<1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`. +<2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information". +<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information". 
diff --git a/modules/clair-openshift-airgap-database.adoc b/modules/clair-openshift-airgap-database.adoc new file mode 100644 index 000000000..6f1de127e --- /dev/null +++ b/modules/clair-openshift-airgap-database.adoc @@ -0,0 +1,64 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-openshift-airgap-database"] += Configuring access to the Clair database in the disconnected {ocp} cluster + +Use the following procedure to configure access to the Clair database in your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. + +.Procedure + +. Determine your Clair database service by using the `oc` CLI tool, for example: +[source,terminal] ++ +---- +$ oc get svc -n quay-enterprise +---- ++ +.Example output ++ +[source,terminal] +---- +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +example-registry-clair-app ClusterIP 172.30.224.93 80/TCP,8089/TCP 4d21h +example-registry-clair-postgres ClusterIP 172.30.246.88 5432/TCP 4d21h +... +---- + +. Forward the Clair database port so that it is accessible from the local machine. For example: ++ +[source,terminal] +---- +$ oc port-forward -n quay-enterprise service/example-registry-clair-postgres 5432:5432 +---- + +. 
Update your Clair `config.yaml` file, for example: ++ +[source,yaml] +---- +indexer: + connstring: host=localhost port=5432 dbname=postgres user=postgres password=postgres sslmode=disable <1> + layer_scan_concurrency: 5 + migrations: true + scanlock_retry: 10 + airgap: true + scanner: + repo: + rhel-repository-scanner: <2> + repo2cpe_mapping_file: /data/repository-to-cpe.json + package: + rhel_containerscanner: <3> + name2repos_mapping_file: /data/container-name-repos-map.json +---- +<1> Replace the value of the `host` in the multiple `connstring` fields with `localhost`. +<2> For more information about the `rhel-repository-scanner` parameter, see "Mapping repositories to Common Product Enumeration information". +<3> For more information about the `rhel_containerscanner` parameter, see "Mapping repositories to Common Product Enumeration information". diff --git a/modules/clair-openshift-airgap-import-bundle-standalone.adoc b/modules/clair-openshift-airgap-import-bundle-standalone.adoc new file mode 100644 index 000000000..577e85622 --- /dev/null +++ b/modules/clair-openshift-airgap-import-bundle-standalone.adoc @@ -0,0 +1,26 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-openshift-airgap-import-bundle-standalone"] += Importing the updaters bundle into the disconnected {ocp} cluster + +Use the following procedure to import the updaters bundle into your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have deployed Clair. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. +* You have transferred the updaters bundle into your disconnected environment. 
+ +.Procedure + +* Use the `clairctl` CLI tool to import the updaters bundle into the Clair database that is deployed by {ocp}: ++ +[source,terminal] +---- +$ ./clairctl --config ./clair-config.yaml import-updaters updates.gz +---- diff --git a/modules/clair-openshift-airgap-import-bundle.adoc b/modules/clair-openshift-airgap-import-bundle.adoc new file mode 100644 index 000000000..c4f28b537 --- /dev/null +++ b/modules/clair-openshift-airgap-import-bundle.adoc @@ -0,0 +1,27 @@ + +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-openshift-airgap-import-bundle"] += Importing the updaters bundle into the disconnected {ocp} cluster + +Use the following procedure to import the updaters bundle into your disconnected {ocp} cluster. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. +* You have retrieved and decoded the Clair configuration secret, and saved it to a Clair `config.yaml` file. +* The `disable_updaters` and `airgap` parameters are set to `true` in your Clair `config.yaml` file. +* You have exported the updaters bundle from a Clair instance that has access to the internet. +* You have transferred the updaters bundle into your disconnected environment. + +.Procedure + +* Use the `clairctl` CLI tool to import the updaters bundle into the Clair database that is deployed by {ocp}. 
For example: ++ +[source,terminal] +---- +$ ./clairctl --config ./clair-config.yaml import-updaters updates.gz +---- diff --git a/modules/clair-openshift-airgap-update.adoc b/modules/clair-openshift-airgap-update.adoc new file mode 100644 index 000000000..032bce1d6 --- /dev/null +++ b/modules/clair-openshift-airgap-update.adoc @@ -0,0 +1,13 @@ +[[clair-openshift-airgap-update]] += Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster + +Clair utilizes packages called `updaters` that encapsulate the logic of fetching and parsing different vulnerability databases. Clair supports running updaters in a different environment and importing the results. This is aimed at supporting installations that disallow the Clair cluster from talking to the Internet directly. + +To manually update the vulnerability databases for Clair in an air-gapped OpenShift cluster, use the following steps: + +* Obtain the `clairctl` program +* Retrieve the Clair config +* Use `clairctl` to export the updaters bundle from a Clair instance that has access to the internet +* Update the Clair config in the air-gapped OpenShift cluster to allow access to the Clair database +* Transfer the updaters bundle from the system with internet access, to make it available inside the air-gapped environment +* Use `clairctl` to import the updaters bundle into the Clair instance for the air-gapped OpenShift cluster diff --git a/modules/clair-openshift-config.adoc b/modules/clair-openshift-config.adoc new file mode 100644 index 000000000..4689bc893 --- /dev/null +++ b/modules/clair-openshift-config.adoc @@ -0,0 +1,35 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-openshift-config"] += Retrieving and decoding the Clair configuration secret for Clair deployments on {ocp} + +Use the following procedure to retrieve and decode the configuration secret for an {ocp} provisioned Clair instance on {ocp}. 
+ +.Prerequisites + +* You have installed the `clairctl` command line utility tool. + +.Procedure + +. Enter the following command to retrieve and decode the configuration secret, and then save it to a Clair configuration YAML: ++ +[source,terminal] +---- +$ oc get secret -n quay-enterprise example-registry-clair-config-secret -o "jsonpath={$.data['config\.yaml']}" | base64 -d > clair-config.yaml +---- + +. Update the `clair-config.yaml` file so that the `disable_updaters` and `airgap` parameters are set to `true`, for example: ++ +[source,yaml] +---- +--- +indexer: + airgap: true +--- +matcher: + disable_updaters: true +--- +---- \ No newline at end of file diff --git a/modules/clair-openshift-manual.adoc b/modules/clair-openshift-manual.adoc new file mode 100644 index 000000000..a7f990dfe --- /dev/null +++ b/modules/clair-openshift-manual.adoc @@ -0,0 +1,266 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="manually-deploy-clair-ocp"] += Setting up Clair on {productname} Operator deployment + +Use the following procedure to configure Clair on a {productname} {ocp} deployment. + +.Prerequisites + +* Your {productname} Operator has been upgraded to 3.4.0 or greater. + +.Procedure + +. Enter the following command to set your current project to the name of the project that is running {productname}: ++ +[source,terminal] +---- +$ oc project quay-enterprise +---- + +. 
Create a Postgres deployment file for Clair, for example, `clairv4-postgres.yaml`: ++ +[source,yaml] +---- +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: clairv4-postgres + namespace: quay-enterprise + labels: + quay-component: clairv4-postgres +spec: + replicas: 1 + selector: + matchLabels: + quay-component: clairv4-postgres + template: + metadata: + labels: + quay-component: clairv4-postgres + spec: + volumes: + - name: postgres-data + persistentVolumeClaim: + claimName: clairv4-postgres + containers: + - name: postgres + image: postgres:11.5 + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 5432 + env: + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_DB + value: "clair" + - name: POSTGRES_PASSWORD + value: "postgres" + - name: PGDATA + value: "/etc/postgres/data" + volumeMounts: + - name: postgres-data + mountPath: "/etc/postgres" +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: clairv4-postgres + labels: + quay-component: clairv4-postgres +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" + volumeName: "clairv4-postgres" + storageClassName: <1> +--- +apiVersion: v1 +kind: Service +metadata: + name: clairv4-postgres + labels: + quay-component: clairv4-postgres +spec: + type: ClusterIP + ports: + - port: 5432 + protocol: TCP + name: postgres + targetPort: 5432 + selector: + quay-component: clairv4-postgres +---- +<1> If left unspecified, defaults to `quay-storageclass`. + +. Enter the following command to deploy the Postgres database: ++ +[source,terminal] +---- +$ oc create -f ./clairv4-postgres.yaml +---- + +. 
Create a `config.yaml` file for Clair, for example: ++ +[source,yaml] +---- +introspection_addr: :8089 +http_listen_addr: :8081 +log_level: debug +indexer: + connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true +matcher: + connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable + max_conn_pool: 100 + migrations: true + indexer_addr: clair-indexer +notifier: + connstring: host=clairv4-postgres port=5432 dbname=clair user=postgres password=postgres sslmode=disable + delivery: 1m + poll_interval: 5m + migrations: true +auth: + psk: + key: MTU5YzA4Y2ZkNzJoMQ== <1> + iss: ["quay"] +# tracing and metrics +trace: + name: "jaeger" + probability: 1 + jaeger: + agent: + endpoint: "localhost:6831" + service_name: "clair" +metrics: + name: "prometheus" +---- +<1> To generate a Clair pre-shared key (PSK), enable `scanning` in the Security Scanner section of the User Interface and click `Generate PSK`. ++ +More information about Clair's configuration format can be found in link:https://quay.github.io/clair/reference/config.html[upstream Clair documentation]. + +. Enter the following command to create a secret from the Clair `config.yaml` file: ++ +---- +$ oc create secret generic clairv4-config-secret --from-file=./config.yaml +---- + +. 
Create a deployment file for Clair, for example, `clair-combo.yaml`: ++ +[source,yaml,subs="verbatim,attributes"] +---- +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + quay-component: clair-combo + name: clair-combo +spec: + replicas: 1 + selector: + matchLabels: + quay-component: clair-combo + template: + metadata: + labels: + quay-component: clair-combo + spec: + containers: + - image: {productrepo}/{clairimage}:{productminv} <1> + imagePullPolicy: IfNotPresent + name: clair-combo + env: + - name: CLAIR_CONF + value: /clair/config.yaml + - name: CLAIR_MODE + value: combo + ports: + - containerPort: 8080 + name: clair-http + protocol: TCP + - containerPort: 8089 + name: clair-intro + protocol: TCP + volumeMounts: + - mountPath: /clair/ + name: config + imagePullSecrets: + - name: redhat-pull-secret + restartPolicy: Always + volumes: + - name: config + secret: + secretName: clairv4-config-secret +--- +apiVersion: v1 +kind: Service +metadata: + name: clairv4 <2> + labels: + quay-component: clair-combo +spec: + ports: + - name: clair-http + port: 80 + protocol: TCP + targetPort: 8080 + - name: clair-introspection + port: 8089 + protocol: TCP + targetPort: 8089 + selector: + quay-component: clair-combo + type: ClusterIP +---- +<1> Use the latest Clair image name and version. +<2> With the `Service` set to `clairv4`, the scanner endpoint for Clair v4 is entered into the {productname} `config.yaml` file in the `SECURITY_SCANNER_V4_ENDPOINT` as `\http://clairv4`. + +. Enter the following command to create the Clair deployment: ++ +---- +$ oc create -f ./clair-combo.yaml +---- + +. Add the following entries to your `config.yaml` file for your {productname} deployment. ++ +[source,yaml] +---- +FEATURE_SECURITY_NOTIFICATIONS: true +FEATURE_SECURITY_SCANNER: true +FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX: true +SECURITY_SCANNER_V4_ENDPOINT: <1> +SECURITY_SCANNER_V4_PSK: <2> +---- +<1> Obtained through the {productname} configuration tool. 
This parameter must be manually added if you do not use the {productname} configuration tool. +<2> Obtained through the {productname} configuration tool. This parameter must be manually added if you do not use the {productname} configuration tool. + + +. Enter the following command to delete the original configuration secret for your `quay-enterprise` project: ++ +[source,terminal] +---- +$ oc delete secret quay-enterprise-config-secret +---- + +. Deploy the modified `config.yaml` to the secret containing that file: ++ +[source,terminal] +---- +$ oc create secret generic quay-enterprise-config-secret --from-file=./config.yaml +---- + +. Restart your {productname} pods. ++ +[NOTE] +==== +Deleting the `quay-app` pods causes pods with the updated configuration to be deployed. +==== \ No newline at end of file diff --git a/modules/clair-openshift.adoc b/modules/clair-openshift.adoc new file mode 100644 index 000000000..401ba8a27 --- /dev/null +++ b/modules/clair-openshift.adoc @@ -0,0 +1,9 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-quay-operator-overview"] += Clair on {ocp} + +To set up Clair v4 (Clair) on a {productname} deployment on {ocp}, it is recommended to use the {productname} Operator. By default, the {productname} Operator installs or upgrades a Clair deployment along with your {productname} deployment and configure Clair automatically. diff --git a/modules/clair-postgresql-database-update.adoc b/modules/clair-postgresql-database-update.adoc new file mode 100644 index 000000000..c9b4e49ee --- /dev/null +++ b/modules/clair-postgresql-database-update.adoc @@ -0,0 +1,99 @@ +[id="upgrading-clair-postgresql-database"] += Upgrading the Clair PostgreSQL database + +If you are upgrading {productname} to version 13, you must migrate your Clair PostgreSQL database version from PostgreSQL version 13 -> version 15. 
This requires bringing down your Clair PostgreSQL 13 database and running a migration script to initiate the process. + +Use the following procedure to upgrade your Clair PostgreSQL database from version 13 to version 15. + +[IMPORTANT] +==== +Clair security scans might become temporarily disrupted after the migration procedure has succeeded. +==== + +.Procedure + +. Stop the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Stop the Clair container by running the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Run the following Podman process from SCLOrg's link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration] procedure, which allows for data migration from a remote PostgreSQL server: ++ +[source,terminal] +---- +$ sudo podman run -d --name <1> + -e POSTGRESQL_MIGRATION_REMOTE_HOST= \ <2> + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + -v \ <3> + [ OPTIONAL_CONFIGURATION_VARIABLES ] + registry.redhat.io/rhel8/postgresql-15 +---- ++ +<1> Insert a name for your Clair PostgreSQL 15 migration database. +<2> Your new Clair PostgreSQL 15 database container IP address. Can be obtained by running the following command: `sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay`. +<3> You must specify a different volume mount point than the one from your initial Clair PostgreSQL 13 deployment, and modify the access control lists for said directory. For example: ++ +[source,terminal] +---- +$ mkdir -p /host/data/clair-postgresql15-directory +---- ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /host/data/clair-postgresql15-directory +---- ++ +This prevents data from being overwritten by the new container. + +. Stop the Clair PostgreSQL 13 container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
After completing the PostgreSQL migration, run the Clair PostgreSQL 15 container, using the new data volume mount from Step 3, for example, ``: ++ +[source,terminal] +---- +$ sudo podman run -d --rm --name \ + -e POSTGRESQL_USER= \ + -e POSTGRESQL_PASSWORD= \ + -e POSTGRESQL_DATABASE= \ + -e POSTGRESQL_ADMIN_PASSWORD= \ + -p 5433:5432 \ + -v \ + registry.redhat.io/rhel8/postgresql-15 +---- + +. Start the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 --name=quay \ +-v /home//quay-poc/config:/conf/stack:Z \ +-v /home//quay-poc/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + +. Start the Clair container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +registry.redhat.io/quay/clair-rhel8:{productminv} +---- + +For more information, see link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration]. \ No newline at end of file diff --git a/modules/clair-severity-mapping.adoc b/modules/clair-severity-mapping.adoc new file mode 100644 index 000000000..0d3865f29 --- /dev/null +++ b/modules/clair-severity-mapping.adoc @@ -0,0 +1,165 @@ + +// Module included in the following assemblies: +// +// clair/master.adoc +//quayio/master.adoc + +:_content-type: CONCEPT +[id="clair-severity-mapping"] += Clair severity mapping + +Clair offers a comprehensive approach to vulnerability assessment and management. One of its essential features is the normalization of security databases' severity strings. This process streamlines the assessment of vulnerability severities by mapping them to a predefined set of values. Through this mapping, clients can efficiently react to vulnerability severities without the need to decipher the intricacies of each security database's unique severity strings. 
These mapped severity strings align with those found within the respective security databases, ensuring consistency and accuracy in vulnerability assessment. + + +[id="clair-severity-strings"] +== Clair severity strings + +Clair alerts users with the following severity strings: + +* Unknown +* Negligible +* Low +* Medium +* High +* Critical + +These severity strings are similar to the strings found within the relevant security database. + +[discrete] +[id="clair-mapping-alpine"] +=== Alpine mapping + +Alpine SecDB database does not provide severity information. All vulnerability severities will be Unknown. + +[cols="1,1",options="header"] +|=== +| Alpine Severity | Clair Severity +| * |Unknown + +|=== + +[discrete] +[id="clair-mapping-aws"] +=== AWS mapping + +AWS UpdateInfo database provides severity information. + +[cols="1,1",options="header"] +|=== +| AWS Severity | Clair Severity +|low |Low +|medium |Medium +|important | High +|critical | Critical +|=== + +[discrete] +[id="clair-mapping-debian"] +=== Debian mapping + +Debian Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| Debian Severity | Clair Severity +| * | Unknown +|Unimportant | Low +| Low | Medium +| Medium | High +| High | Critical +|=== + +[discrete] +[id="clair-mapping-oracle"] +=== Oracle mapping + +Oracle Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| Oracle Severity | Clair Severity +|N/A | Unknown +|LOW | Low +|MODERATE | Medium +|IMPORTANT | High +|CRITICAL | Critical + +|=== + +[discrete] +[id="clair-mapping-rhel"] +=== RHEL mapping + +RHEL Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| RHEL Severity | Clair Severity +|None | Unknown +|Low | Low +|Moderate | Medium +|Important | High +|Critical | Critical + +|=== + +[discrete] +[id="clair-mapping-suse"] +=== SUSE mapping + +SUSE Oval database provides severity information. 
+ +[cols="1,1",options="header"] +|=== +| Severity | Clair Severity +|None | Unknown +|Low | Low +|Moderate | Medium +|Important | High +|Critical | Critical +|=== + +[discrete] +[id="clair-mapping-ubuntu"] +=== Ubuntu mapping + +Ubuntu Oval database provides severity information. + +[cols="1,1",options="header"] +|=== +| Severity | Clair Severity +|Untriaged |Unknown +|Negligible | Negligible +|Low | Low +|Medium | Medium +|High | High +|Critical | Critical +|=== + +[discrete] +[id="clair-mapping-osv"] +=== OSV mapping + +.CVSSv3 +[cols="2,2",options="header"] +|=== +| Base Score | Clair Severity +|0.0 | Negligible +|0.1-3.9 | Low +|4.0-6.9 | Medium +|7.0-8.9 | High +|9.0-10.0 | Critical + +|=== + +.CVSSv2 + +[cols="2,2",options="header"] +|=== +| Base Score | Clair Severity +|0.0-3.9 | Low +|4.0-6.9 | Medium +|7.0-10 | High + +|=== + diff --git a/modules/clair-standalone-config-location.adoc b/modules/clair-standalone-config-location.adoc new file mode 100644 index 000000000..18dc6e628 --- /dev/null +++ b/modules/clair-standalone-config-location.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-standalone-config-location"] += Deploying a self-managed Clair container for disconnected {ocp} clusters + +Use the following procedure to deploy a self-managed Clair container for disconnected {ocp} clusters. + +.Prerequisites + +* You have installed the `clairctl` command line utility tool. + +.Procedure + +. Create a folder for your Clair configuration file, for example: ++ +[source,terminal] +---- +$ mkdir /etc/clairv4/config/ +---- + +. Create a Clair configuration file with the `disable_updaters` parameter set to `true`, for example: ++ +[source,yaml] +---- +--- +indexer: + airgap: true +--- +matcher: + disable_updaters: true +--- +---- + +. 
Start Clair by using the container image, mounting in the configuration from the file you created: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -it --rm --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +-v /etc/clairv4/config:/clair:Z \ +{productrepo}/{clairimage}:{productminv} +---- diff --git a/modules/clair-standalone-config.adoc b/modules/clair-standalone-config.adoc new file mode 100644 index 000000000..7fa219cd3 --- /dev/null +++ b/modules/clair-standalone-config.adoc @@ -0,0 +1,45 @@ +[[clair-standalone-config]] += Clair configuration + +Detailed information on Clair configuration is available at link:https://github.com/quay/clair/blob/main/Documentation/reference/config.md[]. + +* Create a config.yaml file in your `/etc/` directory, for example, `/etc/clairv4/config/config.yaml`. Use the following example, which provides a minimal configuration for use in a proof of concept deployment: ++ +[source,yaml] +---- +http_listen_addr: :8081 +introspection_addr: :8089 +log_level: debug +indexer: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true +matcher: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + max_conn_pool: 100 + migrations: true + indexer_addr: clair-indexer +notifier: +  connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable +  delivery_interval: 1m +  poll_interval: 5m +  migrations: true +auth: +  psk: +    key: "MTU5YzA4Y2ZkNzJoMQ==" +    iss: ["quay"] +# tracing and metrics +trace: + name: "jaeger" + probability: 1 + jaeger: + agent: + endpoint: "localhost:6831" + service_name: "clair" +metrics: + name: "prometheus" +---- ++ +* `http_listen_addr` is set to the port of the Clair HTTP endpoint that you specified in the Quay 
configuration tool, in this case `:8081`. +* The Clair pre-shared key (PSK) that you generated in the Quay configuration tool is used for authentication, with the issuer, specified in the `iss` field, set to `quay`. diff --git a/modules/clair-standalone-configure.adoc b/modules/clair-standalone-configure.adoc new file mode 100644 index 000000000..116246c10 --- /dev/null +++ b/modules/clair-standalone-configure.adoc @@ -0,0 +1,159 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-standalone-configure"] += Setting up Clair on standalone {productname} deployments + +For standalone {productname} deployments, you can set up Clair manually. + +.Procedure + +. In your {productname} installation directory, create a new directory for the Clair database data: ++ +[source,terminal] +---- +$ mkdir /home//quay-poc/postgres-clairv4 +---- + +. Set the appropriate permissions for the `postgres-clairv4` file by entering the following command: ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /home//quay-poc/postgres-clairv4 +---- + +. Deploy a Clair PostgreSQL database by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --name postgresql-clairv4 \ + -e POSTGRESQL_USER=clairuser \ + -e POSTGRESQL_PASSWORD=clairpass \ + -e POSTGRESQL_DATABASE=clair \ + -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ + -p 5433:5432 \ + -v /home//quay-poc/postgres-clairv4:/var/lib/pgsql/data:Z \ + registry.redhat.io/rhel8/postgresql-15 +---- + +. Install the PostgreSQL `uuid-ossp` module for your Clair deployment: ++ +[source,terminal] +---- +$ sudo podman exec -it postgresql-clairv4 /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"" | psql -d clair -U postgres' +---- ++ +.Example output +[source,terminal] +---- +CREATE EXTENSION +---- ++ +[NOTE] +==== +Clair requires the `uuid-ossp` extension to be added to its PostgreSQL database. 
For users with proper privileges, the extension will be created automatically by Clair. If users do not have the proper privileges, the extension must be added before starting Clair. + +If the extension is not present, the following error will be displayed when Clair attempts to start: `ERROR: Please load the "uuid-ossp" extension. (SQLSTATE 42501)`. +==== + +. Stop the `Quay` container if it is running and restart it in configuration mode, loading the existing configuration as a volume: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config \ + -p 80:8080 -p 443:8443 \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} config secret +---- + +. Log in to the configuration tool and click *Enable Security Scanning* in the *Security Scanner* section of the UI. + +. Set the HTTP endpoint for Clair using a port that is not already in use on the `quay-server` system, for example, `8081`. + +. Create a pre-shared key (PSK) using the *Generate PSK* button. ++ +.Security Scanner UI +image:poc-quay-scanner-config.png[Security Scanner] + +. Validate and download the `config.yaml` file for {productname}, and then stop the `Quay` container that is running the configuration editor. + +. Extract the new configuration bundle into your {productname} installation directory, for example: ++ +[source,terminal] +---- +$ tar xvf quay-config.tar.gz -C /home//quay-poc/ +---- + +. Create a folder for your Clair configuration file, for example: ++ +[source,terminal] +---- +$ mkdir /etc/opt/clairv4/config/ +---- + +. Change into the Clair configuration folder: ++ +[source,terminal] +---- +$ cd /etc/opt/clairv4/config/ +---- + +. 
Create a Clair configuration file, for example: ++ +[source,yaml] +---- +http_listen_addr: :8081 +introspection_addr: :8088 +log_level: debug +indexer: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true +matcher: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + max_conn_pool: 100 + migrations: true + indexer_addr: clair-indexer +notifier: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + delivery_interval: 1m + poll_interval: 5m + migrations: true +auth: + psk: + key: "MTU5YzA4Y2ZkNzJoMQ==" + iss: ["quay"] +# tracing and metrics +trace: + name: "jaeger" + probability: 1 + jaeger: + agent: + endpoint: "localhost:6831" + service_name: "clair" +metrics: + name: "prometheus" +---- ++ +For more information about Clair's configuration format, see link:https://quay.github.io/clair/reference/config.html[Clair configuration reference]. + +. Start Clair by using the container image, mounting in the configuration from the file you created: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +-v /etc/opt/clairv4/config:/clair:Z \ +{productrepo}/{clairimage}:{productminv} +---- ++ +[NOTE] +==== +Running multiple Clair containers is also possible, but for deployment scenarios beyond a single container the use of a container orchestrator like Kubernetes or {ocp} is strongly recommended. 
+==== + diff --git a/modules/clair-standalone-database.adoc b/modules/clair-standalone-database.adoc new file mode 100644 index 000000000..17b9c56b3 --- /dev/null +++ b/modules/clair-standalone-database.adoc @@ -0,0 +1,32 @@ +[[clair-standalone-database]] += Deploying a separate database for Clair + +Clair requires a Postgres database. You can share a common database between Quay and Clair if Quay is also using Postgres, but in this example a separate, Clair-specific database is deployed. + +In this proof of concept scenario, you will use a directory on the local file system to persist database data. + +. In the installation folder, denoted here by the variable $QUAY, create a directory for the Clair database data and set the permissions appropriately: ++ +.... +$ mkdir -p $QUAY/postgres-clairv4 +$ setfacl -m u:26:-wx $QUAY/postgres-clairv4 +.... +. Use podman to run the Postgres container, specifying the username, password, database name and port, together with the volume definition for database data. As the standard Postgres port, `5432`, is already in use by the Quay deployment, expose a different port, in this instance `5433`: ++ +[subs="verbatim,attributes"] +.... +$ sudo podman run -d --rm --name postgresql-clairv4 \ + -e POSTGRESQL_USER=clairuser \ + -e POSTGRESQL_PASSWORD=clairpass \ + -e POSTGRESQL_DATABASE=clair \ + -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ + -p 5433:5432 \ + -v $QUAY/postgres-clairv4:/var/lib/pgsql/data:Z \ + {postgresimage} +.... +. Ensure that the Postgres `uuid-ossp` module is installed, as it is required by Clair: ++ +.... +$ sudo podman exec -it postgresql-clairv4 /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"" | psql -d clair -U postgres' +.... 
+ diff --git a/modules/clair-standalone-intro.adoc b/modules/clair-standalone-intro.adoc new file mode 100644 index 000000000..4fa36cf3c --- /dev/null +++ b/modules/clair-standalone-intro.adoc @@ -0,0 +1,10 @@ +[[clair-standalone-intro]] += Deploying Clair V4 + +Clair is an application for parsing image contents and reporting vulnerabilities affecting the contents. This is performed via static analysis and not at runtime. Clair's analysis is broken into three distinct parts: + +* **Indexing:** Indexing starts with submitting a Manifest to Clair. On receipt, Clair will fetch layers, scan their contents, and return an intermediate representation called an IndexReport. Manifests are Clair's representation of a container image. Clair leverages the fact that OCI Manifests and Layers are content-addressed to reduce duplicated work. Once a Manifest is indexed, the IndexReport is persisted for later retrieval. + +* **Matching:** Matching takes an IndexReport and correlates vulnerabilities affecting the manifest that the report represents. Clair is continually ingesting new security data and a request to the matcher will always provide you with the most up to date vulnerability analysis of an IndexReport. + +* **Notifications:** Clair implements a notification service. When new vulnerabilities are discovered, the notifier service will determine if these vulnerabilities affect any indexed Manifests. The notifier will then take action according to its configuration. diff --git a/modules/clair-standalone-quay-config.adoc b/modules/clair-standalone-quay-config.adoc new file mode 100644 index 000000000..d3c1e1033 --- /dev/null +++ b/modules/clair-standalone-quay-config.adoc @@ -0,0 +1,48 @@ +[[clair-standalone-quay-config]] += Quay configuration for Clair + +. 
Stop the `Quay` container if it is running, and restart it in configuration mode, loading the existing configuration as a volume: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config \ + -p 80:8080 -p 443:8443 \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} config secret +---- + +. Log in to the configuration tool and enable scanning in the Security Scanner section of the UI. Set the HTTP endpoint for Clair using a port that is not already in use on the `quay-server` system, for example `8081`. Create a Clair pre-shared key (PSK) using the `Generate PSK` button, for example: ++ +* **Security Scanner Endpoint:** `\http://quay-server.example.com:8081` +* **Security Scanner PSK:** `MTU5YzA4Y2ZkNzJoMQ==` ++ +The UI for setting the scanner data is shown in the following image: ++ +.Security Scanner UI +image:poc-quay-scanner-config.png[Security Scanner] + +. Validate and download the `configuration` file and then stop the `Quay` container that is running the configuration editor. Extract the configuration bundle as before into the `$QUAY/config` directory. ++ +.... +$ cp ~/Downloads/quay-config.tar.gz $QUAY/config +$ cd $QUAY/config +$ tar xvf quay-config.tar.gz +.... + +The {productname} configuration file is now updated to contain the following fields for the security scanner: + +[source,yaml] +---- +... +FEATURE_SECURITY_NOTIFICATIONS: true +FEATURE_SECURITY_SCANNER: true +... +SECURITY_SCANNER_INDEXING_INTERVAL: 30 +SECURITY_SCANNER_V4_MANIFEST_CLEANUP: true +SECURITY_SCANNER_V4_ENDPOINT: http://quay-server.example.com:8081 +SECURITY_SCANNER_V4_PSK: MTU5YzA4Y2ZkNzJoMQ== +SERVER_HOSTNAME: quay-server.example.com +SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE: 8G <1> +... +---- +<1> The recommended maximum of `SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE` is `10G`. 
diff --git a/modules/clair-standalone-running.adoc b/modules/clair-standalone-running.adoc new file mode 100644 index 000000000..d348be786 --- /dev/null +++ b/modules/clair-standalone-running.adoc @@ -0,0 +1,25 @@ +[[clair-standalone-running]] += Running Clair + +. Use the `podman run` command to run the Clair container, exposing the HTTP endpoint port that you specified in the configuration tool, in this case `8081`: ++ +[subs="verbatim,attributes"] +.... +$ sudo podman run -d --rm --name clairv4 \ + -p 8081:8081 -p 8089:8089 \ + -e CLAIR_CONF=/clair/config.yaml -e CLAIR_MODE=combo \ + -v /etc/clairv4/config:/clair:Z \ + {productrepo}/{clairimage}:{productminv} +.... + + +. Next, restart the `Quay` container using the updated configuration file containing the scanner settings: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- diff --git a/modules/clair-standalone-upgrade.adoc b/modules/clair-standalone-upgrade.adoc new file mode 100644 index 000000000..7cf7ef06c --- /dev/null +++ b/modules/clair-standalone-upgrade.adoc @@ -0,0 +1,38 @@ +:_content-type: CONCEPT +[id="clair-standalone-upgrade"] += Using Clair with an upstream image for {productname} + +For most users, independent upgrades of Clair from the current version ({clairproductminv}) are unnecessary. In some cases, however, customers might want to pull an image of Clair from the link:https://quay.io/repository/projectquay/clair[upstream repository] for various reasons, such as for specific bug fixes or to try new features that have not yet been released downstream. You can use the following procedure to run an upstream version of Clair with {productname}. + +[IMPORTANT] +==== +Upstream versions of Clair have not been fully tested for compatibility with {productname}. 
As a result, this combination might cause issues with your deployment. +==== + +.Procedure + +. Enter the following command to stop Clair if it is running: ++ +[source,terminal] +---- +$ podman stop +---- + +. Navigate to the link:https://quay.io/repository/projectquay/clair[upstream repository], find the version of Clair that you want to use, and pull it to your local machine. For example: ++ +[source,terminal] +---- +$ podman pull quay.io/projectquay/clair:nightly-2024-02-03 +---- + +. Start Clair by using the container image, mounting in the configuration from the file you created: ++ +[source,terminal] +---- +$ podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +-v /etc/opt/clairv4/config:/clair:Z \ +quay.io/projectquay/clair:nightly-2024-02-03 +---- diff --git a/modules/clair-standalone-using.adoc b/modules/clair-standalone-using.adoc new file mode 100644 index 000000000..f3705ffc8 --- /dev/null +++ b/modules/clair-standalone-using.adoc @@ -0,0 +1,28 @@ +[[clair-standalone-using]] += Using Clair security scanning + + +. From the command line, log in to the registry: ++ +.... +$ sudo podman login --tls-verify=false quay-server.example.com +Username: quayadmin +Password: +Login Succeeded! +.... + +. Pull, tag and push a sample image to the registry: ++ +.... +$ sudo podman pull ubuntu:20.04 +$ sudo podman tag docker.io/library/ubuntu:20.04 quay-server.example.com/quayadmin/ubuntu:20.04 +$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/ubuntu:20.04 +.... 
+ +The results from the security scanning can be seen in the Quay UI, as shown in the following images: + +.Report summary +image:poc-clair-1.png[Scanning summary] + +.Report details +image:poc-clair-2.png[Scanning details] \ No newline at end of file diff --git a/modules/clair-testing.adoc b/modules/clair-testing.adoc new file mode 100644 index 000000000..c1d79ca6f --- /dev/null +++ b/modules/clair-testing.adoc @@ -0,0 +1,55 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="clair-testing"] += Testing Clair + +Use the following procedure to test Clair on either a standalone {productname} deployment, or on an {ocp} Operator-based deployment. + +.Prerequisites + +* You have deployed the Clair container image. + +.Procedure + +. Pull a sample image by entering the following command: ++ +[source,terminal] +---- +$ podman pull ubuntu:20.04 +---- + +. Tag the image to your registry by entering the following command: ++ +[source,terminal] +---- +$ sudo podman tag docker.io/library/ubuntu:20.04 //ubuntu:20.04 +---- + +. Push the image to your {productname} registry by entering the following command: ++ +[source,terminal] +---- +$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/ubuntu:20.04 +---- + +. Log in to your {productname} deployment through the UI. + +. Click the repository name, for example, *quayadmin/ubuntu*. + +. In the navigation pane, click *Tags*. ++ +.Report summary +image:clair-reposcan.png[Security scan information appears for scanned repository images] + +. Click the image report, for example, *45 medium*, to show a more detailed report: ++ +.Report details +image:clair-vulnerabilities.png[See all vulnerabilities or only those that are fixable] ++ +[NOTE] +==== +In some cases, Clair shows duplicate reports on images, for example, `ubi8/nodejs-12` or `ubi8/nodejs-16`. This occurs because vulnerabilities with same name are for different packages. 
This behavior is expected with Clair vulnerability reporting and will not be addressed as a bug. +==== \ No newline at end of file diff --git a/modules/clair-troubleshooting-issues.adoc b/modules/clair-troubleshooting-issues.adoc new file mode 100644 index 000000000..50914530e --- /dev/null +++ b/modules/clair-troubleshooting-issues.adoc @@ -0,0 +1,173 @@ +:_content-type: PROCEDURE +[id="clair-troubleshooting-issues"] += Troubleshooting Clair issue + +Use the following procedures to troubleshoot Clair. + +[id="verify-image-compatibility"] +== Verifying image compatibility + +If you are using Clair, ensure that the images you are trying to scan are supported by Clair. Clair has certain requirements and does not support all image formats or configurations. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/clair-vulnerability-scanner#clair-vulnerability-scanner-hosts[Clair vulnerability databases]. + +[id="allowlist-clair-updaters"] +== Allowlisting Clair updaters + +If you are using Clair behind a proxy configuration, you must allowlist the updaters in your proxy or firewall configuration. For more information about updater URLs, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/clair-concepts#clair-updater-urls[Clair updater URLs]. + +[id="clair-dependencies-update"] +== Updating Clair scanner and its dependencies + +Ensure that you are using the latest version of Clair security scanner. Outdated versions might lack support for newer image formats, or might have known issues. + +Use the following procedure to check your version of Clair. + +[NOTE] +==== +Checking Clair logs can also be used to check if there are any errors from the updaters microservice in your Clair logs. By default, Clair updates the vulnerability database every 30 minutes. +==== + +.Procedure + +. Check your version of Clair. + +.. 
If you are running Clair on {productname-ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc logs clair-pod +---- + +.. If you are running a standalone deployment of {productname} and using a Clair container, enter the following command: ++ +[source,terminal] +---- +$ podman logs clair-container +---- ++ +.Example output ++ +[source,terminal] +---- +"level":"info", +"component":"main", +"version":"v4.5.1", +---- + +[id="enabling-debug-mode-clair"] +== Enabling debug mode for Clair + +By default, debug mode for Clair is disabled. You can enable debug mode for Clair by updating your `clair-config.yaml` file. + +.Prerequisites + +* For Clair on {productname-ocp} deployments, you must link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#custom-clair-configuration-managed-database[Running a custom Clair configuration with a managed Clair database]. + +Use the following procedure to enable debug mode for Clair. + +.Procedure + +. Update your `clair-config.yaml` file to include the debug option. + +.. On standalone {productname} deployments: + +... Add the following configuration field to your `clair-config.yaml` file: ++ +[source,yaml] +---- +log_level: debug +---- + +... Restart your Clair deployment by entering the following command: ++ +[source,terminal] +---- +$ podman restart +---- + +.. On {productname-ocp} deployments: + +... On the {ocp} web console, click *Operators* -> *Installed Operators* -> *Quay Registry*. + +... Click the name of your registry, for example, *Example Registry*. You are redirected to the *Details* page of your registry. + +... Click the Config Bundle Secret, for example, *example-registry-config-bundle-xncls*. + +... Confirm that you are running a custom Clair configuration by looking for the `clair-config.yaml` file under the *Data* section of the *Details* page of your secret. + +... 
If you have a `clair-config.yaml` file, click *Actions* -> *Edit Secret*. If you do not, see "Running a custom Clair configuration with a managed Clair database". + +... Update your `clair-config.yaml` file to include the `log_level: debug` configuration variable. For example: ++ +[source,yaml] +---- +log_level: debug +---- + +... Click *Save*. + +... You can check the status of your Clair deployment by clicking *Workloads* -> *Pods*. The `clair-app` pod should report `1/1` under the *Ready* category. + +... You can confirm that Clair is returning debugging information by clicking the *clair-app* pod that is ready -> *Logs*. + +[id="checking-clair-configuration"] +== Checking Clair configuration + +Check your Clair `config.yaml` file to ensure that there are no misconfigurations or inconsistencies that could lead to issues. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#config-fields-overview[Clair configuration overview]. + +[id="inspect-image-metadata"] +== Inspect image metadata + +In some cases, you might receive an *Unsupported* message. This might indicate that the scanner is unable to extract the necessary metadata from the image. Check if the image metadata is properly formatted and accessible. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/7018077[Troubleshooting Clair]. + + +//// +[id="check-logs-updaters-errors"] +== Checking logs for updaters errors + +Check if there are any errors from the updaters microservice in your Clair logs. By default, Clair updates the vulnerability database every 30 minutes. + +Use the following procedure to check your Clair logs. + +.Procedure + +. Check your Clair logs. + +.. If you are running Clair on the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc logs clair-pod +---- + +.. 
If you are running a standalone deployment of {productname} and using a Clair container, enter the following command: ++ +[source,terminal] +---- +$ podman logs clair-container +---- + + +[id="updating-cve-database"] +== Updating the CVE database + +Updating the CVE database can be a memory and CPU intensive task, especially if there are several CVEs that must be parsed. If the resources are exhausted during this process, the system kernel can stop the offending process. This should be visible in Docker logs, Podman logs, or in the system journal. For example: + +[source,terminal] +---- +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.338195] [26556] 0 26556 734467 386889 4165632 0 937 clair + +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.338227] Memory cgroup out of memory: Kill process 26556 (clair) score 1922 or sacrifice child + +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.339573] Killed process 26556 (clair) total-vm:2937868kB, anon-rss:1536364kB, file-rss:11192kB, shmem-rss:0kB + +May 14 21:48:14 vm-mtr3-live-k8s-00-ranchernode-4 kernel: [36611.396171] oom_reaper: reaped process 26556 (clair), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB +---- +//// + diff --git a/modules/clair-unmanaged.adoc b/modules/clair-unmanaged.adoc new file mode 100644 index 000000000..dadda3417 --- /dev/null +++ b/modules/clair-unmanaged.adoc @@ -0,0 +1,11 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="unmanaged-clair-configuration"] += Unmanaged Clair configuration + +{productname} users can run an unmanaged Clair configuration with the {productname} {ocp} Operator. This feature allows users to create an unmanaged Clair database, or run their custom Clair configuration without an unmanaged database. 
+ +An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster. \ No newline at end of file diff --git a/modules/clair-updater-urls.adoc b/modules/clair-updater-urls.adoc new file mode 100644 index 000000000..551b61903 --- /dev/null +++ b/modules/clair-updater-urls.adoc @@ -0,0 +1,38 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-updater-urls"] += Information about Clair updaters + +The following table provides details about each Clair updater, including the configuration parameter, a brief description, relevant URLs, and the associated components that they interact with. This list is not exhaustive, and some servers might issue redirects, while certain request URLs are dynamically constructed to ensure accurate vulnerability data retrieval. + +For Clair, each updater is responsible for fetching and parsing vulnerability data related to a specific package type or distribution. For example, the Debian updater focuses on Debian-based Linux distributions, while the AWS updater focuses on vulnerabilities specific to Amazon Web Services' Linux distributions. Understanding the package type is important for vulnerability management because different package types might have unique security concerns and require specific updates and patches. + +[NOTE] +==== +If you are using a proxy server in your environment with Clair's updater URLs, you must identify which URL needs to be added to the proxy allowlist to ensure that Clair can access them unimpeded. Use the following table to add updater URLs to your proxy allowlist. 
+==== + +.Clair updater information +[cols="1a,3a,3a,2a",options="header"] +|=== +|Updater |Description | URLs | Component +|`alpine` |The Alpine updater is responsible for fetching and parsing vulnerability data related to packages in Alpine Linux distributions. | * `\https://secdb.alpinelinux.org/` | Alpine Linux SecDB database +|`aws` | The AWS updater is focused on AWS Linux-based packages, ensuring that vulnerability information specific to Amazon Web Services' custom Linux distributions is kept up-to-date. | * `\http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list` +* `\https://cdn.amazonlinux.com/2/core/latest/x86_64/mirror.list` +* `\https://cdn.amazonlinux.com/al2023/core/mirrors/latest/x86_64/mirror.list` | Amazon Web Services (AWS) UpdateInfo +|`debian` |The Debian updater is essential for tracking vulnerabilities in packages associated with Debian-based Linux distributions. | * `\https://deb.debian.org/` +* `\https://security-tracker.debian.org/tracker/data/json` | Debian Security Tracker +|`clair.cvss`| The Clair Common Vulnerability Scoring System (CVSS) updater focuses on maintaining data about vulnerabilities and their associated CVSS scores. This is not tied to a specific package type but rather to the severity and risk assessment of vulnerabilities in general. | * `\https://nvd.nist.gov/feeds/json/cve/1.1/` | National Vulnerability Database (NVD) feed for Common Vulnerabilities and Exposures (CVE) data in JSON format +|`oracle` |The Oracle updater is dedicated to Oracle Linux packages, maintaining data on vulnerabilities that affect Oracle Linux systems. | * `\https://linux.oracle.com/security/oval/com.oracle.elsa-*.xml.bz2` | Oracle Oval database +|`photon`| The Photon updater deals with packages in VMware Photon OS. 
| * `\https://packages.vmware.com/photon/photon_oval_definitions/` | VMware Photon OS oval definitions +|`rhel` |The Red Hat Enterprise Linux (RHEL) updater is responsible for maintaining vulnerability data for packages in Red Hat's Enterprise Linux distribution. | * `\https://access.redhat.com/security/cve/` +* `\https://access.redhat.com/security/data/oval/v2/PULP_MANIFEST` | Red Hat Enterprise Linux (RHEL) Oval database +|`rhcc` | The Red Hat Container Catalog (RHCC) updater is connected to Red Hat's container images. This updater ensures that vulnerability information related to Red Hat's containerized software is kept current. | * `\https://access.redhat.com/security/data/metrics/cvemap.xml` | Resource Handler Configuration Controller (RHCC) database +|`suse`| The SUSE updater manages vulnerability information for packages in the SUSE Linux distribution family, including openSUSE, SUSE Enterprise Linux, and others. | * `\https://support.novell.com/security/oval/` | SUSE Oval database +|`ubuntu` | The Ubuntu updater is dedicated to tracking vulnerabilities in packages associated with Ubuntu-based Linux distributions. Ubuntu is a popular distribution in the Linux ecosystem. | * `\https://security-metadata.canonical.com/oval/com.ubuntu.*.cve.oval.xml` +* `\https://api.launchpad.net/1.0/` | Ubuntu Oval Database +|`osv` | The Open Source Vulnerability (OSV) updater specializes in tracking vulnerabilities within open source software components. OSV is a critical resource that provides detailed information about security issues found in various open source projects. 
| * `\https://osv-vulnerabilities.storage.googleapis.com/` | Open Source Vulnerabilities database +|=== \ No newline at end of file diff --git a/modules/clair-updaters.adoc b/modules/clair-updaters.adoc new file mode 100644 index 000000000..73db138e5 --- /dev/null +++ b/modules/clair-updaters.adoc @@ -0,0 +1,11 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-updaters"] += Clair updaters + +Clair uses `Go` packages called _updaters_ that contain the logic of fetching and parsing different vulnerability databases. + +Updaters are usually paired with a matcher to interpret if, and how, any vulnerability is related to a package. Administrators might want to update the vulnerability database less frequently, or not import vulnerabilities from databases that they know will not be used. \ No newline at end of file diff --git a/modules/clair-using.adoc b/modules/clair-using.adoc new file mode 100644 index 000000000..334c32450 --- /dev/null +++ b/modules/clair-using.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="clair-using"] += Using Clair + +Use the following procedure to ensure that Clair is working on your {productname} Operator deployment. + +.Prerequisites + +* You have configured Clair for your {ocp} deployment. + +.Procedure + +. Log in to your {productname} cluster and select an organization for which you have configured +Clair scanning. + +. Select a repository from that organization that holds some images and +select Tags from the left navigation. The following figure shows an example +of a repository with two images that have been scanned: ++ +image:clair-reposcan.png[Security scan information appears for scanned repository images] + +. If vulnerabilities are found, select to under the Security Scan column +for the image to see either all vulnerabilities or those that are fixable. 
The +following figure shows information on all vulnerabilities found: ++ +image:clair-vulnerabilities.png[See all vulnerabilities or only those that are fixable] + diff --git a/modules/clair-vulnerability-scanner-hosts.adoc b/modules/clair-vulnerability-scanner-hosts.adoc new file mode 100644 index 000000000..e133afbc7 --- /dev/null +++ b/modules/clair-vulnerability-scanner-hosts.adoc @@ -0,0 +1,35 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-vulnerability-scanner-hosts"] += Clair vulnerability databases + +Clair uses the following vulnerability databases to report for issues in your images: + +* Ubuntu Oval database +* Debian Security Tracker +* {rhel} Oval database +* SUSE Oval database +* Oracle Oval database +* Alpine SecDB database +* VMware Photon OS database +* Amazon Web Services (AWS) UpdateInfo +* link:https://osv.dev/[Open Source Vulnerability (OSV) Database] + +For information about how Clair does security mapping with the different databases, see +link:https://quay.github.io/claircore/concepts/severity_mapping.html[Claircore Severity Mapping]. + +[id="information-about-clair-osv"] +== Information about Open Source Vulnerability (OSV) database for Clair + +Open Source Vulnerability (OSV) is a vulnerability database and monitoring service that focuses on tracking and managing security vulnerabilities in open source software. + +OSV provides a comprehensive and up-to-date database of known security vulnerabilities in open source projects. It covers a wide range of open source software, including libraries, frameworks, and other components that are used in software development. For a full list of included ecosystems, see link:https://ossf.github.io/osv-schema/#affectedpackage-field[defined ecosystems]. + +Clair also reports vulnerability and security information for `golang`, `java`, and `ruby` ecosystems through the Open Source Vulnerability (OSV) database. 
+ +By leveraging OSV, developers and organizations can proactively monitor and address security vulnerabilities in open source components that they use, which helps to reduce the risk of security breaches and data compromises in projects. + +For more information about OSV, see link:https://osv.dev/[the OSV website]. \ No newline at end of file diff --git a/modules/clair-vulnerability-scanner-overview.adoc b/modules/clair-vulnerability-scanner-overview.adoc new file mode 100644 index 000000000..82580353f --- /dev/null +++ b/modules/clair-vulnerability-scanner-overview.adoc @@ -0,0 +1,24 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="clair-vulnerability-scanner"] += Clair security scanner + +ifeval::["{context}" == "quay-io"] +Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {quayio}, is automatically enabled, and is managed by the {productname} development team. + +For {quayio} users, images are automatically indexed after they are pushed to your repository. Reports are then fetched from Clair, which matches images against its CVE's database to report security information. This process happens automatically on {quayio}, and manual recans are not required. +endif::[] + +ifeval::["{context}" == "clair"] +Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. 
+endif::[] + +ifeval::["{context}" == "quay-security"] +Clair v4 (Clair) is an open source application that leverages static code analyses for parsing image content and reporting vulnerabilities affecting the content. Clair is packaged with {productname} and can be used in both standalone and Operator deployments. It can be run in highly scalable configurations, where components can be scaled separately as appropriate for enterprise environments. + +For more information about Clair security scanner, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index[Vulnerability reporting with Clair on {productname}]. +endif::[] + diff --git a/modules/clairv2-compare-v4.adoc b/modules/clairv2-compare-v4.adoc new file mode 100644 index 000000000..01a2266ad --- /dev/null +++ b/modules/clairv2-compare-v4.adoc @@ -0,0 +1,28 @@ +[[clairv2-compare-v4]] +=== Clair v2 and Clair v4 Comparison + +.Clair v2 and Clair v4 component comparison +[cols="2,1,1",options="header"] +|=== +|Component |Clair v2 |Clair v4 +|API layers +|In Clair v2, clients were required to provide layers to the API. +|Clair v4 is manifest-based, providing an easier API for users. + +|Insights and reports +|Clair v2 provided only insights on vulnerabilities +|Clair v4 provides detailed reports on the content of the container, which can be fed to other tools for analyses or inventory purposes. + +|Architecture +|Clair v2 ran as a monolithic application. +|Clair v4 divides functionality across multiple services for ease of development and scaling use cases. + +|Support for language packages +|Clair v2 does not support computer language packages. +|Clair v4 supports Python language packages, with plans of adding more in future versions. + +|Package locator +|Clair v2 did not provide details on where packages were located inside of the container. +|Clair v4 identifies where packages are located inside of the container. 
+|=== + diff --git a/modules/clairv2-to-v4.adoc b/modules/clairv2-to-v4.adoc new file mode 100644 index 000000000..e13c0d4ce --- /dev/null +++ b/modules/clairv2-to-v4.adoc @@ -0,0 +1,9 @@ +[[clairv2-to-v4]] += Migrating from Clair v2 to Clair v4 + +Starting with {productname} 3.4, Clair v4 is used by default. It will also be the only version of Clair continually supported, as older versions of {productname} are not supported with Clair v4 in production. Users should continue using Clair v2 if using a version of {productname} earlier than 3.4. + +Existing {productname} 3.3 deployments will be upgraded to Clair v4 when managed via the {productname} Operator. Manually upgraded {productname} deployments can install Clair v4 side-by-side, which will cause the following: + +* All new image vulnerability scans to be performed by Clair v4 +* Existing images to be rescanned by Clair v4 diff --git a/modules/clairv4-air-gapped.adoc b/modules/clairv4-air-gapped.adoc new file mode 100644 index 000000000..aaf377af9 --- /dev/null +++ b/modules/clairv4-air-gapped.adoc @@ -0,0 +1,10 @@ +[[clairv4-air-gapped]] += Air-gapped Clair v4 + +{productname} 3.4 and later and Clair v4 are supported in disconnected environments. By default, Clair v4 will attempt to run automated updates against Red Hat servers. When Clair v4 in network environments is disconnected from the internet: + +* The Clair v4 auto-update is disabled in the Clair `config` bundle. +* On a system with internet access, the vulnerability database updates is performed manually and exported to a disk. +* The on-disk data is then transferred to the target system with offline media. It is then manually imported. 
+ +For more information on air-gapped Clair v4 and using `clairctl`, the command line tool, see https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-disconnected-environments[Manually updating the vulnerability databases for Clair in an air-gapped OpenShift cluster] diff --git a/modules/clairv4-arch.adoc b/modules/clairv4-arch.adoc new file mode 100644 index 000000000..5927e469b --- /dev/null +++ b/modules/clairv4-arch.adoc @@ -0,0 +1,18 @@ +[[clairv4-arch]] += Clair v4 architecture + +Clair v4 utilizes the ClairCore library as its engine for examining contents and reporting vulnerabilities. At a high level, you can consider Clair as a service wrapper to the functionality provided in the ClairCore library. + +== ClairCore + +ClairCore is the engine behind Clair v4's container security solution. The ClairCore package exports domain models, interfaces that are necessary to plug into the business logic, and a default set of implementations. This default set of implementations defines the support matrix. + +ClairCore relies on Postgres for its persistence and the library will handle migrations if configured to do so. + +The diagram below is a high level overview of ClairCore's architecture. + +image:clair-core-architecture.png[Connection not secure] + +When a `claircore.Manifest` is submitted to the LibIndex, the library will index its constituent parts and create a report with its findings. + +When a `claircore.IndexReport` is provided to LibVuln, the library will discover vulnerabilities affecting it and generate a `claircore.Volunerability` report. diff --git a/modules/clairv4-intro.adoc b/modules/clairv4-intro.adoc new file mode 100644 index 000000000..f0b3e62b7 --- /dev/null +++ b/modules/clairv4-intro.adoc @@ -0,0 +1,9 @@ +[[clairv4-intro]] +== Clair v4 + +Released with {productname} 3.4, Clair v4 is the latest version of Clair. 
It is built on a new architecture consisting of Clair Core and a service wrapper. Clair v4 made several enhancements to Clair v2, including: + +* Support for the Python programming language package. Support for additional languages is planned for future versions of Clair and {productname}. +* Immutable data model and a new manifest-oriented API. +* Refocus on the latest Open Container Initiative (OCI) specifications. +* Image hashes and layer hashes are now treated as content addressable, so that images are uniquely identified as a whole. diff --git a/modules/clairv4-limitations.adoc b/modules/clairv4-limitations.adoc new file mode 100644 index 000000000..a8a657120 --- /dev/null +++ b/modules/clairv4-limitations.adoc @@ -0,0 +1,12 @@ +[[clairv4-limitations]] += Clair v4 limitations + +The following limitations are currently being addressed by the development team: + +* As of Clair v4, both operating system level and programming language packages are covered. The latter is currently limited to Python, however support for other languages will be added in the future. + +* There is currently limited multi-arch support on Clair v4, which works for package managers like `rpm`, `yum`, and `dnf` that compensate for differences in endianness. + +* Clair v4 does not currently support MSFT Windows images. + +* Clair v4 does not currently support slim / scratch container images. \ No newline at end of file diff --git a/modules/con_deploy_quay_start_using.adoc b/modules/con_deploy_quay_start_using.adoc index 18e83ae91..5e6bb3a95 100644 --- a/modules/con_deploy_quay_start_using.adoc +++ b/modules/con_deploy_quay_start_using.adoc @@ -2,5 +2,5 @@ With {productname} now running, you can: * Select Tutorial from the Quay home page to try the 15-minute tutorial. In the tutorial, you learn to log into Quay, start a container, create images, push repositories, view repositories, and change repository permissions with Quay. 
-* Refer to the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/[Use {productname}] for information on working +* Refer to the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/[Use {productname}] for information on working with {productname} repositories. diff --git a/modules/con_quay_ha_prereq.adoc b/modules/con_quay_ha_prereq.adoc index 2b1796652..52c8ff578 100644 --- a/modules/con_quay_ha_prereq.adoc +++ b/modules/con_quay_ha_prereq.adoc @@ -11,7 +11,7 @@ Here are a few things you need to know before you begin the {productname} high a * Either Postgres or MySQL can be used to provide the database service. Postgres was chosen here as the database because it includes the features needed to support Clair security scanning. Other options include: ** Crunchy Data PostgreSQL Operator: Although not supported directly by Red Hat, -the link:https://access.crunchydata.com/documentation/postgres-operator/latest/[CrunchDB Operator] +the link:https://access.crunchydata.com/documentation/postgres-operator/latest/[Postgres Operator] is available from link:https://www.crunchydata.com/[Crunchy Data] for use with {productname}. If you take this route, you should have a support contract with Crunchy Data and work directly with them for usage guidance or issues relating to the operator and their database. @@ -21,7 +21,7 @@ link:https://access.redhat.com/support/policy/updates/rhquay/policies[{productna for details on support for third-party databases and other components. * Ceph Object Gateway (also called RADOS Gateway) is one example of a product that -can provide the the object storage needed by {productname}. If you want your +can provide the object storage needed by {productname}. If you want your {productname} setup to do geo-replication, Ceph Object Gateway or other supported object storage is required. 
For cloud installations, you can use any of the following cloud object storage: @@ -31,7 +31,7 @@ object storage is required. For cloud installations, you can use any of the foll ** Ceph Object Gateway ** OpenStack Swift ** CloudFront + S3 -** NooBaa S3 Storage (link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]) +** NooBaa S3 Storage * The haproxy server is used in this example, although you can use any proxy service that works for your environment. @@ -44,8 +44,30 @@ your environment. Each system should have the following attributes: -* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.5/x86_64/product-software[Downloads page] and follow instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide] to install RHEL on each system. -** **Valid Red Hat Subscription**: Obtain Red Hat Enterprise Linux server subscriptions and apply one to each system. +//* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.5/x86_64/product-software[Downloads page] and follow instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide] to install RHEL on each system. +//** **Valid Red Hat Subscription**: Obtain Red Hat Enterprise Linux server subscriptions and apply one to each system. 
+* **Red Hat Enterprise Linux (RHEL)** 9: Obtain the latest Red Hat Enterprise Linux 9 server media from the link:https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.0/x86_64/product-software[Downloads page] and follow the installation instructions available in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/[Product Documentation for Red Hat Enterprise Linux 9]. +** **Valid Red Hat Subscription**: Configure a valid Red Hat Enterprise Linux 9 server subscription. + ** **CPUs**: Two or more virtual CPUs ** **RAM**: 4GB for each A and B system; 8GB for each C system ** **Disk space**: About 20GB of disk space for each A and B system (10GB for the operating system and 10GB for docker storage). At least 30GB of disk space for C systems (or more depending on required container storage). + +[[using-podman]] +== Using podman + +This document uses podman for creating and deploying containers. If you do not have podman available on your system, you should be able to use the equivalent docker commands. For more information on podman and related technologies, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index[Building, running, and managing Linux containers on Red Hat Enterprise Linux 8]. + +[NOTE] +==== +Podman is strongly recommended for highly available, production quality deployments of {productname}. Docker has not been tested with {productname} {producty}, and will be deprecated in a future release. 
+==== + + +//// +== Restarting containers + +Because the `--restart` option is not fully supported by podman, instead of using `--restart`, you could configure `podman` as a systemd service, as described +in +https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#porting-containers-to-systemd-using-podman_building-running-and-managing-containers[Porting containers to systemd using Podman] +//// diff --git a/modules/con_quay_intro.adoc b/modules/con_quay_intro.adoc index 0ff530ba0..e52645213 100644 --- a/modules/con_quay_intro.adoc +++ b/modules/con_quay_intro.adoc @@ -1,38 +1,59 @@ -= Overview +:_content-type: CONCEPT +[id="poc-overview"] += {productname} features -Features of Quay include: +{productname} is regularly released with new features and software updates. The following features are available for {productname} deployments, however the list is not exhaustive: * High availability * Geo-replication -* Repository mirroring (new in {productname} v3.1, link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]) -* Docker v2, schema 2 (multiarch) support +* Repository mirroring +* Docker v2, schema 2 (multi-arch) support * Continuous integration * Security scanning with Clair * Custom log rotation * Zero downtime garbage collection * 24/7 support -Quay provides support for: +Users should check the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/index#doc-wrapper[{productname} Release Notes] for the latest feature information. 
+ +[id="poc-support"] += {productname} support + +{productname} provides support for the following: * Multiple authentication and access methods * Multiple storage backends -* Custom certificates for Quay, Clair, and storage backends +* Custom certificates for `Quay`, `Clair`, and storage backend containers * Application registries * Different container image types -= Architecture +[id="poc-architecture"] +== Architecture + +{productname} includes several core components, both internal and external. + +For a fuller architectural breakdown, see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_architecture/index[{productname} architecture] guide. + +[id="poc-internal-components"] +=== Internal components -Quay is made up of several core components for a basic setup. In highly available setups, an additional object storage component is needed. The core components are: +{productname} includes the following internal components: -* **Database**: Used by {productname} as its primary metadata storage (not for image storage). -* **Redis (key, value store)**: Stores live builder logs and the Red Hat Quay tutorial. -* **Quay (container registry)**: Runs the quay container as a service, consisting of several components in the pod. -* **Clair**: Scans container images for vulnerabilities and suggests fixes. +* **Quay (container registry)**. Runs the `Quay` container as a service, consisting of several components in the pod. +* **Clair**. Scans container images for vulnerabilities and suggests fixes. -For the high availability installation, you need to use one of the following types of storage: +[id="poc-external-components"] +=== External components -* **Public cloud storage**: In public cloud environments, you should use the cloud provider's object storage, such as Amazon S3 (for AWS) or Google Cloud Storage (for Google Cloud). 
+{productname} includes the following external components: -* **Private cloud storage**: In private clouds, an S3 or Swift compliant Object Store is needed, such as Ceph RADOS, or OpenStack Swift. +* **Database**. Used by {productname} as its primary metadata storage. Note that this is not for image storage. +* **Redis (key-value store)**. Stores live builder logs and the {productname} tutorial. Also includes the locking mechanism that is required for garbage collection. +* **Cloud storage**. For supported deployments, one of the following storage types must be used: +** **Public cloud storage**. In public cloud environments, you should use the cloud provider's object storage, such as Amazon Web Services's Amazon S3 or Google Cloud's Google Cloud Storage. +** **Private cloud storage**. In private clouds, an S3 or Swift compliant Object Store is needed, such as Ceph RADOS, or OpenStack Swift. -Local storage is supported for the Red Hat Quay test-only installation, but not for high-availability. +[WARNING] +==== +Do not use "Locally mounted directory" Storage Engine for any production configurations. Mounted NFS volumes are not supported. Local storage is meant for {productname} test-only installations. +==== diff --git a/modules/con_quay_openshift_prereq.adoc b/modules/con_quay_openshift_prereq.adoc deleted file mode 100644 index 33e4d0626..000000000 --- a/modules/con_quay_openshift_prereq.adoc +++ /dev/null @@ -1,47 +0,0 @@ -= Installing {productname} on OpenShift - -[NOTE] -==== -This procedure presents guidance on how to set up a deployment of {productname} on an OpenShift cluster. -==== - -== Prerequisites - -Here are a few things you need to know before you begin -the {productname} on OpenShift deployment: - -* *OpenShift cluster*: You need a privileged account to an OpenShift 3.x or 4.x cluster on which to deploy -the {productname}. That account must have the ability to create namespaces at the cluster scope. -To use Quay builders, OpenShift 3 is required. 
- -* *Storage*: AWS cloud storage is used as an example in the following procedure. -As an alternative, you can create Ceph cloud storage using steps -from the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_-_high_availability/#set_up_ceph[Set up Ceph] section of the high availability {productname} deployment guide. -The following is a list of other supported cloud storage: - -** Amazon S3 (see link:https://access.redhat.com/solutions/3680151[S3 IAM Bucket Policy] for details on configuring an S3 bucket policy for Quay) -** Azure Blob Storage -** Google Cloud Storage -** Ceph Object Gateway -** OpenStack Swift -** CloudFront + S3 -** NooBaa S3 Storage (link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]) - -* *Services*: Because you are deploying to an OpenShift cluster, instead of to bare metal -or virtual machines, you simply need to make sure that your cluster has enough capacity to run -the following containerized services: - -** *Database*: We recommend you use an enterprise-quality database for production use of {productname}. -Postgres is used as an example in this document. Other options include: -*** Crunchy Data PostgreSQL Operator: Although not supported directly by Red Hat, -the link:https://access.crunchydata.com/documentation/postgres-operator/latest/[CrunchDB Operator] -is available from link:https://www.crunchydata.com/[Crunchy Data] for use with {productname}. -If you take this route, you should have a support contract with Crunchy Data and -work directly with them for usage guidance or issues relating to the operator and their database. -*** If your organization already has a high-availability (HA) database, you can use that database -with {productname}. See the -link:https://access.redhat.com/support/policy/updates/rhquay/policies[{productname} Support Policy] -for details on support for third-party databases and other components. 
-** *Key-value database*: Redis is used to serve live builder logs and Quay -tutorial content to your {productname} configuration. -** *{productname}*: The quay container provides the features to manage the Quay registry. diff --git a/modules/con_quay_single_prereq.adoc b/modules/con_quay_single_prereq.adoc index 1a9354461..aa9fe73d2 100644 --- a/modules/con_quay_single_prereq.adoc +++ b/modules/con_quay_single_prereq.adoc @@ -1,20 +1,58 @@ -= Preparing for {productname} (basic) +:_content-type: CONCEPT +[id="poc-prerequisites"] += Prerequisites +ifeval::["{productname}" == "Red Hat Quay"] +//* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux 7 server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.9/x86_64/product-software[Downloads page] and follow the installation instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide]. +* Red Hat Enterprise Linux (RHEL) 9 +** To obtain the latest version of {rhel} 9, see link:https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.0/x86_64/product-software[Download Red Hat Enterprise Linux]. +** For installation instructions, see the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/[Product Documentation for Red Hat Enterprise Linux 9]. +* An active subscription to Red Hat +endif::[] +* Two or more virtual CPUs +* 4 GB or more of RAM +* Approximately 30 GB of disk space on your test system, which can be broken down as follows: +** Approximately 10 GB of disk space for the {rhel} operating system. +** Approximately 10 GB of disk space for Docker storage for running three containers. +** Approximately 10 GB of disk space for {productname} local storage. ++ [NOTE] ==== -This installation process is only for POC purposes and is not intended for use as a production install. 
+CEPH or other local storage might require more memory. ==== ++ +More information on sizing can be found at link:https://access.redhat.com/articles/5177961[Quay 3.x Sizing Guidelines]. +* The following architectures are supported for {productname}: +** amd64/x86_64 +** s390x +** ppc64le -== Prerequisites +[id="poc-using-podman"] +== Installing Podman -For a {productname} Registry installation (appropriate for non-production purposes), you need one system (physical or virtual machine) that has the following attributes: +This document uses Podman for creating and deploying containers. -* **Red Hat Enterprise Linux (RHEL)**: Obtain the latest Red Hat Enterprise Linux server media from the link:https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.5/x86_64/product-software[Downloads page] and follow instructions from the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/index[Red Hat Enterprise Linux 7 Installation Guide]. -* **Valid Red Hat Subscription**: Obtain a valid Red Hat Enterprise Linux server subscription. -* **CPUs**: Two or more virtual CPUs -* **RAM**: 4GB or more -* **Disk space**: (dependant on storage needs for registry) - - About 30GB of disk space should be enough for a test system (broken down in the following manner): - - At least 10GB of disk space for the operating system (Red Hat Enterprise Linux Server). - - At least 10GB of disk space for docker storage (to run 3 containers) - - At least 10GB of disk space for Quay local storage (CEPH or other local storage might require more memory) +For more information on Podman and related technologies, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/building_running_and_managing_containers/index[Building, running, and managing Linux containers on Red Hat Enterprise Linux 9]. 
+ +[IMPORTANT] +==== +If you do not have Podman installed on your system, the use of equivalent Docker commands might be possible, however this is not recommended. Docker has not been tested with {productname} {producty}, and will be deprecated in a future release. Podman is recommended for highly available, production quality deployments of {productname} {producty}. +==== + +Use the following procedure to install Podman. + +.Procedure + +* Enter the following command to install Podman: ++ +[source,terminal] +---- +$ sudo yum install -y podman +---- + +* Alternatively, you can install the `container-tools` module, which pulls in the full set of container software packages: ++ +[source,terminal] +---- +$ sudo yum module install -y container-tools +---- \ No newline at end of file diff --git a/modules/con_schema.adoc b/modules/con_schema.adoc index 958cac12c..e2058a847 100644 --- a/modules/con_schema.adoc +++ b/modules/con_schema.adoc @@ -1,282 +1,5 @@ -[[quay-schema]] -= Schema for {productname} +:_content-type: CONCEPT +[id="quay-schema"] += Schema for {productname} configuration -[NOTE] -==== -All fields are optional unless otherwise marked. -==== - -* **AUTHENTICATION_TYPE** [string] required: The authentication engine to use for credential authentication. -** **enum**: Database, LDAP, JWT, Keystone, OIDC. -** **Example**: `Database` -* **BUILDLOGS_REDIS** [object] required: Connection information for Redis for build logs caching. -** **HOST** [string] required: The hostname at which Redis is accessible. -*** **Example**: `my.redis.cluster` -** **PASSWORD** [string]: The password to connect to the Redis instance. -*** **Example**: `mypassword` -** **PORT** [number]: The port at which Redis is accessible. -*** **Example**: `1234` -* **DB_URI** [string] required: The URI at which to access the database, including any credentials. 
-** **Reference**: https://www.postgresql.org/docs/9.3/static/libpq-connect.html#AEN39495 -** **Example**: `mysql+pymysql://username:password@dns.of.database/quay` -* **DEFAULT_TAG_EXPIRATION** [string] required: The default, configurable tag expiration time for time machine. Defaults to `2w`. -** **Pattern**: ``^[0-9]+(w|m|d|h|s)$`` -* **DISTRIBUTED_STORAGE_CONFIG** [object] required: Configuration for storage engine(s) to use in {productname}. Each key is a unique ID for a storage engine, with the value being a tuple of the type and configuration for that engine. -** **Example**: `{"local_storage": ["LocalStorage", {"storage_path": "some/path/"}]}` -* **DISTRIBUTED_STORAGE_PREFERENCE** [array] required: The preferred storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) to use. A preferred engine means it is first checked for pulling and images are pushed to it. -** **Min Items**: None -*** **Example**: `[u's3_us_east', u's3_us_west']` -*** **array item** [string] -** **preferred_url_scheme** [string] required: The URL scheme to use when hitting {productname}. If {productname} is behind SSL *at all*, this *must* be `https`. -*** **enum**: `http, https` -*** **Example**: `https` -* **SERVER_HOSTNAME** [string] required: The URL at which {productname} is accessible, without the scheme. -** **Example**: `quay.io` -* **TAG_EXPIRATION_OPTIONS** [array] required: The options that users can select for expiration of tags in their namespace (if enabled). -** **Min Items**: None -** **array item** [string] -** **Pattern**: ``^[0-9]+(w|m|d|h|s)$`` -* **USER_EVENTS_REDIS** [object] required: Connection information for Redis for user event handling. -** **HOST** [string] required: The hostname at which Redis is accessible. -*** **Example**: `my.redis.cluster` -** **PASSWORD** [string]: The password to connect to the Redis instance. -*** **Example**: `mypassword` -** **PORT** [number]: The port at which Redis is accessible. 
-*** **Example**: `1234` -* **ACTION_LOG_ARCHIVE_LOCATION** [string]: If action log archiving is enabled, the storage engine in which to place the archived data. -** **Example**: `s3_us_east` -* **ACTION_LOG_ARCHIVE_PATH'** [string]: If action log archiving is enabled, the path in storage in which to place the archived data. -** **Example**: `archives/actionlogs` -* **APP_SPECIFIC_TOKEN_EXPIRATION** [string, `null`]: The expiration for external app tokens. Defaults to None. -** **Pattern**: `^[0-9]+(w|m|d|h|s)$` -* **ALLOW_PULLS_WITHOUT_STRICT_LOGGING** [boolean]: If true, pulls in which the pull audit log entry cannot be written will still succeed. Useful if the database can fallback into a read-only state and it is desired for pulls to continue during that time. Defaults to False. -** **Example**: `True` -* **AVATAR_KIND** [string]: The types of avatars to display, either generated inline (local) or Gravatar (gravatar) -** **enum**: local, gravatar -* **BITBUCKET_TRIGGER_CONFIG** ['object', 'null']: Configuration for using BitBucket for build triggers. -** **consumer_key** [string] required: The registered consumer key(client ID) for this {productname} instance. -*** **Example**: `0e8dbe15c4c7630b6780` -** **CONSUMER_SECRET** [string] required: The registered consumer secret(client secret) for this {productname} instance -*** **Example**: e4a58ddd3d7408b7aec109e85564a0d153d3e846 -* **BITTORRENT_ANNOUNCE_URL** [string]: The URL of the announce endpoint on the bittorrent tracker. -** **Pattern**: ``^http(s)?://(.)+$`` -** **Example**: `https://localhost:6881/announce` -* **BITTORRENT_PIECE_SIZE** [number]: The bittorent piece size to use. If not specified, defaults to 512 * 1024. -** **Example**: `524288` -* **BROWSER_API_CALLS_XHR_ONLY** [boolean]: If enabled, only API calls marked as being made by an XHR will be allowed from browsers. Defaults to True. 
-** **Example**: False -* **CONTACT_INFO** [array]: If specified, contact information to display on the contact page. If only a single piece of contact information is specified, the contact footer will link directly. -** **Min Items**: 1 -** **Unique Items**: True -*** **array item 0** [string]: Adds a link to send an e-mail -*** **Pattern**: ``^mailto:(.)+$`` -*** **Example**: `mailto:support@quay.io` -** **array item 1** [string]: Adds a link to visit an IRC chat room -*** **Pattern**: ``^irc://(.)+$`` -*** **Example**: `irc://chat.freenode.net:6665/quay` -** **array item 2** [string]: Adds a link to call a phone number -*** **Pattern**: ``^tel:(.)+$`` -*** **Example**: `tel:+1-888-930-3475` -** **array item 3** [string]: Adds a link to a defined URL -*** **Pattern**: ``^http(s)?://(.)+$`` -*** **Example**: `https://twitter.com/quayio` -* **BLACKLIST_V2_SPEC** [string]: The Docker CLI versions to which {productname} will respond that V2 is *unsupported*. Defaults to `<1.6.0`. -** **Reference**: http://pythonhosted.org/semantic_version/reference.html#semantic_version.Spec -** **Example**: `<1.8.0` -* **DB_CONNECTION_ARGS** [object]: If specified, connection arguments for the database such as timeouts and SSL. -** **threadlocals** [boolean] required: Whether to use thread-local connections. Should *ALWAYS* be `true` -** **autorollback** [boolean] required: Whether to use auto-rollback connections. Should *ALWAYS* be `true` -** **ssl** [object]: SSL connection configuration -*** **ca** [string] required: Absolute container path to the CA certificate to use for SSL connections. -*** **Example**: `conf/stack/ssl-ca-cert.pem` -* **DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT** [number, `null`]: If not None, the default maximum number of builds that can be queued in a namespace. 
-** **Example**: `20` -* **DIRECT_OAUTH_CLIENTID_WHITELIST** [array]: A list of client IDs of *{productname}-managed* applications that are allowed to perform direct OAuth approval without user approval. -** **Min Items**: None -** **Unique Items**: True -** **Reference**: https://coreos.com/quay-enterprise/docs/latest/direct-oauth.html -*** **array item** [string] -* **DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS** [array]: The list of storage engine(s) (by ID in DISTRIBUTED_STORAGE_CONFIG) whose images should be fully replicated, by default, to all other storage engines. -** **Min Items**: None -** **Example**: `s3_us_east, s3_us_west` -*** **array item** [string] -* **EXTERNAL_TLS_TERMINATION** [boolean]: If TLS is supported, but terminated at a layer before {productname}, must be true. -** **Example**: `True` -* **ENABLE_HEALTH_DEBUG_SECRET** [string, `null`]: If specified, a secret that can be given to health endpoints to see full debug info when not authenticated as a superuser. -** **Example**: `somesecrethere` -* **EXPIRED_APP_SPECIFIC_TOKEN_GC** [string, `null`]: Duration of time expired external app tokens will remain before being garbage collected. Defaults to 1d. -** **pattern**: `^[0-9]+(w|m|d|h|s)$` -* **FEATURE_ACI_CONVERSION** [boolean]: Whether to enable conversion to ACIs. Defaults to False. -** **Example**: `False` -* **FEATURE_ACTION_LOG_ROTATION** [boolean]: Whether or not to rotate old action logs to storage. Defaults to False. -** **Example**: `False` -* **FEATURE_ADVERTISE_V2** [boolean]: Whether the v2/ endpoint is visible. Defaults to True. -** **Example**: `True` -* **FEATURE_ANONYMOUS_ACCESS** [boolean]: Whether to allow anonymous users to browse and pull public repositories. Defaults to True. -** **Example**: `True` -* **FEATURE_APP_REGISTRY** [boolean]: Whether to enable support for App repositories. Defaults to False. 
-** **Example**: `False` -* **FEATURE_APP_SPECIFIC_TOKENS** [boolean]: If enabled, users can create tokens for use by the Docker CLI. Defaults to True. -** **Example**: False -* **FEATURE_BITBUCKET_BUILD** [boolean]: Whether to support Bitbucket build triggers. Defaults to False. -** **Example**: `False` -* **FEATURE_BITTORRENT** [boolean]: Whether to allow using Bittorrent-based pulls. Defaults to False. -** **Reference**: https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#bittorrent-based-distribution -** **Example**: `False` -* **FEATURE_BUILD_SUPPORT** [boolean]: Whether to support Dockerfile build. Defaults to True. -** **Example**: `True` -* **FEATURE_CHANGE_TAG_EXPIRARTION** [boolean]: Whether users and organizations are allowed to change the tag expiration for tags in their namespace. Defaults to True. -** **Example**: `False` -* **FEATURE_DIRECT_LOGIN** [boolean]: Whether users can directly login to the UI. Defaults to True. -** **Example**: `True` -* **FEATURE_GITHUB_BUILD** [boolean]: Whether to support GitHub build triggers. Defaults to False. -** **Example**: `False` -* **FEATURE_GITHUB_LOGIN** [boolean]: Whether GitHub login is supported. Defaults to False. -** **Example**: `False` -* **FEATURE_GITLAB_BUILD**[boolean]: Whether to support GitLab build triggers. Defaults to False. -** **Example**: `False` -* **FEATURE_GOOGLE_LOGIN** [boolean]: Whether Google login is supported. Defaults to False. -** **Example**: `False` -* **FEATURE_INVITE_ONLY_USER_CREATION** [boolean]: Whether users being created must be invited by another user. Defaults to False. -** **Example**: `False` -* **FEATURE_LIBRARY_SUPPORT** [boolean]: Whether to allow for "namespace-less" repositories when pulling and pushing from Docker. Defaults to True. -** **Example**: `True` -* **FEATURE_MAILING** [boolean]: Whether emails are enabled. Defaults to True. 
-** **Example**: `True` -* **FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP** [boolean]: If enabled, non-superusers can setup syncing on teams to backing LDAP or Keystone. Defaults To False. -** **Example**: `True` -* **FEATURE_PARTIAL_USER_AUTOCOMPLETE** [boolean]: If set to true, autocompletion will apply to partial usernames. Defaults to True. -** **Example**: `True` -* **FEATURE_PERMANENT_SESSIONS** [boolean]: Whether sessions are permanent. Defaults to True. -** **Example**: `True` -* **FEATURE_PROXY_STORAGE** [boolean]: Whether to proxy all direct download URLs in storage via the registry nginx. Defaults to False. -** **Example**: `False` -* **FEATURE_PUBLIC_CATALOG** [boolean]: If set to true, the `_catalog` endpoint returns public repositories. Otherwise, only private repositories can be returned. Defaults to False. -** **Example**: `False` -* **FEATURE_READER_BUILD_LOGS** [boolean]: If set to true, build logs may be read by those with read access to the repo, rather than only write access or admin access. Defaults to False. -** **Example**: False -* **FEATURE_RECAPTCHA** [boolean]: Whether Recaptcha is necessary for user login and recovery. Defaults to False. -** **Example**: `False` -** **Reference**: https://www.google.com/recaptcha/intro/ -* **FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH** [boolean]: Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth. Defaults to False. -** **Example**: `False` -* **FEATURE_REQUIRE_TEAM_INVITE** [boolean]: Whether to require invitations when adding a user to a team. Defaults to True. -** **Example**: `True` -* **FEATURE_SECURITY_NOTIFICATIONS** [boolean]: If the security scanner is enabled, whether to turn on/off security notifications. Defaults to False. -** **Example**: `False` -* **FEATURE_SECURITY_SCANNER** [boolean]: Whether to turn on/off the security scanner. Defaults to False. 
-** **Reference**: https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#clair-initial-setup -** **Example**: `False` -* **FEATURE_STORAGE_REPLICATION** [boolean]: Whether to automatically replicate between storage engines. Defaults to False. -** **Example**: `False` -* **FEATURE_SUPER_USERS** [boolean]: Whether superusers are supported. Defaults to True. -** **Example**: `True` -* **FEATURE_TEAM_SYNCING** [boolean]: Whether to allow for team membership to be synced from a backing group in the authentication engine (LDAP or Keystone). -** **Example**: `True` -* **FEATURE_USER_CREATION** [boolean] :Whether users can be created (by non-superusers). Defaults to True. -** **Example**: `True` -* **FEATURE_USER_LOG_ACCESS** [boolean]: If set to true, users will have access to audit logs for their namespace. Defaults to False. -** **Example**: `True` -* **FEATURE_USER_METADATA** [boolean]: Whether to collect and support user metadata. Defaults to False. -** **Example**: `False` -* **FEATURE_USER_RENAME** [boolean]: If set to true, users can rename their own namespace. Defaults to False. -** **Example**: `True` -* **GITHUB_LOGIN_CONFIG** [object, 'null']: Configuration for using GitHub (Enterprise) as an external login provider. -** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-auth.html -** **allowed_organizations** [array]: The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option. -*** **Min Items**: None -*** **Unique Items**: True -**** **array item** [string] -** **API_ENDPOINT** [string]: The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com. -*** **Example**: `https://api.github.com/` -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance; cannot be shared with GITHUB_TRIGGER_CONFIG. 
-*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `e4a58ddd3d7408b7aec109e85564a0d153d3e846` -** **GITHUB_ENDPOINT** [string] required: The endpoint of the GitHub (Enterprise) being hit. -*** **Example**: `https://github.com/` -** **ORG_RESTRICT** [boolean]: If true, only users within the organization whitelist can login using this provider. -** **Example**: `True` -* **GITHUB_TRIGGER_CONFIG** [object, `null`]: Configuration for using GitHub (Enterprise) for build triggers. -** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-build.html -** **API_ENDPOINT** [string]: The endpoint of the GitHub (Enterprise) API to use. Must be overridden for github.com. -*** **Example**: `https://api.github.com/` -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance; cannot be shared with GITHUB_LOGIN_CONFIG. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Reference**: https://coreos.com/quay-enterprise/docs/latest/github-app.html -*** **Example**: `e4a58ddd3d7408b7aec109e85564a0d153d3e846` -** **GITHUB_ENDPOINT** [string] required: The endpoint of the GitHub (Enterprise) being hit. -*** **Example**: `https://github.com/` -* **GITLAB_TRIGGER_CONFIG** [object]: Configuration for using Gitlab (Enterprise) for external authentication. -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance. -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. 
-*** **Example**: `e4a58ddd3d7408b7aec109e85564a0d153d3e846` -*** **gitlab_endpoint** [string] required: The endpoint at which Gitlab(Enterprise) is running. -**** **Example**: `https://gitlab.com` -* **GOOGLE_LOGIN_CONFIG** [object, `null`]: Configuration for using Google for external authentication -** **CLIENT_ID** [string] required: The registered client ID for this {productname} instance. -*** **Example**: `0e8dbe15c4c7630b6780` -** **CLIENT_SECRET** [string] required: The registered client secret for this {productname} instance. -*** **Example**: e4a58ddd3d7408b7aec109e85564a0d153d3e846 -* **HEALTH_CHECKER** [string]: The configured health check. -** **Example**: `('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'})` -* **LOG_ARCHIVE_LOCATION** [string]:If builds are enabled, the storage engine in which to place the archived build logs. -** **Example**: `s3_us_east` -* **LOG_ARCHIVE_PATH** [string]: If builds are enabled, the path in storage in which to place the archived build logs. -** **Example**: `archives/buildlogs` -* **MAIL_DEFAULT_SENDER** [string, `null`]: If specified, the e-mail address used as the `from` when {productname} sends e-mails. If none, defaults to `support@quay.io`. -** **Example**: `support@myco.com` -* **MAIL_PASSWORD** [string, `null`]: The SMTP password to use when sending e-mails. -** **Example**: `mypassword` -* **MAIL_PORT** [number]: The SMTP port to use. If not specified, defaults to 587. -** **Example**: `588` -* **MAIL_SERVER** [string]: The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true. -** **Example**: `smtp.somedomain.com` -* **MAIL_USERNAME** [string, 'null']: The SMTP username to use when sending e-mails. -** **Example**: `myuser` -* **MAIL_USE_TLS** [boolean]: If specified, whether to use TLS for sending e-mails. -** **Example**: `True` -* **MAXIMUM_LAYER_SIZE** [string]: Maximum allowed size of an image layer. Defaults to 20G. 
-** **Pattern**: ``^[0-9]+(G|M)$`` -** **Example**: `100G` -* **PUBLIC_NAMESPACES** [array]: If a namespace is defined in the public namespace list, then it will appear on *all* user's repository list pages, regardless of whether that user is a member of the namespace. Typically, this is used by an enterprise customer in configuring a set of "well-known" namespaces. -** **Min Items**: None -** **Unique Items**: True -*** **array item** [string] -* **PROMETHEUS_NAMESPACE** [string]: The prefix applied to all exposed Prometheus metrics. Defaults to `quay`. -** **Example**: `myregistry` -* **RECAPTCHA_SITE_KEY** [string]: If recaptcha is enabled, the site key for the Recaptcha service. -* **RECAPTCHA_SECRET_KEY** [string]: 'If recaptcha is enabled, the secret key for the Recaptcha service. -* **REGISTRY_TITLE** [string]: If specified, the long-form title for the registry. Defaults to `Quay Enterprise`. -** **Example**: `Corp Container Service` -* **REGISTRY_TITLE_SHORT** [string]: If specified, the short-form title for the registry. Defaults to `Quay Enterprise`. -** **Example**: `CCS` -* **SECURITY_SCANNER_ENDPOINT** [string]: The endpoint for the security scanner. -** **Pattern**: ``^http(s)?://(.)+$`` -** **Example**: `http://192.168.99.101:6060` -* **SECURITY_SCANNER_INDEXING_INTERVAL** [number]: The number of seconds between indexing intervals in the security scanner. Defaults to 30. -** **Example**: `30` -* **SESSION_COOKIE_SECURE** [boolean]: Whether the `secure` property should be set on session cookies. Defaults to False. Recommended to be True for all installations using SSL. -** **Example**: True -** **Reference**: https://en.wikipedia.org/wiki/Secure_cookies -* **SUPER_USERS** [array]: {productname} usernames of those users to be granted superuser privileges. 
-** **Min Items**: None -** **Unique Items**: True -*** **array item** [string] -* **TEAM_RESYNC_STALE_TIME** [string]: If team syncing is enabled for a team, how often to check its membership and resync if necessary (Default: 30m). -** **Pattern**: ``^[0-9]+(w|m|d|h|s)$`` -** **Example**: `2h` -* **USERFILES_LOCATION** [string]: ID of the storage engine in which to place user-uploaded files. -** **Example**: `s3_us_east` -* **USERFILES_PATH** [string]: Path under storage in which to place user-uploaded files. -** **Example**: `userfiles` -* **USER_RECOVERY_TOKEN_LIFETIME** [string]: The length of time a token for recovering a user accounts is valid. Defaults to 30m. -** **Example**: `10m` -** **Pattern**: `^[0-9]+(w|m|d|h|s)$` -* **V2_PAGINATION_SIZE** [number]: The number of results returned per page in V2 registry APIs. -** **Example**: `100` +Most {productname} configuration information is stored in the `config.yaml` file. All configuration options are described in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#doc-wrapper[{productname} Configuration Guide]. \ No newline at end of file diff --git a/modules/con_upgrade_v3.adoc b/modules/con_upgrade_v3.adoc index 853fbcff5..39269fbfc 100644 --- a/modules/con_upgrade_v3.adoc +++ b/modules/con_upgrade_v3.adoc @@ -1,7 +1,13 @@ [[upgrade-v3-concept]] -= Overview of {productname} v3 Upgrade += Overview of upgrade +Follow the procedure below if you are starting with a {productname} 2.y.z cluster. Before upgrading to the latest {productname} 3.x version, you must first migrate that cluster to 3.0.5, as described link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_0_5_from_2_9_5[here]. Once your cluster is running 3.0.5, you can then upgrade to the latest 3.x version by sequentially upgrading to each minor version in turn. 
For example: -Before beginning your {productname} v2 to v3 upgrade, please note the following: +. 3.0.5 -> 3.1.3 +. 3.1.3 -> 3.2.2 +. 3.2.2 -> 3.3.4 +. 3.3.4 -> 3.4.z + +Before beginning your {productname} 2.y.z to 3.0 upgrade, please note the following: * **Synchronous upgrade**: For a synchronous upgrade, expect less than one hour of total downtime for small installations. Consider a small installation to contain a few thousand container image tags or fewer. @@ -31,13 +37,12 @@ To assure the best results, we recommend the following prerequisites: * Back up your storage (also a general best practice). -* Upgrade your current {productname} 2.y.z setup to the latest 2.y.z version (currently 2.9.5) before starting the v3 upgrade. To do that, you can: +* Upgrade your current {productname} 2.y.z setup to the latest 2.9.z version (currently 2.9.5) before starting the v3 upgrade. To do that: -- While the {productname} cluster is still running, take one node and change the quay container on that system -to a quay container that is running the latest 2.9.z version. +- While the {productname} cluster is still running, take one node and change the `Quay` container on that system to a `Quay` container that is running the latest 2.9.z version. - Wait for all the database migrations to run, bringing the database up to the latest 2.9.z version. This should only take a few minutes to a half an hour. -- Once that is done, replace the quay container on all the existing nodes with the same latest 2.9.z version. +- Once that is done, replace the `Quay` container on all the existing nodes with the same latest 2.9.z version. With the entire {productname} cluster on the new version, you can proceed to the v3 upgrade. 
diff --git a/modules/conc_quay-bridge-operator.adoc b/modules/conc_quay-bridge-operator.adoc new file mode 100644 index 000000000..e3d9a3830 --- /dev/null +++ b/modules/conc_quay-bridge-operator.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="quay-bridge-operator"] += Integrating {productname} into {ocp} with the {qbo} + +The {qbo} duplicates the features of the integrated {ocp} registry into the new {productname} registry. Using the {qbo}, you can replace the integrated container registry in {ocp} with a {productname} registry. + +The features enabled with the {qbo} include: + +* Synchronizing {ocp} namespaces as {productname} organizations. +* Creating robot accounts for each default namespace service account. +* Creating secrets for each created robot account, and associating each robot secret to a service account as `Mountable` and `Image Pull Secret`. +* Synchronizing {ocp} image streams as {productname} repositories. +* Automatically rewriting new builds making use of image streams to output to {productname}. +* Automatically importing an image stream tag after a build completes. + +By using the following procedures, you can enable bi-directional communication between your {productname} and {ocp} clusters. diff --git a/modules/config-additional-ca-certs-operator.adoc b/modules/config-additional-ca-certs-operator.adoc new file mode 100644 index 000000000..def829b9f --- /dev/null +++ b/modules/config-additional-ca-certs-operator.adoc @@ -0,0 +1,8 @@ +[id="config-additional-cas-ocp"] += Adding additional Certificate Authorities to {productname-ocp} + +On {productname-ocp}, the `extra_ca_certs` configuration field is used to populate additional Certificate Authorities (CAs) into the CA directory, which then adds the CAs into the system trust bundle. These certificates are used by {productname} to verify SSL/TLS connections with external services like LDAP, OIDC, and storage systems. 
+ +When deploying or redeploying {productname-ocp}, you can add one, or multiple, CAs into the CA directory to ensure that external services are properly secured and validated. On {productname-ocp} deployments, you must manually add the `extra_ca_certs` configuration field to your `config.yaml` file and re-upload the `config.yaml` to {ocp}. + +The following procedures show you how to download your existing configuration file, add additional CAs to your {productname-ocp} deployment, and then re-upload the configuration file. \ No newline at end of file diff --git a/modules/config-api-default.adoc b/modules/config-api-default.adoc new file mode 100644 index 000000000..6acb11abd --- /dev/null +++ b/modules/config-api-default.adoc @@ -0,0 +1,39 @@ += Retrieving the default configuration + +If you are running the configuration tool for the first time, and do not have an existing configuration, you can retrieve the default configuration. Start the container in config mode: + +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config \ + -p 8080:8080 \ + {productrepo}/{quayimage}:{productminv} config secret +---- + +Use the `config` endpoint of the configuration API to get the default: + +.... +$ curl -X GET -u quayconfig:secret http://quay-server:8080/api/v1/config  | jq +.... + +The value returned is the default configuration in JSON format: + +[source, json] +---- +{ +  "config.yaml": { +    "AUTHENTICATION_TYPE": "Database", +    "AVATAR_KIND": "local", +    "DB_CONNECTION_ARGS": { +      "autorollback": true, +      "threadlocals": true +    }, +    "DEFAULT_TAG_EXPIRATION": "2w", +    "EXTERNAL_TLS_TERMINATION": false, +    "FEATURE_ACTION_LOG_ROTATION": false, +    "FEATURE_ANONYMOUS_ACCESS": true, +    "FEATURE_APP_SPECIFIC_TOKENS": true, + .... 
+ } + +} +---- \ No newline at end of file diff --git a/modules/config-api-intro.adoc b/modules/config-api-intro.adoc new file mode 100644 index 000000000..c11c2a609 --- /dev/null +++ b/modules/config-api-intro.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-using-api"] += Using the configuration API + +The configuration tool exposes 4 endpoints that can be used to build, validate, bundle and deploy a configuration. The config-tool API is documented at link:https://github.com/quay/config-tool/blob/master/pkg/lib/editor/API.md[]. In this section, you will see how to use the API to retrieve the current configuration and how to validate any changes you make. \ No newline at end of file diff --git a/modules/config-api-required.adoc b/modules/config-api-required.adoc new file mode 100644 index 000000000..adf44f908 --- /dev/null +++ b/modules/config-api-required.adoc @@ -0,0 +1,56 @@ += Determining the required fields + +You can determine the required fields by posting an empty configuration structure to the `config/validate` endpoint: + +.... +curl -u quayconfig:secret --header 'Content-Type: application/json' --request POST --data ' +{ +  "config.yaml": { + } + +} http://quay-server:8080/api/v1/config/validate | jq +.... + + +The value returned is an array indicating which fields are required: + +[source,yaml] +---- +[ +  { +    "FieldGroup": "Database", +    "Tags": [ +      "DB_URI" +    ], +    "Message": "DB_URI is required." +  }, +  { +    "FieldGroup": "DistributedStorage", +    "Tags": [ +      "DISTRIBUTED_STORAGE_CONFIG" +    ], +    "Message": "DISTRIBUTED_STORAGE_CONFIG must contain at least one storage location." 
+  }, +  { +    "FieldGroup": "HostSettings", +    "Tags": [ +      "SERVER_HOSTNAME" +    ], +    "Message": "SERVER_HOSTNAME is required" +  }, +  { +    "FieldGroup": "HostSettings", +    "Tags": [ +      "SERVER_HOSTNAME" +    ], +    "Message": "SERVER_HOSTNAME must be of type Hostname" +  }, +  { +    "FieldGroup": "Redis", +    "Tags": [ +      "BUILDLOGS_REDIS" +    ], +    "Message": "BUILDLOGS_REDIS is required" +  } +] +---- \ No newline at end of file diff --git a/modules/config-api-retrieve.adoc b/modules/config-api-retrieve.adoc new file mode 100644 index 000000000..85e505995 --- /dev/null +++ b/modules/config-api-retrieve.adoc @@ -0,0 +1,46 @@ += Retrieving the current configuration + +If you have already configured and deployed the Quay registry, stop the container and restart it in configuration mode, loading the existing configuration as a volume: + +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config \ + -p 8080:8080 \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} config secret +---- + +Use the `config` endpoint of the API to get the current configuration: + +.... +$ curl -X GET -u quayconfig:secret http://quay-server:8080/api/v1/config  | jq +.... + + +The value returned is the current configuration in JSON format, including database and Redis configuration data: + +[source, json] +---- +{ +  "config.yaml": { + .... +    "BROWSER_API_CALLS_XHR_ONLY": false, +    "BUILDLOGS_REDIS": { +      "host": "quay-server", +      "password": "strongpassword", +      "port": 6379 +    }, +    "DATABASE_SECRET_KEY": "4b1c5663-88c6-47ac-b4a8-bb594660f08b", +    "DB_CONNECTION_ARGS": { +      "autorollback": true, +      "threadlocals": true +    }, +    "DB_URI": "postgresql://quayuser:quaypass@quay-server:5432/quay", +    "DEFAULT_TAG_EXPIRATION": "2w", + .... 
+ + + } + +} +---- \ No newline at end of file diff --git a/modules/config-api-validate.adoc b/modules/config-api-validate.adoc new file mode 100644 index 000000000..3620ddd8b --- /dev/null +++ b/modules/config-api-validate.adoc @@ -0,0 +1,31 @@ += Validating configuration using the API + +You can validate a configuration by posting it to the `config/validate` endpoint: + +.... +curl -u quayconfig:secret --header 'Content-Type: application/json' --request POST --data ' +{ +  "config.yaml": { + .... +    "BROWSER_API_CALLS_XHR_ONLY": false, +    "BUILDLOGS_REDIS": { +      "host": "quay-server", +      "password": "strongpassword", +      "port": 6379 +    }, +    "DATABASE_SECRET_KEY": "4b1c5663-88c6-47ac-b4a8-bb594660f08b", +    "DB_CONNECTION_ARGS": { +      "autorollback": true, +      "threadlocals": true +    }, +    "DB_URI": "postgresql://quayuser:quaypass@quay-server:5432/quay", +    "DEFAULT_TAG_EXPIRATION": "2w", + .... + + } + +} http://quay-server:8080/api/v1/config/validate | jq +.... + + +The returned value is an array containing the errors found in the configuration. If the configuration is valid, an empty array `[]` is returned. \ No newline at end of file diff --git a/modules/config-custom-ssl-certs-kubernetes.adoc b/modules/config-custom-ssl-certs-kubernetes.adoc new file mode 100644 index 000000000..ed2fab52d --- /dev/null +++ b/modules/config-custom-ssl-certs-kubernetes.adoc @@ -0,0 +1,53 @@ +[id="config-custom-ssl-cert-kubernetes"] += Adding custom SSL/TLS certificates when {productname} is deployed on Kubernetes + +When deployed on Kubernetes, {productname} mounts in a secret as a volume to store config assets. Currently, this breaks the upload certificate function of the superuser panel. + +As a temporary workaround, `base64` encoded certificates can be added to the secret _after_ {productname} has been deployed. + +Use the following procedure to add custom SSL/TLS certificates when {productname} is deployed on Kubernetes. 
+ +.Prerequisites + +* {productname} has been deployed. +* You have a custom `ca.crt` file. + +.Procedure + +. Base64 encode the contents of an SSL/TLS certificate by entering the following command: ++ +[source,terminal] +---- +$ cat ca.crt | base64 -w 0 +---- ++ +.Example output ++ +[source,terminal] +---- +...c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +---- + +. Enter the following `kubectl` command to edit the `quay-enterprise-config-secret` file: ++ +[source,terminal] +---- +$ kubectl --namespace quay-enterprise edit secret/quay-enterprise-config-secret +---- + +. Add an entry for the certificate and paste the full `base64` encoded string under the entry. For example: ++ +[source,terminal] +---- + custom-cert.crt: +c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +---- + +. Use the `kubectl delete` command to remove all {productname} pods. For example: ++ +[source,terminal] +---- +$ kubectl delete pod quay-operator.v3.7.1-6f9d859bd-p5ftc quayregistry-clair-postgres-7487f5bd86-xnxpr quayregistry-quay-app-upgrade-xq2v6 quayregistry-quay-database-859d5445ff-cqthr quayregistry-quay-redis-84f888776f-hhgms +---- ++ +Afterwards, the {productname} deployment automatically schedules replacement pods with the new certificate data. \ No newline at end of file diff --git a/modules/config-custom-ssl-certs-manual.adoc b/modules/config-custom-ssl-certs-manual.adoc new file mode 100644 index 000000000..464cbaa93 --- /dev/null +++ b/modules/config-custom-ssl-certs-manual.adoc @@ -0,0 +1,95 @@ +[id="config-extra-ca-certs-standalone"] += Adding additional Certificate Authorities to the {productname} container + +The `extra_ca_certs` directory is the directory where additional Certificate Authorities (CAs) can be stored to extend the set of trusted certificates. These certificates are used by {productname} to verify SSL/TLS connections with external services. 
When deploying {productname}, you can place the necessary CAs in this directory to ensure that connections to services like LDAP, OIDC, and storage systems are properly secured and validated. + +For standalone {productname} deployments, you must create this directory and copy the additional CA certificates into that directory. + +.Prerequisites + +* You have a CA for the desired service. + +.Procedure + +. View the certificate to be added to the container by entering the following command: ++ +[source,terminal] +---- +$ cat storage.crt +---- ++ +.Example output ++ +[source,terminal] +---- +-----BEGIN CERTIFICATE----- +MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV... +-----END CERTIFICATE----- +---- + +. Create the `extra_ca_certs` directory in the `/config` folder of your {productname} directory by entering the following command: ++ +[source,terminal] +---- +$ mkdir -p /path/to/quay_config_folder/extra_ca_certs +---- + +. Copy the CA file to the `extra_ca_certs` folder. For example: ++ +[source,terminal] +---- +$ cp storage.crt /path/to/quay_config_folder/extra_ca_certs/ +---- + +. Ensure that the `storage.crt` file exists within the `extra_ca_certs` folder by entering the following command: ++ +[source,terminal] +---- +$ tree /path/to/quay_config_folder/extra_ca_certs +---- ++ +.Example output ++ +[source,terminal] +---- +/path/to/quay_config_folder/extra_ca_certs +├── storage.crt +---- + +. Obtain the `CONTAINER ID` of your `Quay` container by entering the following command: ++ +[source,terminal] +---- +$ podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS +5a3e82c4a75f //quay:{productminv} "/sbin/my_init" 24 hours ago Up 18 hours 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp, 443/tcp grave_keller +---- + +. Restart the container by entering the following command: ++ +[source,terminal] +---- +$ podman restart 5a3e82c4a75f +---- + +. 
Confirm that the certificate was copied into the container namespace by running the following command: ++ +[source,terminal] +---- +$ podman exec -it 5a3e82c4a75f cat /etc/ssl/certs/storage.pem +---- ++ +.Example output ++ +[source,terminal] +---- +-----BEGIN CERTIFICATE----- +MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV... +-----END CERTIFICATE----- +---- \ No newline at end of file diff --git a/modules/config-debug-variables.adoc b/modules/config-debug-variables.adoc new file mode 100644 index 000000000..d95578730 --- /dev/null +++ b/modules/config-debug-variables.adoc @@ -0,0 +1,17 @@ +:_content-type: REFERENCE +[id="config-debug-variables"] += Debug variables + +The following debug variables are available on {productname}. + +.Debug configuration variables +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **DEBUGLOG** | Boolean | Whether to enable or disable debug logs. +| **USERS_DEBUG** | Integer. Either `0` or `1`. | Used to debug LDAP operations in clear text, including passwords. Must be used with `DEBUGLOG=TRUE`. + +[IMPORTANT] +==== +Setting `USERS_DEBUG=1` exposes credentials in clear text. This variable should be removed from the {productname} deployment after debugging. The log file that is generated with this environment variable should be scrutinized, and passwords should be removed before sending to other users. Use with caution. +==== +|=== \ No newline at end of file diff --git a/modules/config-disclaimer.adoc b/modules/config-disclaimer.adoc new file mode 100644 index 000000000..199446f38 --- /dev/null +++ b/modules/config-disclaimer.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="config-disclaimer"] += {productname} configuration disclaimer + +With both standalone and Operator-based deployments of {productname}, certain features and configuration parameters are not actively used or implemented. 
As a result, feature flags, such as those that enable or disable certain features, and configuration parameters that are not explicitly documented or requested for documentation by Red Hat Support, should only be modified with caution. Unused features or parameters might not be fully tested, supported, or compatible with {productname}. Modifying unused features or parameters might lead to unexpected issues or disruptions with your deployment. + +For information about configuring {productname} in standalone deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#advanced-quay-configuration[Advanced {productname} configuration]. + +For information about configuring {productname} Operator deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli[Configuring {productname} on {ocp}]. \ No newline at end of file diff --git a/modules/config-envvar-dbpool.adoc b/modules/config-envvar-dbpool.adoc new file mode 100644 index 000000000..85b272a47 --- /dev/null +++ b/modules/config-envvar-dbpool.adoc @@ -0,0 +1,53 @@ +:_content-type: REFERENCE +[id="config-envvar-dbpool"] += Database connection pooling + +{productname} is composed of many different processes which all run within the same container. Many of these processes interact with the database. + +Database connection pooling is enabled by default, and each process that interacts with the database contains a connection pool. These per-process connection pools are configured to maintain a maximum of 20 connections. Under heavy load, it is possible to fill the connection pool for every process within a {productname} container. Under certain deployments and loads, this might require analysis to ensure that {productname} does not exceed the configured database's maximum connection count. 
+ +Over time, the connection pools release idle connections. To release all connections immediately, {productname} requires a restart. + +For standalone {productname} deployments, database connection pooling can be toggled off when starting your deployment. For example: + +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + -e DB_CONNECTION_POOLING=false \ + registry.redhat.io/quay/quay-rhel8:v3.12.1 +---- + +For {productname-ocp}, database connection pooling can be configured by modifying the `QuayRegistry` custom resource definition (CRD). For example: + +[source,yaml] +.Example QuayRegistry CRD +---- +spec: + components: + - kind: quay + managed: true + overrides: + env: + - name: DB_CONNECTION_POOLING + value: "false" +---- + +.Database connection pooling configuration +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **DB_CONNECTION_POOLING** | String | Whether to enable or disable database connection pooling. Defaults to true. Accepted values are `"true"` or `"false"` +|=== + +If database connection pooling is enabled, it is possible to change the maximum size of the connection pool. This can be done through the following `config.yaml` option: + +.config.yaml +[source,yaml] +---- +... +DB_CONNECTION_ARGS: + max_connections: 10 +... +---- \ No newline at end of file diff --git a/modules/config-envvar-georepl.adoc b/modules/config-envvar-georepl.adoc new file mode 100644 index 000000000..32faf8382 --- /dev/null +++ b/modules/config-envvar-georepl.adoc @@ -0,0 +1,12 @@ +:_content-type: REFERENCE +[id="config-envvar-georepl"] += Geo-replication + +The same configuration should be used across all regions, with the exception of the storage backend, which can be configured explicitly using the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable. 
+ +.Geo-replication configuration +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **QUAY_DISTRIBUTED_STORAGE_PREFERENCE** | String | The preferred storage engine (by ID in DISTRIBUTED_STORAGE_CONFIG) to use. +|=== diff --git a/modules/config-envvar-intro.adoc b/modules/config-envvar-intro.adoc new file mode 100644 index 000000000..9ef3a636a --- /dev/null +++ b/modules/config-envvar-intro.adoc @@ -0,0 +1,7 @@ +:_content-type: REFERENCE +[id="config-envar-intro"] += Environment variables + +{productname} supports a limited number of environment variables for dynamic configuration. + + diff --git a/modules/config-envvar-worker-connection.adoc b/modules/config-envvar-worker-connection.adoc new file mode 100644 index 000000000..38178f418 --- /dev/null +++ b/modules/config-envvar-worker-connection.adoc @@ -0,0 +1,23 @@ +:_content-type: REFERENCE +[id="config-envvar-worker-connection"] += HTTP connection counts + +It is possible to specify the quantity of simultaneous HTTP connections using environment variables. These can be specified as a whole, or for a specific component. The default for each is `50` parallel connections per process. 
+ +.HTTP connection counts configuration +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **WORKER_CONNECTION_COUNT** | Number | Simultaneous HTTP connections + + + +**Default:** 50 +| **WORKER_CONNECTION_COUNT_REGISTRY** | Number | Simultaneous HTTP connections for registry + + + +**Default:** WORKER_CONNECTION_COUNT +| **WORKER_CONNECTION_COUNT_WEB** | Number | Simultaneous HTTP connections for web UI + + + +**Default:** WORKER_CONNECTION_COUNT +| **WORKER_CONNECTION_COUNT_SECSCAN** | Number | Simultaneous HTTP connections for Clair + + + +**Default:** WORKER_CONNECTION_COUNT +|=== \ No newline at end of file diff --git a/modules/config-envvar-worker-count.adoc b/modules/config-envvar-worker-count.adoc new file mode 100644 index 000000000..c79f92fb4 --- /dev/null +++ b/modules/config-envvar-worker-count.adoc @@ -0,0 +1,20 @@ +:_content-type: REFERENCE +[id="config-envvar-worker-count"] += Worker count variables + +.Worker count variables +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description + +| **WORKER_COUNT** | Number | Generic override for number of processes +| **WORKER_COUNT_REGISTRY** | Number | Specifies the number of processes to handle Registry requests within the `Quay` container + + + +**Values:** Integer between `8` and `64` +| **WORKER_COUNT_WEB** | Number | Specifies the number of processes to handle UI/Web requests within the container + + + + **Values:** Integer between `2` and `32` +| **WORKER_COUNT_SECSCAN** | Number | Specifies the number of processes to handle Security Scanning (e.g. Clair) integration within the container + + + +**Values:** Integer. Because the Operator specifies 2 vCPUs for resource requests and limits, setting this value between `2` and `4` is safe. However, users can run more, for example, `16`, if warranted. 
+|=== \ No newline at end of file diff --git a/modules/config-extra-ca-certs-quay.adoc b/modules/config-extra-ca-certs-quay.adoc new file mode 100644 index 000000000..872ef8d92 --- /dev/null +++ b/modules/config-extra-ca-certs-quay.adoc @@ -0,0 +1,6 @@ +[id="config-extra-ca-certs-quay"] += Adding additional Certificate Authorities for {productname} + +Certificate Authorities (CAs) are used by {productname} to verify SSL/TLS connections with external services, like OIDC providers, LDAP providers, storage providers, and so on. + +The following sections provide information about uploading additional CAs to {productname} depending on your deployment type. \ No newline at end of file diff --git a/modules/config-fields-aci.adoc b/modules/config-fields-aci.adoc new file mode 100644 index 000000000..718e61fe2 --- /dev/null +++ b/modules/config-fields-aci.adoc @@ -0,0 +1,15 @@ +[[config-fields-aci]] += ACI configuration fields + + +.ACI configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_ACI_CONVERSION** | Boolean | Whether to enable conversion to ACIs + + + +**Default:** False +| **GPG2_PRIVATE_KEY_FILENAME** | String | The filename of the private key used to decrypt ACIs +| **GPG2_PRIVATE_KEY_NAME** | String | The name of the private key used to sign ACIs +| **GPG2_PUBLIC_KEY_FILENAME** | String | The filename of the public key used to encrypt ACIs +|=== diff --git a/modules/config-fields-actionlog.adoc b/modules/config-fields-actionlog.adoc new file mode 100644 index 000000000..61aa12e95 --- /dev/null +++ b/modules/config-fields-actionlog.adoc @@ -0,0 +1,170 @@ +[id="config-fields-actionlog"] += Action log configuration fields + +== Action log storage configuration + +.Action log storage configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_LOG_EXPORT** | Boolean | Whether to allow exporting of action logs. 
+ + + +**Default:** `True` +| **LOGS_MODEL** | String | Specifies the preferred method for handling log data. + + + +**Values:** One of `database`, `transition_reads_both_writes_es`, `elasticsearch`, `splunk` + +**Default:** `database` +| **LOGS_MODEL_CONFIG** | Object | Logs model config for action logs. + +| **ALLOW_WITHOUT_STRICT_LOGGING** | Boolean | When set to `True`, if the external log system like Splunk or ElasticSearch is intermittently unavailable, allows users to push images normally. Events are logged to the stdout instead. Overrides `ALLOW_PULLS_WITHOUT_STRICT_LOGGING` if set. + + + + **Default:** False +|=== + +[id="elasticsearch-log-configuration-fields"] +=== Elasticsearch configuration fields + +The following fields are available when configuring Elasticsearch for {productname}. + +* **LOGS_MODEL_CONFIG** [object]: Logs model config for action logs. +** **elasticsearch_config** [object]: Elasticsearch cluster configuration. +*** **access_key** [string]: Elasticsearch user (or IAM key for AWS ES). +**** **Example**: `some_string` +*** **host** [string]: Elasticsearch cluster endpoint. +**** **Example**: `host.elasticsearch.example` +*** **index_prefix** [string]: Elasticsearch's index prefix. +**** **Example**: `logentry_` +*** **index_settings** [object]: Elasticsearch's index settings +*** **use_ssl** [boolean]: Use ssl for Elasticsearch. Defaults to `True`. +**** **Example**: `True` +*** **secret_key** [string]: Elasticsearch password (or IAM secret for AWS ES). +**** **Example**: `some_secret_string` +*** **aws_region** [string]: Amazon web service region. +**** **Example**: `us-east-1` +*** **port** [number]: Elasticsearch cluster endpoint port. +**** **Example**: `1234` +** **kinesis_stream_config** [object]: AWS Kinesis Stream configuration. +*** **aws_secret_key** [string]: AWS secret key. +**** **Example**: `some_secret_key` +*** **stream_name** [string]: Kinesis stream to send action logs to. 
+**** **Example**: `logentry-kinesis-stream` +*** **aws_access_key** [string]: AWS access key. +**** **Example**: `some_access_key` +*** **retries** [number]: Max number of attempts made on a single request. +**** **Example**: `5` +*** **read_timeout** [number]: Number of seconds before timeout when reading from a connection. +**** **Example**: `5` +*** **max_pool_connections** [number]: The maximum number of connections to keep in a connection pool. +**** **Example**: `10` +*** **aws_region** [string]: AWS region. +**** **Example**: `us-east-1` +*** **connect_timeout** [number]: Number of seconds before timeout when attempting to make a connection. +**** **Example**: `5` +** **producer** [string]: Logs producer if logging to Elasticsearch. +*** **enum**: kafka, elasticsearch, kinesis_stream +*** **Example**: `kafka` +** **kafka_config** [object]: Kafka cluster configuration. +*** **topic** [string]: Kafka topic to publish log entries to. +**** **Example**: `logentry` +*** **bootstrap_servers** [array]: List of Kafka brokers to bootstrap the client from. +*** **max_block_seconds** [number]: Max number of seconds to block during a `send()`, either because the buffer is full or metadata unavailable. +**** **Example**: `10` + +[id="splunk-configuration-fields"] +=== Splunk configuration fields + +The following fields are available when configuring Splunk for {productname}. + +//splunk +** **producer** [string]: `splunk`. Use when configuring Splunk. +** **splunk_config** [object]: Logs model configuration for Splunk action logs or the Splunk cluster configuration. +*** **host** [string]: Splunk cluster endpoint. +*** **port** [integer]: Splunk management cluster endpoint port. +*** **bearer_token** [string]: The bearer token for Splunk. +*** **verify_ssl** [boolean]: Enable (`True`) or disable (`False`) TLS/SSL verification for HTTPS connections. +*** **index_prefix** [string]: Splunk's index prefix. 
+*** **ssl_ca_path** [string]: The relative container path to a single `.pem` file containing a certificate authority (CA) for SSL validation. + +.Example Splunk configuration +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: http://.remote.csb + port: 8089 + bearer_token: + url_scheme: + verify_ssl: False + index_prefix: + ssl_ca_path: +# ... +---- + +[id="splunk-hec-configuration-fields"] +=== Splunk HEC configuration fields + +The following fields are available when configuring Splunk HTTP Event Collector (HEC) for {productname}. + +** **producer** [string]: `splunk_hec`. Use when configuring Splunk HEC. +** **splunk_hec_config** [object]: Logs model configuration for Splunk HTTP event collector action logs configuration. +*** **host** [string]: Splunk cluster endpoint. +*** **port** [integer]: Splunk management cluster endpoint port. +*** **hec_token** [string]: HEC token for Splunk. +*** **url_scheme** [string]: The URL scheme for accessing the Splunk service. If Splunk is behind SSL/TLS, must be `https`. +*** **verify_ssl** [boolean]: Enable (`true`) or disable (`false`) SSL/TLS verification for HTTPS connections. +*** **index** [string]: The Splunk index to use. +*** **splunk_host** [string]: The host name to log this event. +*** **splunk_sourcetype** [string]: The name of the Splunk `sourcetype` to use. + +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk_hec + splunk_hec_config: <1> + host: prd-p-aaaaaq.splunkcloud.com <2> + port: 8088 <3> + hec_token: 12345678-1234-1234-1234-1234567890ab <4> + url_scheme: https <5> + verify_ssl: False <6> + index: quay <7> + splunk_host: quay-dev <8> + splunk_sourcetype: quay_logs <9> +# ... 
+---- + +== Action log rotation and archiving configuration + +.Action log rotation and archiving configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_ACTION_LOG_ROTATION** | Boolean | Enabling log rotation and archival will move all logs older than 30 days to storage. + + + +**Default:** `false` + +| **ACTION_LOG_ARCHIVE_LOCATION** | String | If action log archiving is enabled, the storage engine in which to place the archived data. + + + +**Example:**: `s3_us_east` +| **ACTION_LOG_ARCHIVE_PATH** | String | If action log archiving is enabled, the path in storage in which to place the archived data. + + + +**Example:** `archives/actionlogs` +| **ACTION_LOG_ROTATION_THRESHOLD** | String | The time interval after which to rotate logs. + + + +**Example:** `30d` +|=== + +== Action log audit configuration + +.Audit logs configuration field +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **ACTION_LOG_AUDIT_LOGINS** | Boolean | When set to `True`, tracks advanced events such as logging into, and out of, the UI, and logging in using Docker for regular users, robot accounts, and for application-specific token accounts. + + + +**Default:** `True` +|=== diff --git a/modules/config-fields-app-tokens.adoc b/modules/config-fields-app-tokens.adoc new file mode 100644 index 000000000..aa158f181 --- /dev/null +++ b/modules/config-fields-app-tokens.adoc @@ -0,0 +1,18 @@ +[[config-fields-app-tokens]] += App tokens configuration fields + +.App tokens configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_APP_SPECIFIC_TOKENS** | Boolean | If enabled, users can create tokens for use by the Docker CLI + + + +**Default:** True +| **APP_SPECIFIC_TOKEN_EXPIRATION** | String | The expiration for external app tokens. 
+ + + +**Default** None + +**Pattern:** `^[0-9]+(w\|m\|d\|h\|s)$` +| **EXPIRED_APP_SPECIFIC_TOKEN_GC** | String | Duration of time expired external app tokens will remain before being garbage collected + + + +**Default:** 1d +|=== \ No newline at end of file diff --git a/modules/config-fields-basic.adoc b/modules/config-fields-basic.adoc new file mode 100644 index 000000000..f5266a08d --- /dev/null +++ b/modules/config-fields-basic.adoc @@ -0,0 +1,44 @@ +[[config-fields-basic]] += Basic configuration fields + +.Basic configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **REGISTRY_TITLE** | String | If specified, the long-form title for the registry. Displayed in frontend of your {productname} deployment, for example, at the sign in page of your organization. Should not exceed 35 characters. + + +**Default:** + +`Red Hat Quay` +| **REGISTRY_TITLE_SHORT** | String | If specified, the short-form title for the registry. Title is displayed on various pages of your organization, for example, as the title of the tutorial on your organization's *Tutorial* page. + + +**Default:** + +`Red Hat Quay` + +| **CONTACT_INFO** | Array of String | If specified, contact information to display on the contact page. If only a single piece of contact information is specified, the contact footer will link directly. +|**[0]** | String | Adds a link to send an e-mail. + + + +**Pattern:** + +`^mailto:(.)+$` + +**Example:** + +`mailto:support@quay.io` +|**[1]** | String | Adds a link to visit an IRC chat room. + + + +**Pattern:** + +`^irc://(.)+$` + +**Example:** + +`irc://chat.freenode.net:6665/quay` + +|**[2]** | String | Adds a link to call a phone number. + + + +**Pattern:** + +`^tel:(.)+$` + +**Example:** + +`tel:+1-888-930-3475` + +|**[3]** | String |Adds a link to a defined URL. 
+ + + +**Pattern:** + +`^http(s)?://(.)+$` + +**Example:** + +`https://twitter.com/quayio` +|=== \ No newline at end of file diff --git a/modules/config-fields-branding.adoc b/modules/config-fields-branding.adoc new file mode 100644 index 000000000..9ddd89623 --- /dev/null +++ b/modules/config-fields-branding.adoc @@ -0,0 +1,38 @@ +:_content-type: CONCEPT +[id="config-fields-branding"] += Branding configuration fields + +.Branding configuration fields +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **BRANDING** | Object | Custom branding for logos and URLs in the {productname} UI. + +| **.logo** + +(Required) | String | Main logo image URL. + + +The header logo defaults to 205x30 PX. The form logo on the {productname} sign in screen of the web UI defaults to 356.5x39.7 PX. + + +**Example:** + +`/static/img/quay-horizontal-color.svg` +| **.footer_img** | String | Logo for UI footer. Defaults to 144x34 PX. + + + +**Example:** + +`/static/img/RedHat.svg` +| **.footer_url** | String | Link for footer image. 
+ + + +**Example:** + +`https://redhat.com` +|=== + +[id="example-config-fields-branding"] +== Example configuration for {productname} branding + +.Branding config.yaml example +[source,yaml] +---- +BRANDING: + logo: https://www.mend.io/wp-content/media/2020/03/5-tips_small.jpg + footer_img: https://www.mend.io/wp-content/media/2020/03/5-tips_small.jpg + footer_url: https://opensourceworld.org/ +---- \ No newline at end of file diff --git a/modules/config-fields-build-logs.adoc b/modules/config-fields-build-logs.adoc new file mode 100644 index 000000000..69a950ee0 --- /dev/null +++ b/modules/config-fields-build-logs.adoc @@ -0,0 +1,17 @@ +[id="config-fields-build-logs"] += Build logs configuration fields + +.Build logs configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_READER_BUILD_LOGS** | Boolean | If set to true, build logs can be read by those with `read` access to the repository, rather than only `write` access or `admin` access. + + + +**Default:** `False` +| **LOG_ARCHIVE_LOCATION** | String | The storage location, defined in `DISTRIBUTED_STORAGE_CONFIG`, in which to place the archived build logs. + + + +**Example:** `s3_us_east` +| **LOG_ARCHIVE_PATH** | String | The path under the configured storage engine in which to place the archived build logs in `.JSON` format. + + + +**Example:** `archives/buildlogs` +|=== \ No newline at end of file diff --git a/modules/config-fields-build-manager.adoc b/modules/config-fields-build-manager.adoc new file mode 100644 index 000000000..cc824bed8 --- /dev/null +++ b/modules/config-fields-build-manager.adoc @@ -0,0 +1,50 @@ +[id="config-fields-build-manager"] += Build manager configuration fields + +.Build manager configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +|*ALLOWED_WORKER_COUNT* |String | Defines how many Build Workers are instantiated per {productname} pod. Typically set to `1`. 
+|*ORCHESTRATOR_PREFIX* |String | Defines a unique prefix to be added to all Redis keys. This is useful to isolate Orchestrator values from other Redis keys. +|*REDIS_HOST* |Object | The hostname for your Redis service. +|*REDIS_PASSWORD* |String | The password to authenticate into your Redis service. +|*REDIS_SSL* |Boolean |Defines whether or not your Redis connection uses SSL/TLS. +|*REDIS_SKIP_KEYSPACE_EVENT_SETUP* |Boolean | By default, {productname} does not set up the keyspace events required for key events at runtime. To do so, set `REDIS_SKIP_KEYSPACE_EVENT_SETUP` to `false`. +|*EXECUTOR* |String | Starts a definition of an Executor of this type. Valid values are `kubernetes` and `ec2`. +|*BUILDER_NAMESPACE* |String | Kubernetes namespace where {productname} Builds will take place. +|*K8S_API_SERVER* |Object | Hostname for API Server of the {ocp} cluster where Builds will take place. +|*K8S_API_TLS_CA* |Object | The filepath in the `Quay` container of the Build cluster's CA certificate for the `Quay` application to trust when making API calls. +|*KUBERNETES_DISTRIBUTION* |String | Indicates which type of Kubernetes is being used. Valid values are `openshift` and `k8s`. +|*CONTAINER_** |Object | Define the resource requests and limits for each `build` pod. +|*NODE_SELECTOR_** |Object | Defines the node selector label name-value pair where `build` Pods should be scheduled. +|*CONTAINER_RUNTIME* |Object | Specifies whether the Builder should run `docker` or `podman`. Customers using Red Hat's `quay-builder` image should set this to `podman`. +|*SERVICE_ACCOUNT_NAME/SERVICE_ACCOUNT_TOKEN* |Object | Defines the Service Account name or token that will be used by `build` pods. +|*QUAY_USERNAME/QUAY_PASSWORD* |Object | Defines the registry credentials needed to pull the {productname} build worker image that is specified in the `WORKER_IMAGE` field. +ifdef::upstream[] +This is useful if pulling a non-public quay-builder image from quay.io. 
+endif::upstream[] +ifdef::downstream[] +Customers should provide a Red Hat Service Account credential as defined in the section "Creating Registry Service Accounts" against registry.redhat.io in the article at https://access.redhat.com/RegistryAuthentication. +endif::downstream[] +|*WORKER_IMAGE* |Object |Image reference for the {productname} Builder image. +ifdef::upstream[] +quay.io/quay/quay-builder +endif::upstream[] +ifdef::downstream[] +registry.redhat.io/quay/quay-builder +endif::downstream[] +|*WORKER_TAG* |Object |Tag for the Builder image desired. The latest version is {producty}. +|*BUILDER_VM_CONTAINER_IMAGE* |Object | The full reference to the container image holding the internal VM needed to run each {productname} Build. +ifdef::upstream[] +(`quay.io/quay/quay-builder-qemu-fedoracoreos:latest`). +endif::upstream[] +ifdef::downstream[] +(`registry.redhat.io/quay/quay-builder-qemu-rhcos:{producty}`). +endif::downstream[] +|*SETUP_TIME* |String | Specifies the number of seconds at which a Build times out if it has not yet registered itself with the Build Manager. Defaults at `500` seconds. Builds that time out are attempted to be restarted three times. If the Build does not register itself after three attempts it is considered failed. + +|*MINIMUM_RETRY_THRESHOLD* |String | This setting is used with multiple Executors. It indicates how many retries are attempted to start a Build before a different Executor is chosen. Setting to `0` means there are no restrictions on how many tries the build job needs to have. This value should be kept intentionally small (three or less) to ensure failovers happen quickly during infrastructure failures. You must specify a value for this setting. For example, `Kubernetes` is set as the first executor and `EC2` as the second executor. 
If you want the last attempt to run a job to always be executed on EC2 and not Kubernetes, you can set the Kubernetes executor's `MINIMUM_RETRY_THRESHOLD` to `1` and EC2's `MINIMUM_RETRY_THRESHOLD` to `0` (defaults to `0` if not set). In this case, the Kubernetes' `MINIMUM_RETRY_THRESHOLD` *retries_remaining(1)* would evaluate to `False`, therefore falling back to the second executor configured. +|*SSH_AUTHORIZED_KEYS* |Object | List of SSH keys to bootstrap in the `ignition` config. This allows other keys to be used to SSH into the EC2 instance or QEMU virtual machine (VM). +|=== + diff --git a/modules/config-fields-clair-auth.adoc b/modules/config-fields-clair-auth.adoc new file mode 100644 index 000000000..c68b309b7 --- /dev/null +++ b/modules/config-fields-clair-auth.adoc @@ -0,0 +1,33 @@ +:_content-type: CONCEPT +[id="config-fields-clair-auth"] += Clair authorization configuration fields + +The following authorization configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **auth** | Object | Defines Clair's external and intra-service JWT based authentication. If multiple `auth` mechanisms are defined, Clair picks one. Currently, multiple mechanisms are unsupported. + +| **.psk** | String | Defines pre-shared key authentication. + +| **.psk.key** | String | A shared base64 encoded key distributed between all parties signing and verifying JWTs. + +| **.psk.iss** | String | A list of JWT issuers to verify. An empty list accepts any issuer in a JWT claim. +|=== + +[discrete] +== Example authorization configuration + +The following `authorization` snippet is for a minimal configuration. + +.Example authorization configuration +[source,yaml] +---- +# ... +auth: + psk: + key: MTU5YzA4Y2ZkNzJoMQ== <1> + iss: ["quay"] +# ... 
+---- \ No newline at end of file diff --git a/modules/config-fields-clair-indexer.adoc b/modules/config-fields-clair-indexer.adoc new file mode 100644 index 000000000..81f4467ea --- /dev/null +++ b/modules/config-fields-clair-indexer.adoc @@ -0,0 +1,52 @@ +:_content-type: CONCEPT +[id="config-fields-clair-indexer"] += Clair indexer configuration fields + +The following table describes the configuration fields for Clair's `indexer` component. + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **indexer** | Object | Provides Clair indexer node configuration. + +| **.airgap** | Boolean | Disables HTTP access to the internet for indexers and fetchers. Private IPv4 and IPv6 addresses are allowed. Database connections are unaffected. + +| **.connstring** | String | A Postgres connection string. Accepts format as a URL or libpq connection string. + +| **.index_report_request_concurrency** | Integer | Rate limits the number of index report creation requests. Setting this to `0` attempts to auto-size this value. Setting a negative value means unlimited. The auto-sizing is a multiple of the number of available cores. + +The API returns a `429` status code if concurrency is exceeded. + +| **.scanlock_retry** | Integer | A positive integer representing seconds. Concurrent indexers lock on manifest scans to avoid clobbering. This value tunes how often a waiting indexer polls for the lock. + +| **.layer_scan_concurrency** | Integer | Positive integer limiting the number of concurrent layer scans. Indexers will match a manifest's layer concurrently. This value tunes the number of layers an indexer scans in parallel. + +| **.migrations** | Boolean | Whether indexer nodes handle migrations to their database. + +| **.scanner** | String | Indexer configuration. + +Scanner allows for passing configuration options to layer scanners. The scanner will have this configuration passed to it on construction if designed to do so. 
+ +| **.scanner.dist** | String | A map with the name of a particular scanner and arbitrary YAML as a value. + +| **.scanner.package** | String | A map with the name of a particular scanner and arbitrary YAML as a value. + +| **.scanner.repo** | String | A map with the name of a particular scanner and arbitrary YAML as a value. +|=== + +[discrete] +== Example indexer configuration + +The following example shows a hypothetical indexer configuration for Clair. + +.Example indexer configuration +[source,yaml] +---- +# ... +indexer: + connstring: host=quay-server.example.com port=5433 dbname=clair user=clairuser password=clairpass sslmode=disable + scanlock_retry: 10 + layer_scan_concurrency: 5 + migrations: true +# ... +---- diff --git a/modules/config-fields-clair-matcher.adoc b/modules/config-fields-clair-matcher.adoc new file mode 100644 index 000000000..bcb797b6a --- /dev/null +++ b/modules/config-fields-clair-matcher.adoc @@ -0,0 +1,66 @@ +:_content-type: CONCEPT +[id="config-fields-clair-matcher"] += Clair matcher configuration fields + +The following table describes the configuration fields for Clair's `matcher` component. + +[NOTE] +==== +Differs from `matchers` configuration fields. +==== + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **matcher** | Object | Provides Clair matcher node configuration. + +| **.cache_age** | String | Controls how long users should be hinted to cache responses for. + +| **.connstring** | String | A Postgres connection string. Accepts format as a URL or libpq connection string. + +| **.max_conn_pool** | Integer | Limits the database connection pool size. + +Clair allows for a custom connection pool size. This number directly sets how many active database connections are allowed concurrently. + +This parameter will be ignored in a future version. Users should configure this through the connection string. + +| **.indexer_addr** | String | A matcher contacts an indexer to create a vulnerability report. 
The location of this indexer is required. + +| **.migrations** | Boolean | Whether matcher nodes handle migrations to their databases. + +| **.period** | String | Determines how often updates for new security advisories take place. + +Defaults to `6h`. + +| **.disable_updaters** | Boolean | Whether to run background updates or not. + +Default: `False` + +| **.update_retention** | Integer | Sets the number of update operations to retain between garbage collection cycles. This should be set to a safe MAX value based on database size constraints. + +Defaults to `10`. + +If a value of less than `0` is provided, garbage collection is disabled. `2` is the minimum value to ensure updates can be compared to notifications. +|=== + +[discrete] +== Example matcher configuration + +.Example matcher configuration +[source,yaml] +---- +# ... +matcher: + connstring: >- + host= port=5432 dbname= user= password=D + sslmode=verify-ca sslcert=/etc/clair/ssl/cert.pem sslkey=/etc/clair/ssl/key.pem + sslrootcert=/etc/clair/ssl/ca.pem + indexer_addr: http://clair-v4/ + disable_updaters: false + migrations: true + period: 6h + update_retention: 2 +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-matchers.adoc b/modules/config-fields-clair-matchers.adoc new file mode 100644 index 000000000..4c3e95396 --- /dev/null +++ b/modules/config-fields-clair-matchers.adoc @@ -0,0 +1,43 @@ +:_content-type: CONCEPT +[id="config-fields-clair-matchers"] += Clair matchers configuration fields + +The following table describes the configuration fields for Clair's `matchers` component. + +[NOTE] +==== +Differs from `matcher` configuration fields. +==== + +.Matchers configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **matchers** | Array of strings | Provides configuration for the in-tree `matchers`. + +| **.names** | String | A list of string values informing the matcher factory about enabled matchers. 
+If value is set to `null`, the default list of matchers runs. The following strings are accepted: +*alpine-matcher*, *aws-matcher*, *debian-matcher*, *gobin*, *java-maven*, *oracle*, *photon*, *python*, *rhel*, *rhel-container-matcher*, *ruby*, *suse*, *ubuntu-matcher* + +| **.config** | String | Provides configuration to a specific matcher. + +A map keyed by the name of the matcher containing a sub-object which will be provided to the matchers factory constructor. For example: + +|=== + +[discrete]
== Example matchers configuration + +The following example shows a hypothetical Clair deployment that requires only the `alpine`, `aws`, `debian`, `oracle` matchers. + +.Example matchers configuration +[source,yaml] +---- +# ... +matchers: + names: + - "alpine-matcher" + - "aws" + - "debian" + - "oracle" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-metrics.adoc b/modules/config-fields-clair-metrics.adoc new file mode 100644 index 000000000..0acb7c719 --- /dev/null +++ b/modules/config-fields-clair-metrics.adoc @@ -0,0 +1,34 @@ +:_content-type: CONCEPT +[id="config-fields-clair-metrics"] += Clair metrics configuration fields + +The following metrics configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] + +|=== +| Field | Type | Description +| **metrics** | Object | Defines distributed tracing configuration based on OpenTelemetry. + +| **.name** | String | The name of the metrics in use. + +| **.prometheus** | String | Configuration for a Prometheus metrics exporter. + +| **.prometheus.endpoint** | String | Defines the path where metrics are served. +|=== + +[discrete] +== Example metrics configuration + +The following example shows a hypothetical metrics configuration for Clair. + +.Example metrics configuration +[source,yaml] +---- +# ... +metrics: + name: "prometheus" + prometheus: + endpoint: "/metricsz" +# ... 
+---- \ No newline at end of file diff --git a/modules/config-fields-clair-notifiers.adoc b/modules/config-fields-clair-notifiers.adoc new file mode 100644 index 000000000..87afce75e --- /dev/null +++ b/modules/config-fields-clair-notifiers.adoc @@ -0,0 +1,225 @@ +:_content-type: CONCEPT +[id="config-fields-clair-notifiers"] += Clair notifier configuration fields + +The general notifier configuration fields for Clair are listed below. + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **notifier** | Object | Provides Clair notifier node configuration. + +| **.connstring** | String | Postgres connection string. Accepts format as URL, or libpq connection string. + +| **.migrations** | Boolean | Whether notifier nodes handle migrations to their database. + +| **.indexer_addr** | String | A notifier contacts an indexer to create or obtain manifests affected by vulnerabilities. The location of this indexer is required. + +| **.matcher_addr** | String | A notifier contacts a matcher to list update operations and acquire diffs. The location of this matcher is required. + +| **.poll_interval** | String | The frequency at which the notifier will query a matcher for update operations. + +| **.delivery_interval** | String | The frequency at which the notifier attempts delivery of created, or previously failed, notifications. + +| **.disable_summary** | Boolean | Controls whether notifications should be summarized to one per manifest. +|=== + +[discrete] +== Example notifier configuration + +The following `notifier` snippet is for a minimal configuration. + +.Example notifier configuration +[source,yaml] +---- +# ... 
+notifier: + connstring: >- + host=DB_HOST port=5432 dbname=notifier user=DB_USER password=DB_PASS + sslmode=verify-ca sslcert=/etc/clair/ssl/cert.pem sslkey=/etc/clair/ssl/key.pem + sslrootcert=/etc/clair/ssl/ca.pem + indexer_addr: http://clair-v4/ + matcher_addr: http://clair-v4/ + delivery_interval: 5s + migrations: true + poll_interval: 15s + webhook: + target: "http://webhook/" + callback: "http://clair-notifier/notifier/api/v1/notifications" + headers: "" + amqp: null + stomp: null +# ... +---- + +[id="clair-webhook-config-fields"] +== Clair webhook configuration fields + +The following webhook fields are available for the Clair notifier environment. + +.Clair webhook fields +|=== + +| **.webhook** | Object | Configures the notifier for webhook delivery. + +| **.webhook.target** | String | URL where the webhook will be delivered. + +| **.webhook.callback** | String | The callback URL where notifications can be retrieved. The notification ID will be appended to this URL. + +This will typically be where the Clair notifier is hosted. + +| **.webhook.headers** | String | A map associating a header name to a list of values. +|=== + +[discrete] +== Example webhook configuration + +.Example webhook configuration +[source,yaml] +---- +# ... +notifier: +# ... + webhook: + target: "http://webhook/" + callback: "http://clair-notifier/notifier/api/v1/notifications" +# ... +---- + +[id="clair-amqp-config-fields"] +== Clair amqp configuration fields + +The following Advanced Message Queuing Protocol (AMQP) fields are available for the Clair notifier environment. + +|=== +| **.amqp** | Object | Configures the notifier for AMQP delivery. + +[NOTE] +==== +Clair does not declare any AMQP components on its own. All attempts to use an exchange or queue are passive only and will fail. Broker administrators should setup exchanges and queues ahead of time. 
+==== + +| **.amqp.direct** | Boolean | If `true`, the notifier will deliver individual notifications (not a callback) to the configured AMQP broker. + +| **.amqp.rollup** | Integer | When `amqp.direct` is set to `true`, this value informs the notifier of how many notifications to send in a direct delivery. For example, if `direct` is set to `true`, and `amqp.rollup` is set to `5`, the notifier delivers no more than 5 notifications in a single JSON payload to the broker. Setting the value to `0` effectively sets it to `1`. + +| **.amqp.exchange** | Object | The AMQP exchange to connect to. + +| **.amqp.exchange.name** | String | The name of the exchange to connect to. + +| **.amqp.exchange.type** | String | The type of the exchange. Typically one of the following: *direct*, *fanout*, *topic*, *headers*. + +| **.amqp.exchange.durability** | Boolean | Whether the configured queue is durable. + +| **.amqp.exchange.auto_delete** | Boolean | Whether the configured queue uses an `auto_delete_policy`. + +| **.amqp.routing_key** | String | The name of the routing key each notification is sent with. + +| **.amqp.callback** | String | If `amqp.direct` is set to `false`, this URL is provided in the notification callback sent to the broker. This URL should point to Clair's notification API endpoint. + +| **.amqp.uris** | String | A list of one or more AMQP brokers to connect to, in priority order. + +| **.amqp.tls** | Object | Configures TLS/SSL connection to an AMQP broker. + +| **.amqp.tls.root_ca** | String | The filesystem path where a root CA can be read. + +| **.amqp.tls.cert** | String | The filesystem path where a TLS/SSL certificate can be read. + +[NOTE] +==== +Clair also allows `SSL_CERT_DIR`, as documented for the Go `crypto/x509` package. +==== + +| **.amqp.tls.key** | String | The filesystem path where a TLS/SSL private key can be read. +|=== + +[discrete] +== Example AMQP configuration + +The following example shows a hypothetical AMQP configuration for Clair. 
+ +.Example AMQP configuration +[source,yaml] +---- +# ... +notifier: +# ... + amqp: + exchange: + name: "" + type: "direct" + durable: true + auto_delete: false + uris: ["amqp://user:pass@host:10000/vhost"] + direct: false + routing_key: "notifications" + callback: "http://clair-notifier/notifier/api/v1/notifications" + tls: + root_ca: "optional/path/to/rootca" + cert: "mandatory/path/to/cert" + key: "mandatory/path/to/key" +# ... +---- + +[id="clair-stomp-config-fields"] +== Clair STOMP configuration fields + +The following Simple Text Oriented Message Protocol (STOMP) fields are available for the Clair notifier environment. + +|=== +| **.stomp** | Object | Configures the notifier for STOMP delivery. + +| **.stomp.direct** | Boolean | If `true`, the notifier delivers individual notifications (not a callback) to the configured STOMP broker. + +| **.stomp.rollup** | Integer | If `stomp.direct` is set to `true`, this value limits the number of notifications sent in a single direct delivery. For example, if `direct` is set to `true`, and `rollup` is set to `5`, the notifier delivers no more than 5 notifications in a single JSON payload to the broker. Setting the value to `0` effectively sets it to `1`. + +| **.stomp.callback** | String | If `stomp.direct` is set to `false`, this URL is provided in the notification callback sent to the broker. This URL should point to Clair's notification API endpoint. + +| **.stomp.destination** | String | The STOMP destination to deliver notifications to. + +| **.stomp.uris** | String | A list of one or more STOMP brokers to connect to in priority order. + +| **.stomp.tls** | Object | Configures TLS/SSL connection to the STOMP broker. + +| **.stomp.tls.root_ca** | String | The filesystem path where a root CA can be read. + +[NOTE] +==== +Clair also respects `SSL_CERT_DIR`, as documented for the Go `crypto/x509` package. +==== + +| **.stomp.tls.cert** | String | The filesystem path where a TLS/SSL certificate can be read. 
+ +| **.stomp.tls.key** | String | The filesystem path where a TLS/SSL private key can be read. + +| **.stomp.user** | Object | Configures login details for the STOMP broker. + +| **.stomp.user.login** | String | The STOMP login to connect with. + +| **.stomp.user.passcode** | String | The STOMP passcode to connect with. +|=== + +[discrete] +== Example STOMP configuration + +The following example shows a hypothetical STOMP configuration for Clair. + +.Example STOMP configuration +[source,yaml] +---- +# ... +notifier: +# ... + stomp: + destination: "notifications" + direct: false + callback: "http://clair-notifier/notifier/api/v1/notifications" + user: + login: "username" + passcode: "passcode" + tls: + root_ca: "optional/path/to/rootca" + cert: "mandatory/path/to/cert" + key: "mandatory/path/to/key" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-trace.adoc b/modules/config-fields-clair-trace.adoc new file mode 100644 index 000000000..9f4f17d17 --- /dev/null +++ b/modules/config-fields-clair-trace.adoc @@ -0,0 +1,54 @@ +:_content-type: CONCEPT +[id="config-fields-clair-trace"] += Clair trace configuration fields + +The following trace configuration fields are available for Clair. + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **trace** | Object | Defines distributed tracing configuration based on OpenTelemetry. + +| **.name** | String | The name of the application traces will belong to. + +| **.probability** | Integer | The probability a trace will occur. + +| **.jaeger** | Object | Defines values for Jaeger tracing. + +| **.jaeger.agent** | Object | Defines values for configuring delivery to a Jaeger agent. + +| **.jaeger.agent.endpoint** | String | An address in the `<host>:<port>` syntax where traces can be submitted. + +| **.jaeger.collector** | Object | Defines values for configuring delivery to a Jaeger collector. 
+ +| **.jaeger.collector.endpoint** | String | An address in the `<host>:<port>` syntax where traces can be submitted. + +| **.jaeger.collector.username** | String | A Jaeger username. + +| **.jaeger.collector.password** | String | A Jaeger password. + +| **.jaeger.service_name** | String | The service name registered in Jaeger. + +| **.jaeger.tags** | String | Key-value pairs to provide additional metadata. + +| **.jaeger.buffer_max** | Integer | The maximum number of spans that can be buffered in memory before they are sent to the Jaeger backend for storage and analysis. +|=== + +[discrete] +== Example trace configuration + +The following example shows a hypothetical trace configuration for Clair. + +.Example trace configuration +[source,yaml] +---- +# ... +trace: + name: "jaeger" + probability: 1 + jaeger: + agent: + endpoint: "localhost:6831" + service_name: "clair" +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-clair-updaters.adoc b/modules/config-fields-clair-updaters.adoc new file mode 100644 index 000000000..82d40099e --- /dev/null +++ b/modules/config-fields-clair-updaters.adoc @@ -0,0 +1,40 @@ +:_content-type: CONCEPT +[id="config-fields-clair-updaters"] += Clair updaters configuration fields + +The following table describes the configuration fields for Clair's `updaters` component. + +.Updaters configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **updaters** | Object | Provides configuration for the matcher's update manager. + +| **.sets** | String | A list of values informing the update manager which updaters to run. + +If value is set to `null`, the default set of updaters runs the following: *alpine*, *aws*, *clair.cvss*, *debian*, *oracle*, *photon*, *osv*, *rhel*, *rhcc*, *suse*, *ubuntu* + +If left blank, zero updaters run. + +| **.config** | String | Provides configuration to specific updater sets. 
+ +A map keyed by the name of the updater set containing a sub-object which will be provided to the updater set's constructor. For a list of the sub-objects for each updater, see "Advanced updater configuration". +|=== + +[discrete] +== Example updaters configuration + +In the following configuration, only the `rhel` set is configured. The `ignore_unpatched` variable, which is specific to the `rhel` updater, is also defined. + +.Example updaters configuration +[source,yaml] +---- +# ... +updaters: + sets: + - rhel + config: + rhel: + ignore_unpatched: false +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-db.adoc b/modules/config-fields-db.adoc new file mode 100644 index 000000000..78d774a40 --- /dev/null +++ b/modules/config-fields-db.adoc @@ -0,0 +1,91 @@ +:_content-type: CONCEPT +[id="config-fields-db"] += Database configuration + +This section describes the database configuration fields available for {productname} deployments. + +[id="database-uri"] +== Database URI + +With {productname}, connection to the database is configured by using the required `DB_URI` field. + +The following table describes the `DB_URI` configuration field: + +.Database URI +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **DB_URI** + +(Required) | String | The URI for accessing the database, including any credentials. + +Example `DB_URI` field: + +*postgresql://quayuser:quaypass@quay-server.example.com:5432/quay* +|=== + +[id="database-connection-arguments"] +== Database connection arguments + +Optional connection arguments are configured by the `DB_CONNECTION_ARGS` parameter. Some of the key-value pairs defined under `DB_CONNECTION_ARGS` are generic, while others are database specific. 
+ +The following table describes database connection arguments: + +.Database connection arguments +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **DB_CONNECTION_ARGS** | Object | Optional connection arguments for the database, such as timeouts and SSL/TLS. +| **.autorollback**| Boolean | Whether to use auto-rollback connections. + +Should always be `true` +| **.threadlocals**| Boolean | Whether to use thread-local connections. + +Should always be `true` +// TODO 36 max_connections, timeout, stale_timeout +// | {nbsp}{nbsp}{nbsp}.max_connections| Number | +// | {nbsp}{nbsp}{nbsp}.timeout | Number | +// | {nbsp}{nbsp}{nbsp}.stale_timeout | Number | +|=== + +[id="config-fields-postgres"] +=== PostgreSQL SSL/TLS connection arguments + +With SSL/TLS, configuration depends on the database you are deploying. The following example shows a PostgreSQL SSL/TLS configuration: + +[source,yaml] +---- +DB_CONNECTION_ARGS: + sslmode: verify-ca + sslrootcert: /path/to/cacert +---- + +The `sslmode` option determines whether, or with what priority, a secure SSL/TLS TCP/IP connection will be negotiated with the server. There are six modes: + +.SSL/TLS options +[options="header"] +|=== +|Mode |Description + +| **disable** | Your configuration only tries non-SSL/TLS connections. +| **allow** | Your configuration first tries a non-SSL/TLS connection. Upon failure, tries an SSL/TLS connection. +| **prefer** + +(Default) | Your configuration first tries an SSL/TLS connection. Upon failure, tries a non-SSL/TLS connection. +| **require** | Your configuration only tries an SSL/TLS connection. If a root CA file is present, it verifies the certificate in the same way as if verify-ca was specified. +| **verify-ca** | Your configuration only tries an SSL/TLS connection, and verifies that the server certificate is issued by a trusted certificate authority (CA). 
+| **verify-full** | Only tries an SSL/TLS connection, and verifies that the server certificate is issued by a trusted CA and that the requested server hostname matches that in the certificate. +|=== + +For more information on the valid arguments for PostgreSQL, see link:https://www.postgresql.org/docs/current/libpq-connect.html[Database Connection Control Functions]. + + +[id="mysql-ssl-connection-arguments"] +=== MySQL SSL/TLS connection arguments + +The following example shows a sample MySQL SSL/TLS configuration: + +[source,yaml] +---- +DB_CONNECTION_ARGS: + ssl: + ca: /path/to/cacert +---- + +Information on the valid connection arguments for MySQL is available at link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[Connecting to the Server Using URI-Like Strings or Key-Value Pairs]. diff --git a/modules/config-fields-dockerfile-build.adoc b/modules/config-fields-dockerfile-build.adoc new file mode 100644 index 000000000..d0dbc3a86 --- /dev/null +++ b/modules/config-fields-dockerfile-build.adoc @@ -0,0 +1,78 @@ +[id="config-fields-dockerfile-build"] += Dockerfile build triggers fields + +.Dockerfile build support +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| +**FEATURE_BUILD_SUPPORT** | Boolean | Whether to support Dockerfile build. + + + +**Default:** `False` +| **SUCCESSIVE_TRIGGER_FAILURE_DISABLE_THRESHOLD** | Number | If not set to `None`, the number of successive failures that can occur before a build trigger is automatically disabled. + + + +**Default:** `100` +| **SUCCESSIVE_TRIGGER_INTERNAL_ERROR_DISABLE_THRESHOLD** | Number | If not set to `None`, the number of successive internal errors that can occur before a build trigger is automatically disabled + + + +**Default:** `5` +|=== + +== GitHub build triggers + +.GitHub build triggers +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_GITHUB_BUILD** | Boolean | Whether to support GitHub build triggers. 
+ + + +**Default:** `False` +| {nbsp} | {nbsp} | {nbsp} +| **GITHUB_TRIGGER_CONFIG** | Object | Configuration for using GitHub Enterprise for build triggers. +| {nbsp}{nbsp}{nbsp}**.GITHUB_ENDPOINT** + +{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint for GitHub Enterprise. + + + +**Example:** `https://github.com/` +| {nbsp}{nbsp}{nbsp}**.API_ENDPOINT** | String | The endpoint of the GitHub Enterprise API to use. Must be overridden for `github.com`. + + + +**Example**: `https://api.github.com/` +| {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance; this cannot be shared with `GITHUB_LOGIN_CONFIG`. +| {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. +|=== + +== BitBucket build triggers + +.BitBucket build triggers +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_BITBUCKET_BUILD** | Boolean | Whether to support Bitbucket build triggers. + + + +**Default:** False +| {nbsp} | {nbsp} | {nbsp} +| **BITBUCKET_TRIGGER_CONFIG** | Object | Configuration for using BitBucket for build triggers. +| {nbsp}{nbsp}{nbsp}**.CONSUMER_KEY** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered consumer key (client ID) for this {productname} instance. +| {nbsp}{nbsp}{nbsp}**.CONSUMER_SECRET** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered consumer secret (client secret) for this {productname} instance. +|=== + +== GitLab build triggers + +.GitLab build triggers +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_GITLAB_BUILD** | Boolean | Whether to support GitLab build triggers. + + + +**Default:** False +| {nbsp} | {nbsp} | {nbsp} +| **GITLAB_TRIGGER_CONFIG** | Object | Configuration for using Gitlab for build triggers. 
+| {nbsp}{nbsp}{nbsp}**.GITLAB_ENDPOINT** + +{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint at which Gitlab Enterprise is running. +| {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance. +| {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. +|=== diff --git a/modules/config-fields-footer.adoc b/modules/config-fields-footer.adoc new file mode 100644 index 000000000..20c205994 --- /dev/null +++ b/modules/config-fields-footer.adoc @@ -0,0 +1,40 @@ +:_content-type: CONCEPT +[id="config-fields-footer"] += UI footer configuration fields + +The following configuration fields have been added to the original (v1) UI. You can use these fields to customize the footer of your on-prem v1 UI. + +|=== +| Field | Type | Description + +|*FOOTER_LINKS* |Object | Enable customization of footer links in {productname}'s UI for on-prem installations. + +|*.TERMS_OF_SERVICE_URL* | String | Custom terms of service for on-prem installations. + + + +**Example:** + +`https://index.hr` + +|*.PRIVACY_POLICY_URL* | String | Custom privacy policy for on-prem installations. + + + +**Example:** + +`https://example.hr` +|*.SECURITY_URL* | String | Custom security page for on-prem installations. + + + +**Example:** + +`https://example.hr` + +| **.ABOUT_URL** | String | Custom about page for on-prem installations. 
+ + + +**Example:** + +`https://example.hr` +|=== + +.Example footer links YAML +[source,yaml] +---- +FOOTER_LINKS: + "TERMS_OF_SERVICE_URL": "https://www.index.hr" + "PRIVACY_POLICY_URL": "https://www.example.hr" + "SECURITY_URL": "https://www.example.hr" + "ABOUT_URL": "https://www.example.hr" +---- \ No newline at end of file diff --git a/modules/config-fields-general-clair.adoc b/modules/config-fields-general-clair.adoc new file mode 100644 index 000000000..2bda71416 --- /dev/null +++ b/modules/config-fields-general-clair.adoc @@ -0,0 +1,36 @@ +:_content-type: CONCEPT +[id="config-fields-required-clair"] += Clair general fields + +The following table describes the general configuration fields available for a Clair deployment. + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **http_listen_addr** | String | Configures where the HTTP API is exposed. + +Default: `:6060` + +| **introspection_addr** | String | Configures where Clair's metrics and health endpoints are exposed. + +| **log_level** | String | Sets the logging level. Requires one of the following strings: *debug-color*, *debug*, *info*, *warn*, *error*, *fatal*, *panic* + +| **tls** | String | A map containing the configuration for serving the HTTP API over TLS/SSL and HTTP/2. + +| **.cert** | String | The TLS certificate to be used. Must be a full-chain certificate. +|=== + +[discrete] +== Example configuration for general Clair fields + +The following example shows a Clair configuration. + +.Example configuration for general Clair fields +[source,yaml] +---- +# ... +http_listen_addr: 0.0.0.0:6060 +introspection_addr: 0.0.0.0:8089 +log_level: info +# ... 
+---- \ No newline at end of file diff --git a/modules/config-fields-hcp.adoc b/modules/config-fields-hcp.adoc new file mode 100644 index 000000000..07e16e55a --- /dev/null +++ b/modules/config-fields-hcp.adoc @@ -0,0 +1,24 @@ +:_content-type: REFERENCE +[id="config-fields-hcp"] += Hitachi Content Platform object storage + +The following YAML shows a sample configuration using HCP for object storage. + +.Example HCP storage configuration +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + hcp_us: + - RadosGWStorage + - access_key: + bucket_name: + hostname: + is_secure: true + secret_key: + storage_path: /datastorage/registry + signature_version: v4 +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- hcp_us +DISTRIBUTED_STORAGE_PREFERENCE: +- hcp_us +---- \ No newline at end of file diff --git a/modules/config-fields-helm-oci.adoc b/modules/config-fields-helm-oci.adoc new file mode 100644 index 000000000..bf8ec750a --- /dev/null +++ b/modules/config-fields-helm-oci.adoc @@ -0,0 +1,45 @@ +:_content-type: REFERENCE +[id="config-fields-helm-oci"] += Helm configuration fields + +.Helm configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_GENERAL_OCI_SUPPORT** | Boolean | Enable support for OCI artifacts. 
+ + + +**Default:** True +|=== + +The following Open Container Initiative (OCI) artifact types are built into {productname} by default and are enabled through the *FEATURE_GENERAL_OCI_SUPPORT* configuration field: + +[cols="1a,3a,3a",options="header"] +|=== +| Field | Media Type | Supported content types + +| *Helm* | `application/vnd.cncf.helm.config.v1+json` | `application/tar+gzip`, `application/vnd.cncf.helm.chart.content.v1.tar+gzip` + +| *Cosign* | `application/vnd.oci.image.config.v1+json` | `application/vnd.dev.cosign.simplesigning.v1+json`, `application/vnd.dsse.envelope.v1+json` + +| *SPDX* | `application/vnd.oci.image.config.v1+json` | `text/spdx`, `text/spdx+xml`, `text/spdx+json` + +| *Syft* | `application/vnd.oci.image.config.v1+json` | `application/vnd.syft+json` + +| *CycloneDX* | `application/vnd.oci.image.config.v1+json` | `application/vnd.cyclonedx`, `application/vnd.cyclonedx+xml`, `application/vnd.cyclonedx+json` + +| *In-toto* | `application/vnd.oci.image.config.v1+json` | `application/vnd.in-toto+json` + +| *Unknown* | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego` | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego`, `application/vnd.cncf.openpolicyagent.data.layer.v1+json` + +|=== + +[id="configuring-helm-config"] +== Configuring Helm + +The following YAML is the example configuration when enabling Helm. + +.Helm YAML configuration +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +---- diff --git a/modules/config-fields-ibmcloudstorage.adoc b/modules/config-fields-ibmcloudstorage.adoc new file mode 100644 index 000000000..dc450b38b --- /dev/null +++ b/modules/config-fields-ibmcloudstorage.adoc @@ -0,0 +1,25 @@ +:_content-type: REFERENCE +[id="config-fields-ibmcloudstorage"] += IBM Cloud object storage + +The following YAML shows a sample configuration using IBM Cloud object storage. 
+ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - IBMCloudStorage #actual driver + - access_key: #parameters + secret_key: + bucket_name: + hostname: + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry + maximum_chunk_size_mb: 100mb <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- default +DISTRIBUTED_STORAGE_PREFERENCE: +- default +---- +<1> Optional. Recommended to be set to `100mb`. \ No newline at end of file diff --git a/modules/config-fields-intro.adoc b/modules/config-fields-intro.adoc new file mode 100644 index 000000000..e4afb4fea --- /dev/null +++ b/modules/config-fields-intro.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-fields-intro"] += Configuration fields + +This section describes both the required and optional configuration fields when deploying {productname}. \ No newline at end of file diff --git a/modules/config-fields-ipv6.adoc b/modules/config-fields-ipv6.adoc new file mode 100644 index 000000000..d08407c55 --- /dev/null +++ b/modules/config-fields-ipv6.adoc @@ -0,0 +1,14 @@ +:_content-type: REFERENCE +[id="config-fields-ipv6"] += IPv6 configuration field + +.IPv6 configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **FEATURE_LISTEN_IP_VERSION** | String | Enables IPv4, IPv6, or dual-stack protocol family. This configuration field must be properly set, otherwise {productname} fails to start. 
+ +*Default:* `IPv4` + +*Additional configurations:* `IPv6`, `dual-stack` +|=== \ No newline at end of file diff --git a/modules/config-fields-jwt.adoc b/modules/config-fields-jwt.adoc new file mode 100644 index 000000000..b744049ab --- /dev/null +++ b/modules/config-fields-jwt.adoc @@ -0,0 +1,24 @@ +[[config-fields-jwt]] += JWT configuration fields + + +.JWT configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **JWT_AUTH_ISSUER** | String | The endpoint for JWT users + + + +**Pattern**: `^http(s)?://(.)+$` + +**Example**: `http://192.168.99.101:6060` +| **JWT_GETUSER_ENDPOINT** | String | The endpoint for JWT users + +**Pattern**: `^http(s)?://(.)+$` + +**Example**: `http://192.168.99.101:6060` +| **JWT_QUERY_ENDPOINT** | String | The endpoint for JWT queries + + + +**Pattern**: `^http(s)?://(.)+$` + +**Example**: `http://192.168.99.101:6060` +| **JWT_VERIFY_ENDPOINT** | String | The endpoint for JWT verification + + + +**Pattern**: `^http(s)?://(.)+$` + +**Example**: `http://192.168.99.101:6060` +|=== \ No newline at end of file diff --git a/modules/config-fields-ldap.adoc b/modules/config-fields-ldap.adoc new file mode 100644 index 000000000..dae9d2806 --- /dev/null +++ b/modules/config-fields-ldap.adoc @@ -0,0 +1,172 @@ +:_content-type: REFERENCE +[id="config-fields-ldap"] += LDAP configuration fields + +.LDAP configuration +[cols="2a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **AUTHENTICATION_TYPE** + +(Required) | String | Must be set to `LDAP`. +| **FEATURE_TEAM_SYNCING** | Boolean | Whether to allow for team membership to be synced from a backing group in the authentication engine (OIDC, LDAP, or Keystone). + + + +**Default:** `true` +| **FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP** | Boolean | If enabled, non-superusers can set up team synchronization. + + + +**Default:** `false` +| **LDAP_ADMIN_DN** | String | The admin DN for LDAP authentication. 
+| **LDAP_ADMIN_PASSWD** | String | The admin password for LDAP authentication. +| **LDAP_ALLOW_INSECURE_FALLBACK** | Boolean | Whether or not to allow SSL insecure fallback for LDAP authentication. +| **LDAP_BASE_DN** | Array of String | The base DN for LDAP authentication. +| **LDAP_EMAIL_ATTR** | String | The email attribute for LDAP authentication. +| **LDAP_UID_ATTR** | String | The uid attribute for LDAP authentication. +| **LDAP_URI** | String | The LDAP URI. +| **LDAP_USER_FILTER** | String | The user filter for LDAP authentication. +| **LDAP_USER_RDN** | Array of String| The user RDN for LDAP authentication. +| **LDAP_SECONDARY_USER_RDNS** | Array of String | Provide Secondary User Relative DNs if there are multiple Organizational Units where user objects are located. + +| **TEAM_RESYNC_STALE_TIME** | String | If team syncing is enabled for a team, how often to check its membership and resync if necessary. + + + +**Pattern:** + +`^[0-9]+(w\|m\|d\|h\|s)$` + +**Example:** + +`2h` + +**Default:** + +`30m` + +| **LDAP_SUPERUSER_FILTER** | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as superusers when {productname} uses LDAP as its authentication provider. + +With this field, administrators can add or remove superusers without having to update the {productname} configuration file and restart their deployment. + +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. + +| **LDAP_GLOBAL_READONLY_SUPERUSER_FILTER** | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. Only works for those superusers defined with the `LDAP_SUPERUSER_FILTER` configuration field. + +| **LDAP_RESTRICTED_USER_FILTER** | String | Subset of the `LDAP_USER_FILTER` configuration field. 
When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. + +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. + +| **FEATURE_RESTRICTED_USERS** | Boolean | When set to `True` with `LDAP_RESTRICTED_USER_FILTER` active, only the listed users in the defined LDAP group are restricted. + +*Default:* `False` + +| **LDAP_TIMEOUT** |Integer | Specifies the time limit, in seconds, for LDAP operations. This limits the amount of time an LDAP search, bind, or other operation can take. Similar to the `-l` option in `ldapsearch`, it sets a client-side operation timeout. + + + +**Default:** `10` + +| **LDAP_NETWORK_TIMEOUT** |Integer | Specifies the time limit, in seconds, for establishing a connection to the LDAP server. This is the maximum time {productname} waits for a response during network operations, similar to the `-o nettimeout` option in `ldapsearch`. + + + +**Default:** `10` + +|=== + +[id="ldap-config-field-reference"] +== LDAP configuration references + +Use the following references to update your `config.yaml` file with the desired LDAP settings. + +[id="reference-ldap-user"] +=== Basic LDAP configuration + +Use the following reference for a basic LDAP configuration. + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: LDAP <1> +--- +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com <2> +LDAP_ADMIN_PASSWD: ABC123 <3> +LDAP_ALLOW_INSECURE_FALLBACK: false <4> +LDAP_BASE_DN: <5> + - dc=example + - dc=com +LDAP_EMAIL_ATTR: mail <6> +LDAP_UID_ATTR: uid <7> +LDAP_URI: ldap://.com <8> +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,dc=,dc=com) <9> +LDAP_USER_RDN: <10> + - ou=people +LDAP_SECONDARY_USER_RDNS: <11> + - ou= + - ou= + - ou= + - ou= +---- +<1> Required. Must be set to `LDAP`. +<2> Required. The admin DN for LDAP authentication. +<3> Required. The admin password for LDAP authentication. 
+<4> Required. Whether to allow SSL/TLS insecure fallback for LDAP authentication. +<5> Required. The base DN for LDAP authentication. +<6> Required. The email attribute for LDAP authentication. +<7> Required. The UID attribute for LDAP authentication. +<8> Required. The LDAP URI. +<9> Required. The user filter for LDAP authentication. +<10> Required. The user RDN for LDAP authentication. +<11> Optional. Secondary User Relative DNs if there are multiple Organizational Units where user objects are located. + +[id="reference-ldap-restricted-user"] +=== LDAP restricted user configuration + +Use the following reference for an LDAP restricted user configuration. + +[source,yaml] +---- +# ... +AUTHENTICATION_TYPE: LDAP +# ... +FEATURE_RESTRICTED_USERS: true <1> +# ... +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_RESTRICTED_USER_FILTER: (=) <2> +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +# ... +---- +<1> Must be set to `true` when configuring an LDAP restricted user. +<2> Configures specified users as restricted users. + +[id="reference-ldap-super-user"] +=== LDAP superuser configuration reference + +Use the following reference for an LDAP superuser configuration. + + +[source,yaml] +---- +# ... +AUTHENTICATION_TYPE: LDAP +# ... +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_SUPERUSER_FILTER: (=) <1> +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +# ... +---- +<1> Configures specified users as superusers. 
diff --git a/modules/config-fields-legacy.adoc b/modules/config-fields-legacy.adoc new file mode 100644 index 000000000..5b11cf1a4 --- /dev/null +++ b/modules/config-fields-legacy.adoc @@ -0,0 +1,35 @@ +:_content-type: REFERENCE +[id="config-fields-legacy"] += Legacy configuration fields + +The following fields are deprecated or obsolete. + +.Legacy configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_BLACKLISTED_EMAILS** | Boolean | If set to true, no new User accounts may be created if their email domain is blacklisted +| **BLACKLISTED_EMAIL_DOMAINS** | Array of String | The list of email-address domains that is used if FEATURE_BLACKLISTED_EMAILS is set to true + + + +**Example:** `"example.com", "example.org"` +| **BLACKLIST_V2_SPEC** | String | The Docker CLI versions to which {productname} will respond that V2 is *unsupported* + + + +**Example**: `<1.8.0` + +**Default:** `<1.6.0` +| **DOCUMENTATION_ROOT** | String | Root URL for documentation links. This field is useful when {productname} is configured for disconnected environments to set an alternatively, or allowlisted, documentation link. +| **SECURITY_SCANNER_V4_NAMESPACE_WHITELIST** | String | The namespaces for which the security scanner should be enabled + +| **FEATURE_RESTRICTED_V1_PUSH** | Boolean | If set to true, only namespaces listed in V1_PUSH_WHITELIST support V1 push + + + +**Default:** `False` + +| **V1_PUSH_WHITELIST** | Array of String | The array of namespace names that support V1 push if FEATURE_RESTRICTED_V1_PUSH is set to true + + +| *FEATURE_HELM_OCI_SUPPORT* | Boolean | Enable support for Helm artifacts. + + + +**Default:** `False` + +|**ALLOWED_OCI_ARTIFACT_TYPES** | Object | The set of allowed OCI artifact MIME types and the associated layer types. 
+ +|=== \ No newline at end of file diff --git a/modules/config-fields-mail.adoc b/modules/config-fields-mail.adoc new file mode 100644 index 000000000..61089bc28 --- /dev/null +++ b/modules/config-fields-mail.adoc @@ -0,0 +1,24 @@ +[[config-fields-mail]] += Mail configuration fields + + +.Mail configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_MAILING** | Boolean | Whether emails are enabled + + + +**Default:** `False` +| **MAIL_DEFAULT_SENDER** | String | If specified, the e-mail address used as the `from` when {productname} sends e-mails. If none, defaults to `support@quay.io` + + + +**Example:** `support@example.com` +| **MAIL_PASSWORD** | String | The SMTP password to use when sending e-mails +| **MAIL_PORT** | Number | The SMTP port to use. If not specified, defaults to 587. +| **MAIL_SERVER** | String | The SMTP server to use for sending e-mails. Only required if FEATURE_MAILING is set to true. + + + +**Example:** `smtp.example.com` +| **MAIL_USERNAME** | String | The SMTP username to use when sending e-mails +| **MAIL_USE_TLS** | Boolean | If specified, whether to use TLS for sending e-mails + + + +**Default:** `True` +|=== \ No newline at end of file diff --git a/modules/config-fields-mirroring.adoc b/modules/config-fields-mirroring.adoc new file mode 100644 index 000000000..4dabe8db1 --- /dev/null +++ b/modules/config-fields-mirroring.adoc @@ -0,0 +1,28 @@ +[[config-fields-mirroring]] += Mirroring configuration fields + +.Mirroring configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_REPO_MIRROR** | Boolean | Enable or disable repository mirroring + + + + **Default:** `false` +| **REPO_MIRROR_INTERVAL** | Number | The number of seconds between checking for repository mirror candidates + + + +**Default:** 30 +| **REPO_MIRROR_SERVER_HOSTNAME** | String | Replaces the `SERVER_HOSTNAME` as the destination for mirroring. 
+ + + +**Default:** None + + + +**Example**: + +`openshift-quay-service` +| **REPO_MIRROR_TLS_VERIFY** | Boolean | Require HTTPS and verify certificates of Quay registry during mirror. + + + + **Default:** `true` + +|**REPO_MIRROR_ROLLBACK** | Boolean | When set to `true`, the repository rolls back after a failed mirror attempt. + +*Default*: `false` + +|=== \ No newline at end of file diff --git a/modules/config-fields-misc.adoc b/modules/config-fields-misc.adoc new file mode 100644 index 000000000..860d44fbb --- /dev/null +++ b/modules/config-fields-misc.adoc @@ -0,0 +1,101 @@ +:_content-type: REFERENCE +[id="config-fields-misc"] += Miscellaneous configuration fields + +.Miscellaneous configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **ALLOW_PULLS_WITHOUT_STRICT_LOGGING** | String | If true, pulls will still succeed even if the pull audit log entry cannot be written . This is useful if the database is in a read-only state and it is desired for pulls to continue during that time. + + + + **Default:** False +| **AVATAR_KIND** | String | The types of avatars to display, either generated inline (local) or Gravatar (gravatar) + + + + **Values:** local, gravatar +| **BROWSER_API_CALLS_XHR_ONLY** | Boolean | If enabled, only API calls marked as being made by an XHR will be allowed from browsers + + + +**Default:** True +| **DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT** | Number | The default maximum number of builds that can be queued in a namespace. + + + +**Default:** None +| **ENABLE_HEALTH_DEBUG_SECRET** | String | If specified, a secret that can be given to health endpoints to see full debug info when not authenticated as a superuser +| **EXTERNAL_TLS_TERMINATION** | Boolean | Set to `true` if TLS is supported, but terminated at a layer before Quay. Set to `false` when Quay is running with its own SSL certificates and receiving TLS traffic directly. 
+| **FRESH_LOGIN_TIMEOUT** | String | The time after which a fresh login requires users to re-enter their password + + + +**Example:** `5m` +| **HEALTH_CHECKER** | String | The configured health check + + + +**Example:** `('RDSAwareHealthCheck', {'access_key': 'foo', 'secret_key': 'bar'})` +| **PROMETHEUS_NAMESPACE** | String | The prefix applied to all exposed Prometheus metrics + + + +**Default:** `quay` +| **PUBLIC_NAMESPACES** | Array of String | If a namespace is defined in the public namespace list, then it will appear on *all* users' repository list pages, regardless of whether the user is a member of the namespace. Typically, this is used by an enterprise customer in configuring a set of "well-known" namespaces. +| **REGISTRY_STATE** | String | The state of the registry + + + +**Values:** `normal` or `read-only` +| **SEARCH_MAX_RESULT_PAGE_COUNT** | Number | Maximum number of pages the user can paginate in search before they are limited + + + +**Default:** 10 +| **SEARCH_RESULTS_PER_PAGE** | Number | Number of results returned per page by search page + + + +**Default:** 10 +| **V2_PAGINATION_SIZE** | Number | The number of results returned per page in V2 registry APIs + + + + **Default:** 50 +| **WEBHOOK_HOSTNAME_BLACKLIST** | Array of String | The set of hostnames to disallow from webhooks when validating, beyond localhost +| **CREATE_PRIVATE_REPO_ON_PUSH** | Boolean | Whether new repositories created by push are set to private visibility + + + +**Default:** True +| **CREATE_NAMESPACE_ON_PUSH** | Boolean | Whether new push to a non-existent organization creates it + + + +**Default:** False +| **NON_RATE_LIMITED_NAMESPACES** | Array of String | If rate limiting has been enabled using `FEATURE_RATE_LIMITS`, you can override it for specific namespace that require unlimited access. + +| xref:reference-miscellaneous-v2-ui[**FEATURE_UI_V2**] | Boolean | When set, allows users to try the beta UI environment. 
+ +*Default:* `True` + +| **FEATURE_REQUIRE_TEAM_INVITE** | Boolean | Whether to require invitations when adding a user to a team + + + +**Default:** True + +| **FEATURE_REQUIRE_ENCRYPTED_BASIC_AUTH** | Boolean | Whether non-encrypted passwords (as opposed to encrypted tokens) can be used for basic auth + + + +**Default:** False + +| **FEATURE_RATE_LIMITS** | Boolean | Whether to enable rate limits on API and registry endpoints. Setting FEATURE_RATE_LIMITS to `true` causes `nginx` to limit certain API calls to 30 per second. If that feature is not set, API calls are limited to 300 per second (effectively unlimited). + + + +**Default:** False + +| **FEATURE_FIPS** | Boolean | If set to true, {productname} will run using FIPS-compliant hash functions + + + + **Default:** False + +| **FEATURE_AGGREGATED_LOG_COUNT_RETRIEVAL** | Boolean | Whether to allow retrieval of aggregated log counts + + + + **Default:** True + | **FEATURE_ANONYMOUS_ACCESS** | Boolean | Whether to allow anonymous users to browse and pull public repositories + + + +**Default:** True + +| **FEATURE_DIRECT_LOGIN** | Boolean | Whether users can directly login to the UI + + + +**Default:** True +| **FEATURE_LIBRARY_SUPPORT** | Boolean | Whether to allow for "namespace-less" repositories when pulling and pushing from Docker + + + +**Default:** True +| **FEATURE_PARTIAL_USER_AUTOCOMPLETE** | Boolean | If set to true, autocompletion will apply to partial usernames+ + + +**Default:** True +| **FEATURE_PERMANENT_SESSIONS** | Boolean | Whether sessions are permanent + + + +**Default:** True +| **FEATURE_PUBLIC_CATALOG** | Boolean | If set to true, the `_catalog` endpoint returns public repositories. Otherwise, only private repositories can be returned. + + + +**Default:** False + +|*DISABLE_PUSHES* |Boolean | Disables pushes of new content to the registry while retaining all other functionality. Differs from `read-only` mode because database is not set as `read-only`. 
When `DISABLE_PUSHES` is set to `true`, the {productname} garbage collector is disabled. As a result, when `PERMANENTLY_DELETE_TAGS` is enabled, using the {productname} UI to permanently delete a tag does not result in the immediate deletion of a tag. Instead, the image stays in the backend storage until `DISABLE_PUSHES` is set to `false`, which re-enables the garbage collector. {productname} administrators should be aware of this caveat when using `DISABLE_PUSHES` and `PERMANENTLY_DELETE_TAGS` together. + + + + **Default:** False + +|=== \ No newline at end of file diff --git a/modules/config-fields-modelcache-clustered-redis.adoc b/modules/config-fields-modelcache-clustered-redis.adoc new file mode 100644 index 000000000..eb2c5fdb5 --- /dev/null +++ b/modules/config-fields-modelcache-clustered-redis.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache-clustered-redis"] += Clustered Redis configuration option + +Use the following configuration for a clustered Redis instance: + +[source,yaml] +---- + DATA_MODEL_CACHE_CONFIG: + engine: rediscluster + redis_config: + startup_nodes: + - host: + port: + password: + read_from_replicas: + skip_full_coverage_check: + ssl: +---- \ No newline at end of file diff --git a/modules/config-fields-modelcache-memcache.adoc b/modules/config-fields-modelcache-memcache.adoc new file mode 100644 index 000000000..bf2ce6b9d --- /dev/null +++ b/modules/config-fields-modelcache-memcache.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache-memcache"] += Memcache configuration option + +Memcache is the default ModelCache configuration option. With Memcache, no additional configuration is necessary. 
diff --git a/modules/config-fields-modelcache-single-redis.adoc b/modules/config-fields-modelcache-single-redis.adoc new file mode 100644 index 000000000..7a255e2e4 --- /dev/null +++ b/modules/config-fields-modelcache-single-redis.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache-single-redis"] += Single Redis configuration option + +The following configuration is for a single Redis instance with optional read-only replicas: + +[source,yaml] +---- + DATA_MODEL_CACHE_CONFIG: + engine: redis + redis_config: + primary: + host: + port: + password: + ssl: + replica: + host: + port: + password: + ssl: +---- \ No newline at end of file diff --git a/modules/config-fields-modelcache.adoc b/modules/config-fields-modelcache.adoc new file mode 100644 index 000000000..7bc07d254 --- /dev/null +++ b/modules/config-fields-modelcache.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-fields-modelcache"] += ModelCache configuration options + +The following options are available on {productname} for configuring ModelCache. \ No newline at end of file diff --git a/modules/config-fields-modelcard-rendering.adoc b/modules/config-fields-modelcard-rendering.adoc new file mode 100644 index 000000000..eaa30b30c --- /dev/null +++ b/modules/config-fields-modelcard-rendering.adoc @@ -0,0 +1,28 @@ +[id="config-fields-model-card-rendering"] +== Model card rendering + +The following configuration fields have been added to support model card rendering on the v2 UI. + +|=== +| Field | Type | Description + +|*FEATURE_UI_MODELCARD* |Boolean | Enables *Model Card* image tab in UI. Defaults to `true`. +|*UI_MODELCARD_ARTIFACT_TYPE* | String | Defines the model card artifact type. +|*UI_MODELCARD_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|*UI_MODELCARD_LAYER_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. 
+|=== + +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true <1> +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel <2> +UI_MODELCARD_ANNOTATION: <3> + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: <4> + org.opencontainers.image.title: README.md +---- +<1> Enables the *Model Card* image tab in the UI. +<2> Defines the model card artifact type. In this example, the artifact type is `application/x-mlmodel`. +<3> Optional. If an image does not have an `artifactType` defined, this field is checked at the manifest level. If a matching annotation is found, the system then searches for a layer with an annotation matching `UI_MODELCARD_LAYER_ANNOTATION`. +<4> Optional. If an image has an `artifactType` defined and multiple layers, this field is used to locate the specific layer containing the model card. \ No newline at end of file diff --git a/modules/config-fields-nested-repositories.adoc b/modules/config-fields-nested-repositories.adoc new file mode 100644 index 000000000..64a3126bc --- /dev/null +++ b/modules/config-fields-nested-repositories.adoc @@ -0,0 +1,21 @@ +:_content-type: REFERENCE +[id="config-fields-nested-repositories"] += Nested repositories configuration fields + +Support for nested repository path names has been added under the `FEATURE_EXTENDED_REPOSITORY_NAMES` property. This optional configuration is added to the config.yaml by default. Enablement allows the use of `/` in repository names. 
+ +.OCI and nested repositories configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_EXTENDED_REPOSITORY_NAMES** | Boolean | Enable support for nested repositories + + + +**Default:** True + +|=== + +.OCI and nested repositories configuration example +[source,yaml] +---- +FEATURE_EXTENDED_REPOSITORY_NAMES: true +---- diff --git a/modules/config-fields-netapp-ontap-s3.adoc b/modules/config-fields-netapp-ontap-s3.adoc new file mode 100644 index 000000000..cbfcf2003 --- /dev/null +++ b/modules/config-fields-netapp-ontap-s3.adoc @@ -0,0 +1,24 @@ +:_content-type: REFERENCE +[id="config-fields-netapp-ontap"] += NetApp ONTAP S3 object storage + +The following YAML shows a sample configuration using NetApp ONTAP S3. + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + local_us: + - RadosGWStorage + - access_key: + bucket_name: + hostname: + is_secure: true + port: + secret_key: + storage_path: /datastorage/registry + signature_version: v4 +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- local_us +DISTRIBUTED_STORAGE_PREFERENCE: +- local_us +---- diff --git a/modules/config-fields-nutanix.adoc b/modules/config-fields-nutanix.adoc new file mode 100644 index 000000000..e7de3b8a3 --- /dev/null +++ b/modules/config-fields-nutanix.adoc @@ -0,0 +1,22 @@ +:_content-type: REFERENCE +[id="config-fields-nutanix"] += Nutanix object storage + +The following YAML shows a sample configuration using Nutanix object storage. 
+ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + nutanixStorage: #storage config name + - RadosGWStorage #actual driver + - access_key: access_key_here #parameters + secret_key: secret_key_here + bucket_name: bucket_name_here + hostname: hostname_here + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: #must contain name of the storage config + - nutanixStorage +---- \ No newline at end of file diff --git a/modules/config-fields-oauth.adoc b/modules/config-fields-oauth.adoc new file mode 100644 index 000000000..9cf92fa36 --- /dev/null +++ b/modules/config-fields-oauth.adoc @@ -0,0 +1,64 @@ +:_content-type: REFERENCE +[id="config-fields-oauth"] += OAuth configuration fields + +.OAuth fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **DIRECT_OAUTH_CLIENTID_WHITELIST** | Array of String | A list of client IDs for **Quay-managed** applications that are allowed to perform direct OAuth approval without user approval. + +|*FEATURE_ASSIGN_OAUTH_TOKEN* | Boolean| Allows organization administrators to assign OAuth tokens to other users. + +|=== + +[id="github-oauth-config-fields"] +== GitHub OAuth configuration fields + +.GitHub OAuth fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_GITHUB_LOGIN** | Boolean | Whether GitHub login is supported + + + +**Default:** `False` +| **GITHUB_LOGIN_CONFIG** | Object | Configuration for using GitHub (Enterprise) as an external login provider. +| {nbsp}{nbsp}{nbsp}**.ALLOWED_ORGANIZATIONS** | Array of String | The names of the GitHub (Enterprise) organizations whitelisted to work with the ORG_RESTRICT option. +| {nbsp}{nbsp}{nbsp}**.API_ENDPOINT** | String | The endpoint of the GitHub (Enterprise) API to use. 
Must be overridden for github.com + + + +**Example:** `https://api.github.com/` +| {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance; cannot be shared with `GITHUB_TRIGGER_CONFIG`. + + + +**Example:** `0e8dbe15c4c7630b6780` +| {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. + + + +**Example:** `e4a58ddd3d7408b7aec109e85564a0d153d3e846` +| {nbsp}{nbsp}{nbsp}**.GITHUB_ENDPOINT** + +{nbsp}{nbsp}{nbsp}(Required) | String | The endpoint for GitHub (Enterprise). + + + +**Example**: `https://github.com/` +| {nbsp}{nbsp}{nbsp}**.ORG_RESTRICT** | Boolean | If true, only users within the organization whitelist can log in using this provider. +|=== + +[id="google-oauth-config-fields"] +== Google OAuth configuration fields + +.Google OAuth fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_GOOGLE_LOGIN** | Boolean | Whether Google login is supported. + + + +**Default:** `False` +| **GOOGLE_LOGIN_CONFIG** | Object | Configuration for using Google for external authentication. +| {nbsp}{nbsp}{nbsp}**.CLIENT_ID** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client ID for this {productname} instance. + + + +**Example:** `0e8dbe15c4c7630b6780` +| {nbsp}{nbsp}{nbsp}**.CLIENT_SECRET** + +{nbsp}{nbsp}{nbsp}(Required) | String | The registered client secret for this {productname} instance. 
+ + + +**Example:** `e4a58ddd3d7408b7aec109e85564a0d153d3e846` +|=== \ No newline at end of file diff --git a/modules/config-fields-optional-intro.adoc b/modules/config-fields-optional-intro.adoc new file mode 100644 index 000000000..e995947ba --- /dev/null +++ b/modules/config-fields-optional-intro.adoc @@ -0,0 +1,29 @@ +:_content-type: CONCEPT +[id="config-fields-optional-intro"] += Optional configuration fields + +Optional fields for {productname} can be found in the following sections: + +* xref:config-fields-basic[Basic configuration] +* xref:config-fields-ssl[SSL] +* xref:config-fields-ldap[LDAP] +* xref:config-fields-mirroring[Repository mirroring] +* xref:config-fields-quota-management[Quota management] +* xref:config-fields-scanner[Security scanner] +* xref:config-fields-helm-oci[Helm] +* xref:config-fields-actionlog[Action log] +* xref:config-fields-build-logs[Build logs] +* xref:config-fields-dockerfile-build[Dockerfile build] +* xref:config-fields-oauth[OAuth] +* xref:config-fields-nested-repositories[Configuring nested repositories] +* xref:other-oci-artifacts-with-quay[Adding other OCI media types to Quay] +* xref:config-fields-mail[Mail] +* xref:config-fields-user[User] +* xref:config-fields-recaptcha[Recaptcha] +* xref:config-fields-aci[ACI] +* xref:config-fields-jwt[JWT] +* xref:config-fields-app-tokens[App tokens] +* xref:config-fields-misc[Miscellaneous] +* xref:config-fields-v2-ui[User interface v2] +* xref:config-fields-ipv6[IPv6 configuration field] +* xref:config-fields-legacy[Legacy options] \ No newline at end of file diff --git a/modules/config-fields-overview.adoc b/modules/config-fields-overview.adoc new file mode 100644 index 000000000..4e9bfa022 --- /dev/null +++ b/modules/config-fields-overview.adoc @@ -0,0 +1,132 @@ +:_content-type: CONCEPT +[id="config-fields-overview"] += Clair configuration overview + +Clair is configured by a structured YAML file. 
Each Clair node needs to specify what mode it will run in and a path to a configuration file through CLI flags or environment variables. For example: + +[source,terminal] +---- +$ clair -conf ./path/to/config.yaml -mode indexer +---- + +or + +[source,terminal] +---- +$ clair -conf ./path/to/config.yaml -mode matcher +---- + +Together, the aforementioned commands start two Clair nodes using the same configuration file. One runs the indexing facilities, while the other runs the matching facilities. + +If you are running Clair in `combo` mode, you must supply the indexer, matcher, and notifier configuration blocks in the configuration. + +[id="information-using-clair-proxy-environment"] +== Information about using Clair in a proxy environment + +Environment variables respected by the Go standard library can be specified if needed, for example: + +* `HTTP_PROXY` ++ +[source,terminal] +---- +$ export HTTP_PROXY=http://:@: +---- +* `HTTPS_PROXY` ++ +[source,terminal] +---- +$ export HTTPS_PROXY=https://:@: +---- +* `SSL_CERT_DIR` ++ +[source,terminal] +---- +$ export SSL_CERT_DIR=//// +---- +* `NO_PROXY` ++ +[source,terminal] +---- +$ export NO_PROXY= +---- + +If you are using a proxy server in your environment with Clair's updater URLs, you must identify which URL needs to be added to the proxy allowlist to ensure that Clair can access them unimpeded. For example, the `osv` updater requires access to `\https://osv-vulnerabilities.storage.googleapis.com` to fetch ecosystem data dumps. In this scenario, the URL must be added to the proxy allowlist. For a full list of updater URLs, see "Clair updater URLs". 
+ +You must also ensure that the standard Clair URLs are added to the proxy allowlist: + +* `\https://search.maven.org/solrsearch/select` +* `\https://catalog.redhat.com/api/containers/` +* `\https://access.redhat.com/security/data/metrics/repository-to-cpe.json` +* `\https://access.redhat.com/security/data/metrics/container-name-repos-map.json` + +When configuring the proxy server, take into account any authentication requirements or specific proxy settings needed to enable seamless communication between Clair and these URLs. By thoroughly documenting and addressing these considerations, you can ensure that Clair functions effectively while routing its updater traffic through the proxy. + +[id="config-fields-clair-reference"] +== Clair configuration reference + +The following YAML shows an example Clair configuration: + +[source,yaml] +---- +http_listen_addr: "" +introspection_addr: "" +log_level: "" +tls: {} +indexer: + connstring: "" + scanlock_retry: 0 + layer_scan_concurrency: 5 + migrations: false + scanner: {} + airgap: false +matcher: + connstring: "" + indexer_addr: "" + migrations: false + period: "" + disable_updaters: false + update_retention: 2 +matchers: + names: nil + config: nil +updaters: + sets: nil + config: nil +notifier: + connstring: "" + migrations: false + indexer_addr: "" + matcher_addr: "" + poll_interval: "" + delivery_interval: "" + disable_summary: false + webhook: null + amqp: null + stomp: null +auth: + psk: nil +trace: + name: "" + probability: null + jaeger: + agent: + endpoint: "" + collector: + endpoint: "" + username: null + password: null + service_name: "" + tags: nil + buffer_max: 0 +metrics: + name: "" + prometheus: + endpoint: null + dogstatsd: + url: "" +---- + +[NOTE] +==== +The above YAML file lists every key for completeness. Using this configuration file as-is will result in some options not having their defaults set normally. 
+==== \ No newline at end of file diff --git a/modules/config-fields-proxy-cache.adoc b/modules/config-fields-proxy-cache.adoc new file mode 100644 index 000000000..be70b0001 --- /dev/null +++ b/modules/config-fields-proxy-cache.adoc @@ -0,0 +1,13 @@ +:_content-type: REFERENCE +[id="config-fields-proxy-cache"] += Proxy cache configuration fields + +.Proxy configuration +[cols="3a,1a,2a",options="header"] +|=== +|Field |Type |Description +|**FEATURE_PROXY_CACHE** | Boolean | Enables {productname} to act as a pull through cache for upstream registries. + +*Default*: `false` + +|=== \ No newline at end of file diff --git a/modules/config-fields-quota-management.adoc b/modules/config-fields-quota-management.adoc new file mode 100644 index 000000000..1da719ae8 --- /dev/null +++ b/modules/config-fields-quota-management.adoc @@ -0,0 +1,47 @@ +:_content-type: REFERENCE +[id="config-fields-quota-management"] += Quota management configuration fields + +.Quota management configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_QUOTA_MANAGEMENT** | Boolean | Enables configuration, caching, and validation for quota management feature. + + **Default:** `False` + +| **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** | String | Enables system default quota reject byte allowance for all organizations. + +By default, no limit is set. + +| **QUOTA_BACKFILL** | Boolean | Enables the quota backfill worker to calculate the size of pre-existing blobs. + +**Default**: `True` + +|**QUOTA_TOTAL_DELAY_SECONDS** |String | The time delay for starting the quota backfill. Rolling deployments can cause incorrect totals. This field *must* be set to a time longer than it takes for the rolling deployment to complete. + +**Default**: `1800` + +|**PERMANENTLY_DELETE_TAGS** |Boolean | Enables functionality related to the removal of tags from the time machine window. 
+ +**Default**: `False` + +|**RESET_CHILD_MANIFEST_EXPIRATION** |Boolean |Resets the expirations of temporary tags targeting the child manifests. With this feature set to `True`, child manifests are immediately garbage collected. + +**Default**: `False` +|=== + +[id="suggested-management-config-settings-39"] +== Example quota management configuration + +The following YAML is the suggested configuration when enabling quota management. + +.Quota management YAML configuration +[source,yaml] +---- +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_GARBAGE_COLLECTION: true +PERMANENTLY_DELETE_TAGS: true +QUOTA_TOTAL_DELAY_SECONDS: 1800 +RESET_CHILD_MANIFEST_EXPIRATION: true +---- \ No newline at end of file diff --git a/modules/config-fields-recaptcha.adoc b/modules/config-fields-recaptcha.adoc new file mode 100644 index 000000000..e6ebaf082 --- /dev/null +++ b/modules/config-fields-recaptcha.adoc @@ -0,0 +1,13 @@ +[[config-fields-recaptcha]] += Recaptcha configuration fields + +.Recaptcha configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_RECAPTCHA** | Boolean | Whether Recaptcha is necessary for user login and recovery + + + + **Default:** False +| **RECAPTCHA_SECRET_KEY** | String | If recaptcha is enabled, the secret key for the Recaptcha service +| **RECAPTCHA_SITE_KEY** | String | If recaptcha is enabled, the site key for the Recaptcha service +|=== \ No newline at end of file diff --git a/modules/config-fields-redis.adoc b/modules/config-fields-redis.adoc new file mode 100644 index 000000000..84dd1e8e4 --- /dev/null +++ b/modules/config-fields-redis.adoc @@ -0,0 +1,105 @@ +:_content-type: CONCEPT +[id="config-fields-redis"] += Redis configuration fields + +This section details the configuration fields available for Redis deployments. 
+ +== Build logs + +The following build logs configuration fields are available for Redis deployments: + +.Build logs configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **BUILDLOGS_REDIS** + +(Required) | Object | Redis connection details for build logs caching. +|**.host** + +(Required)| String | The hostname at which Redis is accessible. + +**Example:** + +`quay-server.example.com` +|**.port** + +(Required)| Number | The port at which Redis is accessible. + +**Example:** + +`6379` +|**.password** | String | The password to connect to the Redis instance. + +**Example:** + +`strongpassword` +| **.ssl** + +(Optional) | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. +|=== + +[id="user-event-fields-redis"] +== User events + +The following user event fields are available for Redis deployments: + +.User events config +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **USER_EVENTS_REDIS** + +(Required) | Object | Redis connection details for user event handling. +|**.host** + +(Required)| String | The hostname at which Redis is accessible. + +**Example:** + +`quay-server.example.com` +|**.port** + +(Required)| Number | The port at which Redis is accessible. + +**Example:** + +`6379` +|**.password** | String | The password to connect to the Redis instance. + +**Example:** + +`strongpassword` +| **.ssl** | Boolean | Whether to enable TLS communication between Redis and Quay. Defaults to false. +| **.ssl_keyfile** + +(Optional) | String | The name of the key database file, which houses the client certificate to be used. + +**Example:** + +`ssl_keyfile: /path/to/server/privatekey.pem` +| **.ssl_certfile** + +(Optional) | String | Used for specifying the file path of the SSL certificate. 
+ +**Example:** + +`ssl_certfile: /path/to/server/certificate.pem` +| **.ssl_cert_reqs** + +(Optional) | String | Used to specify the level of certificate validation to be performed during the SSL/TLS handshake. + +**Example:** + +`ssl_cert_reqs: CERT_REQUIRED` +| **.ssl_ca_certs** + +(Optional) | String | Used to specify the path to a file containing a list of trusted Certificate Authority (CA) certificates. + +**Example:** + +`ssl_ca_certs: /path/to/ca_certs.pem` +| **.ssl_ca_data** + +(Optional) | String | Used to specify a string containing the trusted CA certificates in PEM format. + +**Example:** + +`ssl_ca_data: ` +| **.ssl_check_hostname** + +(Optional) | Boolean | Used when setting up an SSL/TLS connection to a server. It specifies whether the client should check that the hostname in the server's SSL/TLS certificate matches the hostname of the server it is connecting to. + +**Example:** + +`ssl_check_hostname: true` +|=== + +[id="example-redis-configuration"] +== Example Redis configuration +The following YAML shows a sample configuration using Redis with optional SSL/TLS fields: + +[source,yaml] +---- +BUILDLOGS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: true + + +USER_EVENTS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: true + ssl_*: +---- + +[NOTE] +==== +If your deployment uses Azure Cache for Redis and `ssl` is set to `true`, the port defaults to `6380`. 
+==== diff --git a/modules/config-fields-required-general.adoc b/modules/config-fields-required-general.adoc new file mode 100644 index 000000000..ed5ef53df --- /dev/null +++ b/modules/config-fields-required-general.adoc @@ -0,0 +1,41 @@ +:_content-type: CONCEPT +[id="config-fields-required-general"] += General required fields + +The following table describes the required configuration fields for a {productname} deployment: + +.General required fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **AUTHENTICATION_TYPE** + +(Required) | String | The authentication engine to use for credential authentication. + + + +**Values:** + +One of `Database`, `LDAP`, `JWT`, `Keystone`, `OIDC` + + + +**Default:** `Database` +| **PREFERRED_URL_SCHEME** + +(Required) | String | The URL scheme to use when accessing {productname}. + + + +**Values:** + +One of `http`, `https` + + + +**Default:** `http` +| **SERVER_HOSTNAME** + +(Required) | String | The URL at which {productname} is accessible, without the scheme. + + + +**Example:** + +`quay-server.example.com` +| **DATABASE_SECRET_KEY** + +(Required) | String | Key used to encrypt sensitive fields within the database. This value should never be changed once set, otherwise all reliant fields, for example, repository mirror username and password configurations, are invalidated. + +This value is set automatically by the {productname} Operator for Operator-based deployments. For standalone deployments, administrators can provide their own key using Open SSL or a similar tool. Key length should not exceed 63 characters. +| **SECRET_KEY** + +(Required) | String | Key used to encrypt the session cookie and the CSRF token needed for correct interpretation of the user session. The value should not be changed when set. Should be persistent across all {productname} instances. If not persistent across all instances, login failures and other errors related to session persistence might occur. 
+| **SETUP_COMPLETE** + +(Required) | Boolean | This is an artifact left over from earlier versions of the software and currently it **must** be specified with a value of `true`. +|=== + + + + diff --git a/modules/config-fields-required-intro.adoc b/modules/config-fields-required-intro.adoc new file mode 100644 index 000000000..6a777c90f --- /dev/null +++ b/modules/config-fields-required-intro.adoc @@ -0,0 +1,12 @@ +:_content-type: CONCEPT +[id="config-fields-required-intro"] + += Required configuration fields + +The fields required to configure {productname} are covered in the following sections: + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-required-general[General required fields] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-storage[Storage for images] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-db[Database for metadata] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-redis[Redis for build logs and user events] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-tag-expiration[Tag expiration options] diff --git a/modules/config-fields-robot-account.adoc b/modules/config-fields-robot-account.adoc new file mode 100644 index 000000000..b01799386 --- /dev/null +++ b/modules/config-fields-robot-account.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="config-fields-robot-account"] += Robot account configuration fields + +.Robot account configuration fields +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description + +|**ROBOTS_DISALLOW** |Boolean |When set to `true`, robot accounts are prevented from all interactions, as well as 
from being created + + +*Default*: `False` +|=== diff --git a/modules/config-fields-scanner.adoc b/modules/config-fields-scanner.adoc new file mode 100644 index 000000000..03cd5ee95 --- /dev/null +++ b/modules/config-fields-scanner.adoc @@ -0,0 +1,81 @@ +:_content-type: REFERENCE +[id="config-fields-scanner"] += Security scanner configuration fields + +.Security scanner configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_SECURITY_SCANNER** | Boolean | Enable or disable the security scanner + + + + **Default:** `false` +| **FEATURE_SECURITY_NOTIFICATIONS** | Boolean | If the security scanner is enabled, turn on or turn off security notifications + + + + **Default:** `false` +| **SECURITY_SCANNER_V4_REINDEX_THRESHOLD** | String | This parameter is used to determine the minimum time, in seconds, to wait before re-indexing a manifest that has either previously failed or has changed states since the last indexing. The data is calculated from the `last_indexed datetime` in the *manifestsecuritystatus* table. This parameter is used to avoid trying to re-index every failed manifest on every indexing run. The default time to re-index is 300 seconds. +| **SECURITY_SCANNER_V4_ENDPOINT** | String | The endpoint for the V4 security scanner + + + +**Pattern:** + +`^http(s)?://(.)+$` + + + +**Example:** + +`http://192.168.99.101:6060` +| **SECURITY_SCANNER_V4_PSK** | String | The generated pre-shared key (PSK) for Clair +// TODO 36 Check that SECURITY_SCANNER_NOTIFICATIONS can be dropped +// | **SECURITY_SCANNER_NOTIFICATIONS** | String | +| **SECURITY_SCANNER_ENDPOINT** | String | The endpoint for the V2 security scanner + + + +**Pattern:** + +`^http(s)?://(.)+$` + + + +**Example:** + +`http://192.168.99.100:6060` +| **SECURITY_SCANNER_INDEXING_INTERVAL** | Integer | This parameter is used to determine the number of seconds between indexing intervals in the security scanner. 
When indexing is triggered, {productname} will query its database for manifests that must be indexed by Clair. These include manifests that have not yet been indexed and manifests that previously failed indexing. + + + +**Default:** 30 + +| **FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX** | Boolean | Whether to allow sending notifications about vulnerabilities for new pushes. + + +**Default**: `True` + +| **SECURITY_SCANNER_V4_MANIFEST_CLEANUP** | Boolean | Whether the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. + + +**Default**: `True` + +| *NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX* | String | Set minimal security level for new notifications on detected vulnerabilities. Avoids creation of large number of notifications after first index. If not defined, defaults to `High`. Available options include `Critical`, `High`, `Medium`, `Low`, `Negligible`, and `Unknown`. + +| *SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE* | String | The maximum layer size allowed for indexing. If the layer size exceeds the configured size, the {productname} UI returns the following message: `The manifest for this tag has layer(s) that are too large to index by the Quay Security Scanner`. The default is `8G`, and the maximum recommended is `10G`. Accepted values are `B`, `K`, `M`, `T`, and `G`. + + + **Default**: `8G` +|=== + +[id="reindexing-clair-v4"] +== Re-indexing with Clair v4 + +When Clair v4 indexes a manifest, the result should be deterministic. For example, the same manifest should produce the same index report. This is true until the scanners are changed, as using different scanners will produce different information relating to a specific manifest to be returned in the report. Because of this, Clair v4 exposes a state representation of the indexing engine (`/indexer/api/v1/index_state`) to determine whether the scanner configuration has been changed. 
+ +{productname} leverages this index state by saving it to the index report when parsing to Quay's database. If this state has changed since the manifest was previously scanned, {productname} will attempt to re-index that manifest during the periodic indexing process. + +By default this parameter is set to 30 seconds. Users might decrease the time if they want the indexing process to run more frequently, for example, if they did not want to wait 30 seconds to see security scan results in the UI after pushing a new tag. Users can also change the parameter if they want more control over the request pattern to Clair and the pattern of database operations being performed on the {productname} database. + +[id="example-security-scanner-config"] +== Example security scanner configuration + +The following YAML is the suggested configuration when enabling the security scanner feature. + +.Security scanner YAML configuration +[source,yaml] +---- +FEATURE_SECURITY_NOTIFICATIONS: true +FEATURE_SECURITY_SCANNER: true +FEATURE_SECURITY_SCANNER_NOTIFY_ON_NEW_INDEX: true +... +SECURITY_SCANNER_INDEXING_INTERVAL: 30 +SECURITY_SCANNER_V4_MANIFEST_CLEANUP: true +SECURITY_SCANNER_V4_ENDPOINT: http://quay-server.example.com:8081 +SECURITY_SCANNER_V4_PSK: MTU5YzA4Y2ZkNzJoMQ== +SERVER_HOSTNAME: quay-server.example.com +SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE: 8G <1> +... +---- +<1> Recommended maximum is `10G`. \ No newline at end of file diff --git a/modules/config-fields-server.adoc b/modules/config-fields-server.adoc new file mode 100644 index 000000000..17d724f76 --- /dev/null +++ b/modules/config-fields-server.adoc @@ -0,0 +1,12 @@ +[[config-fileds-server]] += Server configuration + + +.Server configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **SERVER_HOSTNAME** | String | The HTTP host where the registry will be accessible on the network. Include the port number if using a non-standard HTTP/HTTPS port, and omit the scheme. 
+ + + +**Example:** `quay-server.example.com` +|=== \ No newline at end of file diff --git a/modules/config-fields-session-logout.adoc b/modules/config-fields-session-logout.adoc new file mode 100644 index 000000000..9e96665c2 --- /dev/null +++ b/modules/config-fields-session-logout.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="config-fields-session-logout"] += Session timeout configuration field + +The following configuration field relies on the Flask API configuration field of the same name. + +.Session logout configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **PERMANENT_SESSION_LIFETIME** | Integer | A `timedelta` which is used to set the expiration date of a permanent session. The default is 31 days, which makes a permanent session survive for roughly one month. + +*Default:* `2678400` +|=== + + +[id="suggested-permanent-session-lifetime-config"] +== Example session timeout configuration + +The following YAML is the suggested configuration when enabling session lifetime. + +[IMPORTANT] +==== +Altering session lifetime is not recommended. Administrators should be aware of the allotted time when setting a session timeout. If you set the time too early, it might interrupt your workflow. +==== + +.Session timeout YAML configuration +[source,yaml] +---- +PERMANENT_SESSION_LIFETIME: 3000 +---- + diff --git a/modules/config-fields-ssl.adoc b/modules/config-fields-ssl.adoc new file mode 100644 index 000000000..4be30c688 --- /dev/null +++ b/modules/config-fields-ssl.adoc @@ -0,0 +1,60 @@ +:_content-type: CONCEPT +[id="config-fields-ssl"] += SSL configuration fields + +.SSL configuration +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **PREFERRED_URL_SCHEME** | String | One of `http` or `https`. Note that users only set their `PREFERRED_URL_SCHEME` to `http` when there is no TLS encryption in the communication path from the client to Quay. 
+ + +Users must set their `PREFERRED_URL_SCHEME` to `https` when using a TLS-terminating load balancer, a reverse proxy (for example, Nginx), or when using Quay with custom SSL certificates directly. In most cases, the `PREFERRED_URL_SCHEME` should be `https`. + + + **Default:** `http` + | **SERVER_HOSTNAME** + +(Required) | String | The URL at which {productname} is accessible, without the scheme + + + +**Example:** + +`quay-server.example.com` + +| **SSL_CIPHERS** | Array of String | If specified, the nginx-defined list of SSL ciphers to enable and disable + + + +**Example:** + +[`ECDHE-RSA-AES128-GCM-SHA256`, `ECDHE-ECDSA-AES128-GCM-SHA256`, `ECDHE-RSA-AES256-GCM-SHA384`, `ECDHE-ECDSA-AES256-GCM-SHA384`, `DHE-RSA-AES128-GCM-SHA256`, `DHE-DSS-AES128-GCM-SHA256`, `kEDH+AESGCM`, `ECDHE-RSA-AES128-SHA256`, `ECDHE-ECDSA-AES128-SHA256`, `ECDHE-RSA-AES128-SHA`, `ECDHE-ECDSA-AES128-SHA`, `ECDHE-RSA-AES256-SHA384`, `ECDHE-ECDSA-AES256-SHA384`, `ECDHE-RSA-AES256-SHA`, `ECDHE-ECDSA-AES256-SHA`, `DHE-RSA-AES128-SHA256`, `DHE-RSA-AES128-SHA`, `DHE-DSS-AES128-SHA256`, `DHE-RSA-AES256-SHA256`, `DHE-DSS-AES256-SHA`, `DHE-DSS-AES256-SHA`, `AES128-GCM-SHA256`, `AES256-GCM-SHA384`, `AES128-SHA256`, `AES256-SHA256`, `AES128-SHA`, `AES256-SHA`, `AES`, `!3DES"`, `!aNULL`, `!eNULL`, `!EXPORT`, `DES`, `!RC4`, `MD5`, `!PSK`, `!aECDH`, `!EDH-DSS-DES-CBC3-SHA`, `!EDH-RSA-DES-CBC3-SHA`, `!KRB5-DES-CBC3-SHA`] +| **SSL_PROTOCOLS** | Array of String | If specified, nginx is configured to enable a list of SSL protocols defined in the list. Removing an SSL protocol from the list disables the protocol during {productname} startup. + + + +**Example:** + +`['TLSv1','TLSv1.1','TLSv1.2', 'TLSv1.3']` +| **SESSION_COOKIE_SECURE** | Boolean | Whether the `secure` property should be set on session cookies + + + +**Default:** + +False + + + +**Recommendation:** + +Set to True for all installations using SSL +|=== + + + +== Configuring SSL + + . 
Copy the certificate file and primary key file to your configuration directory, ensuring they are named `ssl.cert` and `ssl.key` respectively: ++ +``` +$ cp ~/ssl.cert $QUAY/config +$ cp ~/ssl.key $QUAY/config +$ cd $QUAY/config +``` + +. Edit the `config.yaml` file and specify that you want Quay to handle TLS: ++ +.config.yaml +[source,yaml] +---- +... +SERVER_HOSTNAME: quay-server.example.com +... +PREFERRED_URL_SCHEME: https +... +---- +. Stop the `Quay` container and restart the registry diff --git a/modules/config-fields-storage-aws.adoc b/modules/config-fields-storage-aws.adoc new file mode 100644 index 000000000..1c0e57586 --- /dev/null +++ b/modules/config-fields-storage-aws.adoc @@ -0,0 +1,115 @@ +:_content-type: CONCEPT +[id="config-fields-storage-aws"] += AWS S3 storage + +The following YAML shows a sample configuration using AWS S3 storage. + +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - S3Storage <1> + - host: s3.us-east-2.amazonaws.com + s3_access_key: ABCDEFGHIJKLMN + s3_secret_key: OL3ABCDEFGHIJKLMN + s3_bucket: quay_bucket + s3_region: <2> + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +# ... +---- +<1> The `S3Storage` storage driver should only be used for AWS S3 buckets. Note that this differs from general S3 access, where the RadosGW driver or other storage services can be used. For an example, see "Example B: Using RadosGW with general S3 access". +<2> Optional. The Amazon Web Services region. Defaults to `us-east-1`. + +[id="config-fields-storage-aws-sts"] +== AWS STS S3 storage + +The following YAML shows an example configuration for using Amazon Web Services (AWS) Security Token Service (STS) with {productname-ocp} configurations. + +[source,yaml] +---- +# ... 
+DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: <1> + s3_bucket: + storage_path: + sts_user_access_key: <2> + sts_user_secret_key: <3> + s3_region: <4> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +# ... +---- +<1> The unique Amazon Resource Name (ARN). +<2> The generated AWS S3 user access key. +<3> The generated AWS S3 user secret key. +<4> Optional. The Amazon Web Services region. Defaults to `us-east-1`. + +[id="aws-cloudfront-storage-example"] +== AWS Cloudfront storage + +Use the following example when configuring AWS Cloudfront for your {productname} deployment. + +[NOTE] +==== +* When configuring AWS Cloudfront storage, the following conditions must be met for proper use with {productname}: +** You must set an *Origin path* that is consistent with {productname}'s storage path as defined in your `config.yaml` file. Failure to meet this requirement results in a `403` error when pulling an image. For more information, see link:https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginPath[Origin path]. +** You must configure a link:https://docs.aws.amazon.com/whitepapers/latest/secure-content-delivery-amazon-cloudfront/s3-origin-with-cloudfront.html[*Bucket policy*] and a link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html[*Cross-origin resource sharing (CORS)*] policy. 
+==== + +.Cloudfront S3 example YAML +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - CloudFrontedS3Storage + - cloudfront_distribution_domain: + cloudfront_key_id: + cloudfront_privatekey_filename: + host: + s3_access_key: + s3_bucket: + s3_secret_key: + storage_path: + s3_region: +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - default +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- + +.Bucket policy example +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " <1> <2> + }, + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" <3> + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " <1> <2> + }, + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} + +---- +<1> The identifier, or account ID, of the AWS account that owns the CloudFront OAI and S3 bucket. +<2> The CloudFront Origin Access Identity (OAI) that accesses the S3 bucket. +<3> Specifies that CloudFront can access all objects (`/*`) inside of the S3 bucket. 
\ No newline at end of file diff --git a/modules/config-fields-storage-azure.adoc b/modules/config-fields-storage-azure.adoc new file mode 100644 index 000000000..983da1406 --- /dev/null +++ b/modules/config-fields-storage-azure.adoc @@ -0,0 +1,24 @@ +:_content-type: CONCEPT +[id="config-fields-storage-azure"] += Azure Storage + +The following YAML shows a sample configuration using Azure Storage: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + azureStorage: + - AzureStorage + - azure_account_name: azure_account_name_here + azure_container: azure_container_here + storage_path: /datastorage/registry + azure_account_key: azure_account_key_here + sas_token: some/path/ + endpoint_url: https://[account-name].blob.core.usgovcloudapi.net <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - azureStorage +---- +<1> The `endpoint_url` parameter for Azure storage is optional and can be used with Microsoft Azure Government (MAG) endpoints. If left blank, the `endpoint_url` will connect to the normal Azure region. ++ +As of {productname} 3.7, you must use the Primary endpoint of your MAG Blob service. Using the Secondary endpoint of your MAG Blob service will result in the following error: `AuthenticationErrorDetail:Cannot find the claimed account when trying to GetProperties for the account whusc8-secondary`. diff --git a/modules/config-fields-storage-features.adoc b/modules/config-fields-storage-features.adoc new file mode 100644 index 000000000..eda237d7f --- /dev/null +++ b/modules/config-fields-storage-features.adoc @@ -0,0 +1,21 @@ +:_content-type: CONCEPT +[id="config-fields-storage-features"] += Image storage features + +The following table describes the image storage features for {productname}: + +.Storage config features +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_REPO_MIRROR** | Boolean | If set to true, enables repository mirroring. 
+ + + +**Default:** `false` +|**FEATURE_PROXY_STORAGE** | Boolean | Whether to proxy all direct download URLs in storage through NGINX. + + + +**Default:** `false` +| **FEATURE_STORAGE_REPLICATION** | Boolean | Whether to automatically replicate between storage engines. + + + +**Default:** `false` + +|=== diff --git a/modules/config-fields-storage-fields.adoc b/modules/config-fields-storage-fields.adoc new file mode 100644 index 000000000..de700af8f --- /dev/null +++ b/modules/config-fields-storage-fields.adoc @@ -0,0 +1,29 @@ +:_content-type: CONCEPT +[id="config-fields-storage-fields"] += Image storage configuration fields + +The following table describes the image storage configuration fields for {productname}: + +.Storage config fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **DISTRIBUTED_STORAGE_CONFIG** + +(Required) | Object | Configuration for storage engine(s) to use in {productname}. Each key represents a unique identifier for a storage engine. The value consists of a tuple of (key, value) forming an object describing the storage engine parameters. + + + + **Default:** `[]` +| **DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS** + +(Required) | Array of string | The list of storage engine(s) (by ID in `DISTRIBUTED_STORAGE_CONFIG`) whose images should be fully replicated, by default, to all other storage engines. +| **DISTRIBUTED_STORAGE_PREFERENCE** + +(Required) | Array of string | The preferred storage engine(s) (by ID in `DISTRIBUTED_STORAGE_CONFIG`) to use. A preferred engine means it is first checked for pulling and images are pushed to it. + + + + **Default:** `false` + | **MAXIMUM_LAYER_SIZE** | String | Maximum allowed size of an image layer. 
+ + + +**Pattern**: `^[0-9]+(G\|M)$` + + + +**Example**: `100G` + + + +**Default:** `20G` + +|=== diff --git a/modules/config-fields-storage-gcp.adoc b/modules/config-fields-storage-gcp.adoc new file mode 100644 index 000000000..67828c0e5 --- /dev/null +++ b/modules/config-fields-storage-gcp.adoc @@ -0,0 +1,21 @@ +:_content-type: CONCEPT +[id="config-fields-storage-gcp"] += Google Cloud Storage + +The following YAML shows a sample configuration using Google Cloud Storage: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + googleCloudStorage: + - GoogleCloudStorage + - access_key: GOOGQIMFB3ABCDEFGHIJKLMN + bucket_name: quay-bucket + secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN + storage_path: /datastorage/registry + boto_timeout: 120 <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - googleCloudStorage +---- +<1> Optional. The time, in seconds, until a timeout exception is thrown when attempting to read from a connection. The default is `60` seconds. Also encompasses the time, in seconds, until a timeout exception is thrown when attempting to make a connection. The default is `60` seconds. 
\ No newline at end of file diff --git a/modules/config-fields-storage-local.adoc b/modules/config-fields-storage-local.adoc new file mode 100644 index 000000000..12133d16b --- /dev/null +++ b/modules/config-fields-storage-local.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="config-fields-storage-local"] += Local storage + +The following YAML shows a sample configuration using local storage: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - LocalStorage + - storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- diff --git a/modules/config-fields-storage-noobaa.adoc b/modules/config-fields-storage-noobaa.adoc new file mode 100644 index 000000000..26aadd1ba --- /dev/null +++ b/modules/config-fields-storage-noobaa.adoc @@ -0,0 +1,23 @@ +:_content-type: CONCEPT +[id="config-fields-storage-noobaa"] += OpenShift Container Storage/NooBaa + +The following YAML shows a sample configuration using an OpenShift Container Storage/NooBaa instance: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + rhocsStorage: + - RHOCSStorage + - access_key: access_key_here + secret_key: secret_key_here + bucket_name: quay-datastore-9b2108a3-29f5-43f2-a9d5-2872174f9a56 + hostname: s3.openshift-storage.svc.cluster.local + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry + maximum_chunk_size_mb: 100 <1> + server_side_assembly: true <2> +---- +<1> Defines the maximum chunk size, in MB, for the final copy. Has no effect if `server_side_assembly` is set to `false`. +<2> Optional. Whether {productname} should try and use server side assembly and the final chunked copy instead of client assembly. Defaults to `true`. 
\ No newline at end of file diff --git a/modules/config-fields-storage-rados.adoc b/modules/config-fields-storage-rados.adoc new file mode 100644 index 000000000..b7c9c2e45 --- /dev/null +++ b/modules/config-fields-storage-rados.adoc @@ -0,0 +1,30 @@ +:_content-type: CONCEPT +[id="config-fields-storage-rados"] += Ceph Object Gateway/RadosGW storage + +The following YAML shows a sample configuration using Ceph/RadosGW. + +[NOTE] +==== +RadosGW is an on-premises S3-compatible storage solution. Note that this differs from general *AWS S3Storage*, which is specifically designed for use with Amazon Web Services S3. This means that RadosGW implements the S3 API and requires credentials like `access_key`, `secret_key`, and `bucket_name`. For more information about Ceph Object Gateway and the S3 API, see link:https://docs.redhat.com/en/documentation/red_hat_ceph_storage/4/html/developer_guide/ceph-object-gateway-and-the-s3-api#ceph-object-gateway-and-the-s3-api[Ceph Object Gateway and the S3 API]. +==== + +.RadosGW with general s3 access +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + radosGWStorage: <1> + - RadosGWStorage + - access_key: + bucket_name: + hostname: + is_secure: true + port: '443' + secret_key: + storage_path: /datastorage/registry + maximum_chunk_size_mb: 100 <2> + server_side_assembly: true <3> +---- +<1> Used for general s3 access. Note that general s3 access is not strictly limited to Amazon Web Services (AWS) s3, and can be used with RadosGW or other storage services. For an example of general s3 access using the AWS S3 driver, see "AWS S3 storage". +<2> Optional. Defines the maximum chunk size in MB for the final copy. Has no effect if `server_side_assembly` is set to `false`. +<3> Optional. Whether {productname} should try and use server side assembly and the final chunked copy instead of client assembly. Defaults to `true`. 
diff --git a/modules/config-fields-storage-swift.adoc b/modules/config-fields-storage-swift.adoc new file mode 100644 index 000000000..63f6e7750 --- /dev/null +++ b/modules/config-fields-storage-swift.adoc @@ -0,0 +1,26 @@ +:_content-type: REFERENCE +[id="config-fields-storage-swift"] += Swift storage + +The following YAML shows a sample configuration using Swift storage: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + swiftStorage: + - SwiftStorage + - swift_user: swift_user_here + swift_password: swift_password_here + swift_container: swift_container_here + auth_url: https://example.org/swift/v1/quay + auth_version: 3 + os_options: + tenant_id: + user_domain_name: + ca_cert_path: /conf/stack/swift.cert" + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - swiftStorage +---- + diff --git a/modules/config-fields-storage.adoc b/modules/config-fields-storage.adoc new file mode 100644 index 000000000..9e5de4456 --- /dev/null +++ b/modules/config-fields-storage.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="config-fields-storage"] += Image storage + +This section details the image storage features and configuration fields that are available with {productname}. \ No newline at end of file diff --git a/modules/config-fields-tag-expiration.adoc b/modules/config-fields-tag-expiration.adoc new file mode 100644 index 000000000..872e3f6ee --- /dev/null +++ b/modules/config-fields-tag-expiration.adoc @@ -0,0 +1,97 @@ +:_content-type: CONCEPT +[id="config-fields-tag-expiration"] += Tag expiration configuration fields + +The following tag expiration configuration fields are available with {productname}: + +.Tag expiration configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_GARBAGE_COLLECTION** | Boolean | Whether garbage collection of repositories is enabled. 
+ + + +**Default:** True +| **TAG_EXPIRATION_OPTIONS** + +(Required) | Array of string | If enabled, the options that users can select for expiration of tags in their namespace. + + + +**Pattern:** + +`^[0-9]+(y\|w\|m\|d\|h\|s)$` +| **DEFAULT_TAG_EXPIRATION** + +(Required) | String | The default, configurable tag expiration time for time machine. + + + +**Pattern:** + +`^[0-9]+(y\|w\|m\|d\|h\|s)$` + +**Default:** `2w` +| **FEATURE_CHANGE_TAG_EXPIRATION** | Boolean | Whether users and organizations are allowed to change the tag expiration for tags in their namespace. + + + +**Default:** True + +| **FEATURE_AUTO_PRUNE** | Boolean | When set to `True`, enables functionality related to the auto-pruning of tags. + + +*Default:* `False` + +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* |Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. + + + +**Default:** `300` + +|*DEFAULT_NAMESPACE_AUTOPRUNE_POLICY* | Object | The default organization-wide auto-prune policy. + +|{nbsp}{nbsp}{nbsp} *.method: number_of_tags* | Object | The option specifying the number of tags to keep. + +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *method: number_of_tags*, denotes the number of tags to keep. + + +For example, to keep two tags, specify `2`. + +|{nbsp}{nbsp}{nbsp} *.creation_date* | Object | The option specifying the duration of which to keep tags. +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *creation_date*, denotes how long to keep tags. + + +Can be set to seconds (`s`), days (`d`), months (`m`), weeks (`w`), or years (`y`). Must include a valid integer. For example, to keep tags for one year, specify `1y`. + +|*AUTO_PRUNING_DEFAULT_POLICY_POLL_PERIOD* |Integer | The period in which the auto-pruner worker runs at the registry level. By default, it is set to run one time per day (one time per 24 hours). Value must be in seconds. 
+ +|=== + +[id="example-config-fields-tag-expiration"] +== Example tag expiration configuration + +The following YAML example shows you a sample tag expiration configuration. + +[source,yaml] +---- +# ... +DEFAULT_TAG_EXPIRATION: 2w +TAG_EXPIRATION_OPTIONS: + - 0s + - 1d + - 1w + - 2w + - 4w + - 3y +# ... +---- + +[id="example-auto-prune-policy-registry"] +== Registry-wide auto-prune policies examples + +The following YAML examples show you registry-wide auto-pruning examples by both number of tags and creation date. + +.Example registry auto-prune policy by number of tags +[source,yaml] +---- +# ... +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: number_of_tags + value: 10 <1> +# ... +---- +<1> In this scenario, ten tags remain. + +.Example registry auto-prune policy by creation date +[source,yaml] +---- +# ... +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: creation_date + value: 1y +# ... +---- \ No newline at end of file diff --git a/modules/config-fields-user.adoc b/modules/config-fields-user.adoc new file mode 100644 index 000000000..3ea1f8f94 --- /dev/null +++ b/modules/config-fields-user.adoc @@ -0,0 +1,133 @@ +:_content-type: CONCEPT +[id="config-fields-user"] += User configuration fields + + +.User configuration fields +[cols="3a,1a,2a",options="header"] +|=== +| Field | Type | Description +| **FEATURE_SUPER_USERS** | Boolean | Whether superusers are supported + + + +**Default:** `true` +| **FEATURE_USER_CREATION** | Boolean | Whether users can be created (by non-superusers) + + + + **Default:** `true` +| **FEATURE_USER_LAST_ACCESSED** | Boolean | Whether to record the last time a user was accessed + + + +**Default:** `true` +| **FEATURE_USER_LOG_ACCESS** | Boolean | If set to true, users will have access to audit logs for their namespace + + + +**Default:** `false` +| **FEATURE_USER_METADATA** | Boolean | Whether to collect and support user metadata + + + +**Default:** `false` +| **FEATURE_USERNAME_CONFIRMATION** | Boolean | If set to true, users can 
confirm and modify their initial usernames when logging in via OpenID Connect (OIDC) or a non-database internal authentication provider like LDAP. + + +**Default:** `true` +| **FEATURE_USER_RENAME** | Boolean | If set to true, users can rename their own namespace + + + +**Default:** `false` +| **FEATURE_INVITE_ONLY_USER_CREATION** | Boolean | Whether users being created must be invited by another user + + + +**Default:** `false` +| **FRESH_LOGIN_TIMEOUT** | String | The time after which a fresh login requires users to re-enter their password + + + +**Example**: `5m` +| **USERFILES_LOCATION** | String | ID of the storage engine in which to place user-uploaded files + + + +**Example**: `s3_us_east` +| **USERFILES_PATH** | String | Path under storage in which to place user-uploaded files + + + +**Example**: `userfiles` +| **USER_RECOVERY_TOKEN_LIFETIME** | String | The length of time a token for recovering a user account is valid + + + +**Pattern**: `^[0-9]+(w\|m\|d\|h\|s)$` + +**Default**: `30m` + +| **FEATURE_SUPERUSERS_FULL_ACCESS** | Boolean | Grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. + +*Default:* `False` + +|**FEATURE_SUPERUSERS_ORG_CREATION_ONLY** |Boolean | Whether to only allow superusers to create organizations. + +*Default:* `False` + +| **FEATURE_RESTRICTED_USERS** | Boolean | When set to `True` with `RESTRICTED_USERS_WHITELIST`: + +* All normal users and superusers are restricted from creating organizations or content in their own namespace unless they are allowlisted via `RESTRICTED_USERS_WHITELIST`. + +* Restricted users retain their normal permissions within organizations based on team memberships. + +*Default:* `False` + +| **RESTRICTED_USERS_WHITELIST** | String | When set with `FEATURE_RESTRICTED_USERS: true`, specific users are excluded from the `FEATURE_RESTRICTED_USERS` setting. 
+ +| **GLOBAL_READONLY_SUPER_USERS** | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. Only works for those superusers defined with the `SUPER_USERS` configuration field. + +|=== + +[id="user-config-field-reference"] +== User configuration fields references + +Use the following references to update your `config.yaml` file with the desired configuration field. + +[id="configuring-superusers-full-access"] +=== FEATURE_SUPERUSERS_FULL_ACCESS configuration reference + +[source,yaml] +---- +--- +SUPER_USERS: +- quayadmin +FEATURE_SUPERUSERS_FULL_ACCESS: True +--- +---- + +[id="configuring-global-readonly-super-users"] +=== GLOBAL_READONLY_SUPER_USERS configuration reference + +[source,yaml] +---- +--- +GLOBAL_READONLY_SUPER_USERS: + - user1 +--- +---- + +[id="configuring-feature-restricted-users"] +=== FEATURE_RESTRICTED_USERS configuration reference + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: Database +--- +--- +FEATURE_RESTRICTED_USERS: true +--- +---- + +[id="configuring-restricted-users-whitelist"] +=== RESTRICTED_USERS_WHITELIST configuration reference + +.Prerequisites + +* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml` file. + +[source,yaml] +---- +--- +AUTHENTICATION_TYPE: Database +--- +--- +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USERS_WHITELIST: + - user1 +--- +---- + +[NOTE] +==== +When this field is set, whitelisted users can create organizations, or read or write content from the repository even if `FEATURE_RESTRICTED_USERS` is set to `true`. 
Other users, for example, `user2`, `user3`, and `user4` are restricted from creating organizations, reading, or writing content +==== \ No newline at end of file diff --git a/modules/config-fields-v2-ui.adoc b/modules/config-fields-v2-ui.adoc new file mode 100644 index 000000000..7ec14a576 --- /dev/null +++ b/modules/config-fields-v2-ui.adoc @@ -0,0 +1,48 @@ +:_content-type: CONCEPT +[id="config-fields-v2-ui"] += User interface v2 configuration fields + +.User interface v2 configuration fields +[cols="3a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **FEATURE_UI_V2** | Boolean | When set, allows users to try the beta UI environment. ++ +*Default:* `False` + +|**FEATURE_UI_V2_REPO_SETTINGS** |Boolean | When set to `True`, enables repository settings in the {productname} v2 UI. ++ +*Default:* `False` +|=== + + +[id="reference-miscellaneous-v2-ui"] +== v2 user interface configuration + +With `FEATURE_UI_V2` enabled, you can toggle between the current version of the user interface and the new version of the user interface. + +[IMPORTANT] +==== +* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags. +* When running {productname} in the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. +* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. In the new UI, {productname} uses the standard definition of megabyte (MB) to report image manifest sizes. +==== + +.Procedure + +. 
In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_TEAM_SYNCING: false +FEATURE_UI_V2: true +FEATURE_USER_CREATION: true +--- +---- + +. Log in to your {productname} deployment. + +. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to new UI, and then click *Use Beta Environment*, for example: ++ +image:38-ui-toggle.png[{productname} v2 UI toggle] \ No newline at end of file diff --git a/modules/config-file-intro.adoc b/modules/config-file-intro.adoc new file mode 100644 index 000000000..6cd0ea5d5 --- /dev/null +++ b/modules/config-file-intro.adoc @@ -0,0 +1,18 @@ +:_content-type: CONCEPT +[id="editing-the-configuration-file"] += Editing the configuration file + +To deploy a standalone instance of {productname}, you must provide the minimal configuration information. The requirements for a minimal configuration can be found in "{productname} minimal configuration." + +After supplying the required fields, you can validate your configuration. If there are any issues, they will be highlighted. + +//// +[NOTE] +==== +It is possible to use the configuration API to validate the configuration, but this requires starting the `Quay` container in configuration mode. + +To deploy the configuration tool locally, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.10/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#poc-getting-started[Getting started with {productname}] and follow the instructions up to "Configuring {productname}". +==== +//// + +For changes to take effect, the registry must be restarted. 
\ No newline at end of file diff --git a/modules/config-file-location.adoc b/modules/config-file-location.adoc new file mode 100644 index 000000000..78c2f43a1 --- /dev/null +++ b/modules/config-file-location.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="config-file-location"] += Location of configuration file in a standalone deployment + +For standalone deployments of {productname}, the `config.yaml` file must be specified when starting the {productname} registry. This file is located in the configuration volume. For example, the configuration file is located at `$QUAY/config/config.yaml` when deploying {productname} by the following command: + +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- diff --git a/modules/config-file-minimal.adoc b/modules/config-file-minimal.adoc new file mode 100644 index 000000000..ba7119d8e --- /dev/null +++ b/modules/config-file-minimal.adoc @@ -0,0 +1,105 @@ +:_content-type: CONCEPT +[id="config-file-minimal"] += Minimal configuration + +The following configuration options are required for a standalone deployment of {productname}: + +* Server hostname +* HTTP or HTTPS +* Authentication type, for example, Database or Lightweight Directory Access Protocol (LDAP) +* Secret keys for encrypting data +* Storage for images +* Database for metadata +* Redis for build logs and user events +* Tag expiration options + +[id="sample-config-file-minimal"] +== Sample minimal configuration file + +The following example shows a sample minimal configuration file that uses local storage for images: + +[source,yaml] +---- +AUTHENTICATION_TYPE: Database +BUILDLOGS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 + ssl: false +DATABASE_SECRET_KEY: 0ce4f796-c295-415b-bf9d-b315114704b8 +DB_URI: 
postgresql://quayuser:quaypass@quay-server.example.com:5432/quay +DEFAULT_TAG_EXPIRATION: 2w +DISTRIBUTED_STORAGE_CONFIG: + default: + - LocalStorage + - storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +PREFERRED_URL_SCHEME: http +SECRET_KEY: e8f9fe68-1f84-48a8-a05f-02d72e6eccba +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +TAG_EXPIRATION_OPTIONS: + - 0s + - 1d + - 1w + - 2w + - 4w + - 3y +USER_EVENTS_REDIS: + host: quay-server.example.com + port: 6379 + ssl: false +---- + +[id="config-local-storage"] +== Local storage + +Using local storage for images is only recommended when deploying a registry for _proof of concept_ purposes. + +When configuring local storage, storage is specified on the command line when starting the registry. + +The following command maps a local directory, `$QUAY/storage` to the `datastorage` path in the container: + +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- + +[id="config-cloud-storage"] +== Cloud storage + +Storage configuration is detailed in the xref:config-fields-storage[Image storage] section. For some users, it might be useful to compare the difference between Google Cloud Platform and local storage configurations. For example, the following YAML presents a Google Cloud Platform storage configuration: + +.$QUAY/config/config.yaml +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - GoogleCloudStorage + - access_key: GOOGQIMFB3ABCDEFGHIJKLMN + bucket_name: quay_bucket + secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN + storage_path: /datastorage/registry + boto_timeout: 120 <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- +<1> Optional. 
The time, in seconds, until a timeout exception is thrown when attempting to read from a connection. The default is `60` seconds. Also encompasses the time, in seconds, until a timeout exception is thrown when attempting to make a connection. The default is `60` seconds. + +When starting the registry using cloud storage, no configuration is required on the command line. For example: + +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} +---- + diff --git a/modules/config-intro.adoc b/modules/config-intro.adoc new file mode 100644 index 000000000..aab8ea150 --- /dev/null +++ b/modules/config-intro.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="config-intro"] += Getting started with {productname} configuration + +{productname} can be deployed by an independent, standalone configuration, or by using the {productname} Operator on {ocp}. + +How you create, retrieve, update, and validate the {productname} configuration varies depending on the type of deployment you are using. However, the core configuration options are the same for either deployment type. Core configuration is primarily set through a `config.yaml` file, but can also be set by using the configuration API. + +For standalone deployments of {productname}, you must supply the minimum required configuration parameters before the registry can be started. The minimum requirements to start a {productname} registry can be found in the "Retrieving the current configuration" section. + +If you install {productname} on {ocp} using the {productname} Operator, you do not need to supply configuration parameters because the {productname} Operator supplies default information to deploy the registry. + +After you have deployed {productname} with the desired configuration, you should retrieve, and save, the full configuration from your deployment. 
The full configuration contains additional generated values that you might need when restarting or upgrading your system. diff --git a/modules/config-preconfigure-automation-intro.adoc b/modules/config-preconfigure-automation-intro.adoc new file mode 100644 index 000000000..662dad123 --- /dev/null +++ b/modules/config-preconfigure-automation-intro.adoc @@ -0,0 +1,8 @@ +:_content-type: CONCEPT +[id="config-preconfigure-automation-intro"] += Automation options + +The following sections describe the available automation options for {productname} deployments: + +* xref:config-preconfigure-automation[Pre-configuring {productname} for automation] +* xref:using-the-api-to-create-first-user[Using the API to create the first user] diff --git a/modules/config-preconfigure-automation.adoc b/modules/config-preconfigure-automation.adoc new file mode 100644 index 000000000..02972be77 --- /dev/null +++ b/modules/config-preconfigure-automation.adoc @@ -0,0 +1,79 @@ +:_content-type: CONCEPT +[id="config-preconfigure-automation"] += Pre-configuring {productname} for automation + +{productname} supports several configuration options that enable automation. Users can configure these options before deployment to reduce the need for interaction with the user interface. + +[id="allowing-the-api-to-create-first-user"] +== Allowing the API to create the first user + +To create the first user, users need to set the `FEATURE_USER_INITIALIZE` parameter to `true` and call the `/api/v1/user/initialize` API. Unlike all other registry API calls that require an OAuth token generated by an OAuth application in an existing organization, the API endpoint does not require authentication. + +Users can use the API to create a user such as `quayadmin` after deploying {productname}, provided no other users have been created. For more information, see xref:using-the-api-to-create-first-user[Using the API to create the first user]. 
+ +[id="enabling-general-api-access"] +== Enabling general API access + +Users should set the `BROWSER_API_CALLS_XHR_ONLY` configuration option to `false` to allow general access to the {productname} registry API. + +[id="adding-super-user"] +== Adding a superuser + +After deploying {productname}, users can create a user and give the first user administrator privileges with full permissions. Users can configure full permissions in advance by using the `SUPER_USERS` configuration object. For example: + +[source,yaml] +---- +# ... +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +SUPER_USERS: + - quayadmin +# ... +---- + +[id="restricting-user-creation"] +== Restricting user creation + +After you have configured a superuser, you can restrict the ability to create new users to the superuser group by setting the `FEATURE_USER_CREATION` to `false`. For example: + +[source,yaml] +---- +# ... +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- quayadmin +FEATURE_USER_CREATION: false +# ... +---- + +[id="enabling-new-functionality-310"] +== Enabling new functionality in {productname} {producty} + +To use new {productname} {producty} functions, enable some or all of the following features: + +[source,yaml] +---- +# ... +FEATURE_UI_V2: true +FEATURE_UI_V2_REPO_SETTINGS: true +FEATURE_AUTO_PRUNE: true +ROBOTS_DISALLOW: false +# ... +---- + +[id="suggested-configuration-for-automation"] +== Suggested configuration for automation + +The following `config.yaml` parameters are suggested for automation: + +[source,yaml] +---- +# ... +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- quayadmin +FEATURE_USER_CREATION: false +# ... 
+---- \ No newline at end of file diff --git a/modules/config-ui-access-settings.adoc b/modules/config-ui-access-settings.adoc new file mode 100644 index 000000000..040511f64 --- /dev/null +++ b/modules/config-ui-access-settings.adoc @@ -0,0 +1,5 @@ +[[config-ui-access-settings]] += Access settings configuration + + +image:ui-access-settings.png[Access settings configuration] \ No newline at end of file diff --git a/modules/config-ui-action-log.adoc b/modules/config-ui-action-log.adoc new file mode 100644 index 000000000..e26369fb5 --- /dev/null +++ b/modules/config-ui-action-log.adoc @@ -0,0 +1,22 @@ +[[config-ui-action-log]] += Action log configuration + +== Action log storage configuration + + +=== Database action log storage +image:ui-action-log-database.png[Database action log storage configuration] + +=== Elasticsearch action log storage + +image:ui-action-log-elastic.png[Elasticsearch log storage configuration] + + +== Action log rotation and archiving + +image:ui-action-log-rotation.png[Action log rotation and archiving configuration] + + +image:ui-action-log-rotation-storage-choice.png[Action log rotation and archiving storage choice] + + diff --git a/modules/config-ui-app-registry.adoc b/modules/config-ui-app-registry.adoc new file mode 100644 index 000000000..ddb64b1aa --- /dev/null +++ b/modules/config-ui-app-registry.adoc @@ -0,0 +1,4 @@ +[[config-ui-app-registry]] += Application registry configuration + +image:ui-app-registry.png[Application registry configuration] \ No newline at end of file diff --git a/modules/config-ui-basic-config.adoc b/modules/config-ui-basic-config.adoc new file mode 100644 index 000000000..654344fe2 --- /dev/null +++ b/modules/config-ui-basic-config.adoc @@ -0,0 +1,8 @@ +[[config-ui-basic-config]] += Basic configuration + +image:ui-basic-config.png[Basic configuration] + + +== Contact information +image:ui-basic-config-contact-info.png[Basic configuration] \ No newline at end of file diff --git 
a/modules/config-ui-custom-ssl-certs.adoc b/modules/config-ui-custom-ssl-certs.adoc new file mode 100644 index 000000000..61b386bd5 --- /dev/null +++ b/modules/config-ui-custom-ssl-certs.adoc @@ -0,0 +1,13 @@ +:_content-type: PROCEDURE +[id="config-ui-custom-ssl-certs"] += Custom SSL/TLS certificates UI + +The config tool can be used to load custom certificates to facilitate access to resources like external databases. Select the custom certs to be uploaded, ensuring that they are in PEM format, with an extension `.crt`. + +image:ui-custom-ssl-certs.png[Custom SSL/TLS certificates] + +The config tool also displays a list of any uploaded certificates. After you upload your custom SSL/TLS cert, it will appear in the list. For example: + +image:ui-custom-ssl-certs-uploaded.png[Custom SSL/TLS certificates] + +//As an alternative to using the config tool, you can place the custom certs in a folder named `extra_ca_certs` under the {productname} config directory where the `config.yaml` is located. \ No newline at end of file diff --git a/modules/config-ui-data-consistency.adoc b/modules/config-ui-data-consistency.adoc new file mode 100644 index 000000000..6053bfcff --- /dev/null +++ b/modules/config-ui-data-consistency.adoc @@ -0,0 +1,4 @@ +[[config-ui-data-consistency]] += Data consistency + +image:ui-data-consistency.png[Data consistency] diff --git a/modules/config-ui-database.adoc b/modules/config-ui-database.adoc new file mode 100644 index 000000000..47457560b --- /dev/null +++ b/modules/config-ui-database.adoc @@ -0,0 +1,21 @@ +[[config-ui-database]] += Database configuration + +You can choose between PostgreSQL and MySQL: +image:ui-database-choice.png[Database choice] + +[NOTE] +==== +The MySQL and MariaDB databases have been deprecated as of {productname} 3.6. Support for these databases will be removed in a future version of {productname}. If starting a new {productname} installation, it is strongly recommended to use PostgreSQL. 
+==== + +== PostgreSQL configuration + +Enter the details for connecting to the database: + +image:ui-database-postgres.png[PostgreSQL configuration] + + +This will generate a DB_URI field of the form `postgresql://quayuser:quaypass@quay-server.example.com:5432/quay`. + +If you need finer-grained control of the connection arguments, see the section "Database connection arguments" in the Configuration Guide. diff --git a/modules/config-ui-dockerfile-build.adoc b/modules/config-ui-dockerfile-build.adoc new file mode 100644 index 000000000..cca956287 --- /dev/null +++ b/modules/config-ui-dockerfile-build.adoc @@ -0,0 +1,18 @@ +[[config-ui-dockerfile-build]] += Dockerfile build support + +image:ui-dockerfile-build.png[Dockerfile build support] + + +== GitHub (Enterprise) Build Triggers + +image:ui-dockerfile-build-github.png[GitHub (Enterprise) Build Triggers] + +== BitBucket Build Triggers + +image:ui-dockerfile-build-bitbucket.png[BitBucket Build Triggers] + +== GitLab Build Triggers + +image:ui-dockerfile-build-gitlab.png[GitLab Build Triggers] + diff --git a/modules/config-ui-email.adoc b/modules/config-ui-email.adoc new file mode 100644 index 000000000..fa753ed98 --- /dev/null +++ b/modules/config-ui-email.adoc @@ -0,0 +1,4 @@ +[[config-ui-email]] += Email configuration + +image:ui-email.png[Email configuration] \ No newline at end of file diff --git a/modules/config-ui-internal-authentication.adoc b/modules/config-ui-internal-authentication.adoc new file mode 100644 index 000000000..23b2cb6ec --- /dev/null +++ b/modules/config-ui-internal-authentication.adoc @@ -0,0 +1,20 @@ +[[config-ui-internal-authentication]] += Internal authentication configuration + +image:ui-internal-authentication.png[Internal authentication configuration] + +image:ui-internal-authentication-choice.png[Internal authentication choice] + + +== LDAP +image:ui-auth-ldap.png[LDAP authentication] + +== Keystone (OpenStack identity) +image:ui-auth-keystone.png[Keystone authentication] + +== JWT 
custom authentication +image:ui-auth-jwt.png[JWT custom authentication] + +== External application token +image:ui-auth-external-app.png[External application token authentication] + diff --git a/modules/config-ui-intro.adoc b/modules/config-ui-intro.adoc new file mode 100644 index 000000000..2dc0da197 --- /dev/null +++ b/modules/config-ui-intro.adoc @@ -0,0 +1,4 @@ +:_content-type: CONCEPT +[id="config-using-tool"] += Using the configuration tool + diff --git a/modules/config-ui-mirroring.adoc b/modules/config-ui-mirroring.adoc new file mode 100644 index 000000000..a64fda43f --- /dev/null +++ b/modules/config-ui-mirroring.adoc @@ -0,0 +1,7 @@ += Mirroring configuration UI + +. Start the `Quay` container in configuration mode and select the Enable Repository Mirroring check box. If you want to require HTTPS communications and verify certificates during mirroring, select the HTTPS and cert verification check box. ++ +image:repo_mirror_config.png[Enable mirroring and require HTTPS and verified certificates] + +. Validate and download the `configuration` file, and then restart Quay in registry mode using the updated config file. 
diff --git a/modules/config-ui-oauth.adoc b/modules/config-ui-oauth.adoc new file mode 100644 index 000000000..6105a2b8a --- /dev/null +++ b/modules/config-ui-oauth.adoc @@ -0,0 +1,10 @@ +[[config-ui-oauth]] += External authentication (OAUTH) configuration + + +== GitHub (Enterprise) authentication + +image:ui-oauth-github.png[GitHub (Enterprise) authentication configuration] + +== Google authentication +image:ui-oauth-google.png[Google authentication configuration] diff --git a/modules/config-ui-redis.adoc b/modules/config-ui-redis.adoc new file mode 100644 index 000000000..849be395c --- /dev/null +++ b/modules/config-ui-redis.adoc @@ -0,0 +1,4 @@ +[[config-ui-redis]] += Redis configuration + +image:ui-redis.png[Redis configuration] \ No newline at end of file diff --git a/modules/config-ui-repo-mirroring.adoc b/modules/config-ui-repo-mirroring.adoc new file mode 100644 index 000000000..d028b4e76 --- /dev/null +++ b/modules/config-ui-repo-mirroring.adoc @@ -0,0 +1,4 @@ +[[config-ui-repo-mirroring]] += Repository mirroring configuration + +image:ui-repo-mirroring.png[Repository mirroring configuration] \ No newline at end of file diff --git a/modules/config-ui-security-scanner.adoc b/modules/config-ui-security-scanner.adoc new file mode 100644 index 000000000..6b1adfec9 --- /dev/null +++ b/modules/config-ui-security-scanner.adoc @@ -0,0 +1,4 @@ +[[config-ui-security-scanner]] += Security scanner configuration + +image:ui-security-scanner.png[Security scanner configuration] \ No newline at end of file diff --git a/modules/config-ui-server-config.adoc b/modules/config-ui-server-config.adoc new file mode 100644 index 000000000..d6873e9c2 --- /dev/null +++ b/modules/config-ui-server-config.adoc @@ -0,0 +1,12 @@ +[[config-ui-server-config]] += Server configuration + +image:ui-server-config-no-tls.png[Server configuration] + + +== Server configuration choice +image:ui-server-config-tls-choice.png[Server configuration choice] + + +== TLS configuration 
+image:ui-server-config-tls.png[TLS configuration] \ No newline at end of file diff --git a/modules/config-ui-storage-engines.adoc b/modules/config-ui-storage-engines.adoc new file mode 100644 index 000000000..b17e93f5e --- /dev/null +++ b/modules/config-ui-storage-engines.adoc @@ -0,0 +1,24 @@ +[[config-ui-storage-engines]] += Storage engines + +== Local storage +image:ui-storage-local.png[Local storage configuration] + + +== Amazon S3 storage +image:ui-storage-s3.png[Amazon S3 storage configuration] + +== Azure blob storage +image:ui-storage-azure.png[Azure blob storage configuration] + +== Google cloud storage +image:ui-storage-google.png[Google cloud storage configuration] + +== Ceph object gateway (RADOS) storage +image:ui-storage-ceph.png[Ceph object gateway (RADOS) storage configuration] + +== OpenStack (Swift) storage configuration +image:ui-storage-swift.png[OpenStack (Swift) storage configuration] + +== Cloudfront + Amazon S3 storage configuration +image:ui-storage-cloudfront.png[Cloudfront + Amazon S3 storage configuration] diff --git a/modules/config-ui-storage-georepl.adoc b/modules/config-ui-storage-georepl.adoc new file mode 100644 index 000000000..12ef8044d --- /dev/null +++ b/modules/config-ui-storage-georepl.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="enable-storage-replication-standalone"] += Enabling storage replication for standalone {productname} + +Use the following procedure to enable storage replication on {productname}. + +.Procedure + +. Update your `config.yaml` file to include the storage engines to which data will be replicated. You must list all storage engines to be used: ++ +[source,yaml] +---- +# ... +FEATURE_STORAGE_REPLICATION: true +# ... 
+DISTRIBUTED_STORAGE_CONFIG: + usstorage: + - RHOCSStorage + - access_key: + bucket_name: + hostname: my.noobaa.hostname + is_secure: false + port: "443" + secret_key: + storage_path: /datastorage/registry + eustorage: + - S3Storage + - host: s3.amazon.com + port: "443" + s3_access_key: + s3_bucket: + s3_secret_key: + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - usstorage + - eustorage +# ... +---- + +. Optional. If complete replication of all images to all storage engines is required, you can replicate images to the storage engine by manually setting the `DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS` field. This ensures that all images are replicated to that storage engine. For example: ++ +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - usstorage + - eustorage +# ... +---- ++ +[NOTE] +==== +To enable per-namespace replication, contact {productname} support. +==== + +. After adding storage and enabling *Replicate to storage engine by default* for geo-replication, you must sync existing image data across all storage. To do this, you must execute into the container by running the following command: ++ +[source,terminal] +---- +$ podman exec -it +---- + +. To sync the content after adding new storage, enter the following commands: ++ +[source,terminal] +---- +# scl enable python27 bash +---- ++ +[source,terminal] +---- +# python -m util.backfillreplication +---- ++ +[NOTE] +==== +This is a one time operation to sync content after adding new storage. 
+==== \ No newline at end of file diff --git a/modules/config-ui-storage-proxy.adoc b/modules/config-ui-storage-proxy.adoc new file mode 100644 index 000000000..fcdf6da66 --- /dev/null +++ b/modules/config-ui-storage-proxy.adoc @@ -0,0 +1,3 @@ +[[config-ui-storage-proxy]] += Proxy storage + diff --git a/modules/config-ui-storage.adoc b/modules/config-ui-storage.adoc new file mode 100644 index 000000000..e459dbd04 --- /dev/null +++ b/modules/config-ui-storage.adoc @@ -0,0 +1,11 @@ +[[config-ui-storage]] += Registry storage configuration + +* Proxy storage +* Storage georeplication +* Storage engines + + + + + diff --git a/modules/config-ui-time-machine.adoc b/modules/config-ui-time-machine.adoc new file mode 100644 index 000000000..77cc13d59 --- /dev/null +++ b/modules/config-ui-time-machine.adoc @@ -0,0 +1,4 @@ +[[config-ui-time-machine]] += Time machine configuration + +image:ui-time-machine.png[Time machine configuration] diff --git a/modules/config-updates-310.adoc b/modules/config-updates-310.adoc new file mode 100644 index 000000000..e770305fe --- /dev/null +++ b/modules/config-updates-310.adoc @@ -0,0 +1,30 @@ +:_content-type: REFERENCE +[id="config-updates-310"] += Configuration updates for {productname} 3.10 + +The following sections detail new configuration fields added in {productname} 3.10. + +[id="auto-pruner-namespace"] +== Namespace auto-pruning configuration fields + +With {productname} 3.10, deployments can be configured to automatically prune old image tags by a specified, allotted amount, or by the time in which they were created. + +.Namespace auto-pruning configuration field +|=== +|Field | Type |Description +| **FEATURE_AUTO_PRUNE** | Boolean | When set to `True`, enables functionality related to the auto-pruning of tags. + + +*Default:* `False` + +|**SECURITY_SCANNER_V4_MANIFEST_CLEANUP** |Boolean | When set to `true` the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. 
+ + +*Default*: `True` + +|**ROBOTS_DISALLOW** |Boolean |When set to `true`, robot accounts are prevented from all interactions, as well as from being created + + +*Default*: `False` + +|**FEATURE_UI_V2_REPO_SETTINGS** |Boolean | When set to `True`, enables repository settings in the {productname} v2 UI. + + +*Default:* `False` +|=== \ No newline at end of file diff --git a/modules/config-updates-311.adoc b/modules/config-updates-311.adoc new file mode 100644 index 000000000..550ff4d11 --- /dev/null +++ b/modules/config-updates-311.adoc @@ -0,0 +1,59 @@ +:_content-type: REFERENCE +[id="config-updates-311"] += Configuration updates for {productname} 3.11 + +The following sections detail new configuration fields added in {productname} 3.11. + +[id="team-synchronization-configuration-field"] +== Team synchronization configuration field + +The following configuration field has been added for the team synchronization via OIDC feature: + +.Team synchronization configuration field +|=== + +|Field | Type |Description +|*PREFERRED_GROUP_CLAIM_NAME* | String | The key name within the OIDC token payload that holds information about the user's group memberships. + +|=== + +.Team synchronization example YAML configuration + +[source,yaml] +---- +# ... +PREFERRED_GROUP_CLAIM_NAME: +# ... +---- + +[id="aws-s3-sts-configuration-fields"] +== Configuration fields for AWS S3 STS deployments + +The following configuration fields have been added when configuring AWS STS for {productname}. These fields are used when configuring AWS S3 storage for your deployment. + +.AWS S3 STS configuration fields +|=== +|Field | Type |Description +| *.sts_role_arn* | String | The unique Amazon Resource Name (ARN) required when configuring AWS STS for {productname}. + +|*.sts_user_access_key* |String | The generated AWS S3 user access key required when configuring AWS STS for {productname}. 
+ +|*.sts_user_secret_key* |String |The generated AWS S3 user secret key required when configuring AWS STS for {productname}. +|=== + +.AWS S3 STS example YAML configuration + +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: + s3_bucket: + s3_region: + storage_path: + sts_user_access_key: + sts_user_secret_key: +# ... +---- \ No newline at end of file diff --git a/modules/config-updates-312.adoc b/modules/config-updates-312.adoc new file mode 100644 index 000000000..9e7031221 --- /dev/null +++ b/modules/config-updates-312.adoc @@ -0,0 +1,132 @@ +:_content-type: REFERENCE +[id="config-updates-312"] += Configuration updates for {productname} 3.12 + +The following sections detail new configuration fields added in {productname} 3.12. + +[id="registry-auto-prune-configuration-fields"] +== Registry auto-pruning configuration fields + +The following configuration fields have been added to {productname} auto-pruning feature: +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* |Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. + + + +**Default:** `300` + +|*DEFAULT_NAMESPACE_AUTOPRUNE_POLICY* | Object | The default organization-wide auto-prune policy. + +|{nbsp}{nbsp}{nbsp} *.method: number_of_tags* | Object | The option specifying the number of tags to keep. + +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *method: number_of_tags*, denotes the number of tags to keep. + + +For example, to keep two tags, specify `2`. + +|{nbsp}{nbsp}{nbsp} *.method: creation_date* | Object | The option specifying the duration of which to keep tags. +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *creation_date*, denotes how long to keep tags. + + +Can be set to seconds (`s`), days (`d`), months (`m`), weeks (`w`), or years (`y`). Must include a valid integer. For example, to keep tags for one year, specify `1y`. 
+ +|*AUTO_PRUNING_DEFAULT_POLICY_POLL_PERIOD* |Integer | The period in which the auto-pruner worker runs at the registry level. By default, it is set to run one time per day (one time per 24 hours). Value must be in seconds. + +|=== + +[id="oauth-reassign-configuration-field"] +== OAuth access token reassignment configuration field + +The following configuration field has been added for reassigning OAuth access tokens: + +|=== +| Field | Type | Description +| *FEATURE_ASSIGN_OAUTH_TOKEN* | Boolean | Allows organization administrators to assign OAuth tokens to other users. +|=== + +.Example OAuth access token reassignment YAML +[source,yaml] +---- +# ... +FEATURE_ASSIGN_OAUTH_TOKEN: true +# ... +---- + +[id="image-vulnerability-notification-field"] +== Vulnerability detection notification configuration field + +The following configuration field has been added to notify users on detected vulnerabilities based on security level: + +|=== +| Field | Type | Description +| *NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX* | String | Set minimal security level for new notifications on detected vulnerabilities. Avoids creation of large number of notifications after first index. If not defined, defaults to `High`. Available options include `Critical`, `High`, `Medium`, `Low`, `Negligible`, and `Unknown`. +|=== + +.Example image vulnerability notification YAML +[source,yaml] +---- +NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX: High +---- + +[id="oci-referrers-api-configuration-field"] +== OCI referrers API configuration field + +The following configuration field allows users to list OCI referrers of a manifest under a repository by using the v2 API: + +|=== +| Field | Type | Description +| *FEATURE_REFERRERS_API* | Boolean | Enables OCI 1.1's referrers API. +|=== + +.Example OCI referrers enablement YAML +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: true +# ... 
+---- + +[id="disable-strict-logging-configuration-field"] +== Disable strict logging configuration field + +The following configuration field has been added to address when external systems like Splunk or ElasticSearch are configured as audit log destinations but are intermittently unavailable. When set to `True`, the logging event is logged to the stdout instead. + +|=== +| Field | Type | Description +| *ALLOW_WITHOUT_STRICT_LOGGING* | Boolean | When set to `True`, if the external log system like Splunk or ElasticSearch is intermittently unavailable, allows users to push images normally. Events are logged to the stdout instead. Overrides `ALLOW_PULLS_WITHOUT_STRICT_LOGGING` if set. +|=== + +.Example strict logging YAML +[source,yaml] +---- +# ... +ALLOW_WITHOUT_STRICT_LOGGING: True +# ... +---- + +[id="notification-configuration-field"] +== Notification interval configuration field + +The following configuration field has been added to enhance {productname} notifications: + +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* | Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. By default, this field is set to notify {productname} users of events happening every 5 hours. +|=== + +.Example notification re-run YAML +[source,yaml] +---- +# ... +NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES: 10 +# ... +---- + +[id="clair-index-layer-size-configuration-field"] +== Clair indexing layer size configuration field + +The following configuration field has been added for the Clair security scanner, which allows {productname} administrators to set a maximum layer size allowed for indexing. + +|=== +| Field | Type | Description +| *SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE* | String | The maximum layer size allowed for indexing. 
If the layer size exceeds the configured size, the {productname} UI returns the following message: `The manifest for this tag has layer(s) that are too large to index by the Quay Security Scanner`. The default is `8G`, and the maximum recommended is `10G`. + + + *Example*: `8G` +|=== \ No newline at end of file diff --git a/modules/config-updates-313.adoc b/modules/config-updates-313.adoc new file mode 100644 index 000000000..e6eafd5e3 --- /dev/null +++ b/modules/config-updates-313.adoc @@ -0,0 +1,26 @@ +:_content-type: REFERENCE +[id="config-updates-313"] += Configuration updates for {productname} 3.13 + +The following sections detail new configuration fields added in {productname} 3.13. + +[id="disabling-pushes-configuration-field"] +== Disabling pushes to the {productname} registry + +The following configuration field has been added to disable the push of new content to the registry. + +|=== +| Field | Type | Description + +|*DISABLE_PUSHES* |Boolean | Disables pushes of new content to the registry while retaining all other functionality. Differs from `read-only` mode because database is not set as `read-only`. When `DISABLE_PUSHES` is set to `true`, the {productname} garbage collector is disabled. As a result, when `PERMANENTLY_DELETE_TAGS` is enabled, using the {productname} UI to permanently delete a tag does not result in the immediate deletion of a tag. Instead, the image stays in the backend storage until `DISABLE_PUSHES` is set to `false`, which re-enables the garbage collector. {productname} administrators should be aware of this caveat when using `DISABLE_PUSHES` and `PERMANENTLY_DELETE_TAGS` together. + + + + **Default:** False +|=== + +.Example DISABLE_PUSHES configuration field +[source,yaml] +---- +# ... +DISABLE_PUSHES: true +# ... 
+---- diff --git a/modules/config-updates-314.adoc b/modules/config-updates-314.adoc new file mode 100644 index 000000000..b90a331ec --- /dev/null +++ b/modules/config-updates-314.adoc @@ -0,0 +1,79 @@ +:_content-type: REFERENCE +[id="config-updates-314"] += Configuration updates for {productname} 3.14 + +The following sections detail new configuration fields added in {productname} 3.14. + +[id="model-card-rendering"] +== Model card rendering configuration fields + +The following configuration fields have been added to support model card rendering on the v2 UI. + +|=== +| Field | Type | Description + +|*FEATURE_UI_MODELCARD* |Boolean | Enables *Model card* image tab in UI. Defaults to `true`. +|*UI_MODELCARD_ARTIFACT_TYPE* | String | Defines the model card artifact type. +|*UI_MODELCARD_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|*UI_MODELCARD_LAYER_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|=== + +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true <1> +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel <2> +UI_MODELCARD_ANNOTATION: <3> + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: <4> + org.opencontainers.image.title: README.md +---- +<1> Enables the *Model Card* image tab in the UI. +<2> Defines the model card artifact type. In this example, the artifact type is `application/x-mlmodel`. +<3> Optional. If an image does not have an `artifactType` defined, this field is checked at the manifest level. If a matching annotation is found, the system then searches for a layer with an annotation matching `UI_MODELCARD_LAYER_ANNOTATION`. +<4> Optional. If an image has an `artifactType` defined and multiple layers, this field is used to locate the specific layer containing the model card. 
+ +[id="new-quay-footer-fields"] +== Footer configuration fields + +The following configuration fields have been added to the original (v1) UI. You can use these fields to customize the footer of your on-prem v1 UI. + +[NOTE] +==== +These fields are currently unavailable on the {productname} v2 UI. +==== + +|=== +| Field | Type | Description + +|*FOOTER_LINKS* |Object | Enable customization of footer links in {productname}'s UI for on-prem installations. + +|*.TERMS_OF_SERVICE_URL* | String | Custom terms of service for on-prem installations. + + + +**Example:** + +`https://index.hr` + +|*.PRIVACY_POLICY_URL* | String | Custom privacy policy for on-prem installations. + + + +**Example:** + +`https://index.hr` +|*.SECURITY_URL* | String | Custom security page for on-prem installations. + + + +**Example:** + +`https://index.hr` + +| **.ABOUT_URL** | String | Custom about page for on-prem installations. + + + +**Example:** + +`https://index.hr` +|=== + +.Example footer links YAML +[source,yaml] +---- +FOOTER_LINKS: + "TERMS_OF_SERVICE_URL": "https://www.index.hr" + "PRIVACY_POLICY_URL": "https://www.example.hr" + "SECURITY_URL": "https://www.example.hr" + "ABOUT_URL": "https://www.example.hr" +---- \ No newline at end of file diff --git a/modules/config-updates-36.adoc b/modules/config-updates-36.adoc new file mode 100644 index 000000000..7f76adff0 --- /dev/null +++ b/modules/config-updates-36.adoc @@ -0,0 +1,35 @@ +:_content-type: CONCEPT +[id="config-updates-36"] += Configuration updates for {productname} 3.6 + + +[id="new-configuration-fields-36"] +== New configuration fields + +The following configuration fields have been introduced with {productname} 3.6: + +[options="header"] +|=== +|Parameter |Description +|**FEATURE_EXTENDED_REPOSITORY_NAMES** |Support for nested repositories and extended repository names has been added. This change allows the use of `/` in repository names needed for certain {ocp} use cases. 
For more information, see xref:config-fields-nested-repositories[Configuring nested repositories]. + +|**FEATURE_USER_INITIALIZE** |If set to true, the first `User` account can be created by the API `/api/v1/user/initialize`. For more information, see xref:config-preconfigure-automation[Pre-configuring {productname} for automation]. + +| **ALLOWED_OCI_ARTIFACT_TYPES** |Helm, cosign, and ztsd compression scheme artifacts are built into {productname} 3.6 by default. For any other Open Container Initiative (OCI) artifact types that are not supported by default, you can add them to the `ALLOWED_OCI_ARTIFACT_TYPES` configuration in Quay's `config.yaml` For more information, see xref:other-oci-artifacts-with-quay[Adding other OCI media types to Quay]. + +| **CREATE_PRIVATE_REPO_ON_PUSH** |Registry users now have the option to set `CREATE_PRIVATE_REPO_ON_PUSH` in their `config.yaml` to `True` or `False` depending on their security needs. + +| **CREATE_NAMESPACE_ON_PUSH** |Pushing to a non-existent organization can now be configured to automatically create the organization. +|=== + +[id="deprecated-configuration-fields-36"] +== Deprecated configuration fields + +The following configuration fields have been deprecated with {productname} 3.6: + +[options="header"] +|=== +|Parameter |Description +| *FEATURE_HELM_OCI_SUPPORT* |This option has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their `config.yaml` files to enable support. 
+ +|=== \ No newline at end of file diff --git a/modules/config-updates-37.adoc b/modules/config-updates-37.adoc new file mode 100644 index 000000000..96c38a426 --- /dev/null +++ b/modules/config-updates-37.adoc @@ -0,0 +1,35 @@ +:_content-type: CONCEPT +[id="config-updates-37"] += Configuration updates for Quay 3.7 + +[id="new-configuration-fields-377"] +== New configuration fields for {productname} 3.7.7 + +[options="header"] +|=== + +|Field |Type |Description + +|**REPO_MIRROR_ROLLBACK** | Boolean | When set to `true`, the repository rolls back after a failed mirror attempt. + +*Default*: `false` + +|=== + + +[id="new-configuration-fields-37"] +== New configuration fields + +The following configuration fields have been introduced with {productname} 3.7: + +[options="header"] +|=== + +|Parameter |Description +| **FEATURE_QUOTA_MANAGEMENT** | Quota management is now supported. With this feature, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. For more information about quota management, see link:https://access.redhat.com//documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[{productname} Quota management and enforcement]. + +| **DEFAULT_SYSTEM_REJECT_QUOTA_BYTES** |The quota size to apply to all organizations and users. For more information about quota management, see link:https://access.redhat.com//documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[{productname} Quota management and enforcement]. + +| **FEATURE_PROXY_CACHE** |Using {productname} to proxy a remote organization is now supported. With this feature, {productname} will act as a proxy cache to circumvent pull-rate limitations from upstream registries. 
For more information about proxy caching, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries].
+ +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-restricted-users-enabling[**LDAP_RESTRICTED_USER_FILTER**] | String | Subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. + +| xref:configuring-superusers-full-access[**FEATURE_SUPERUSERS_FULL_ACCESS**] | Boolean | Grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. + +*Default:* `False` + +| xref:configuring-global-readonly-super-users[**GLOBAL_READONLY_SUPER_USERS**] | String | When set, grants users of this list read access to all repositories, regardless of whether they are public repositories. + +| xref:configuring-feature-restricted-users[**FEATURE_RESTRICTED_USERS**] | Boolean | When set with `RESTRICTED_USERS_WHITELIST`, restricted users cannot create organizations or content in their own namespace. Normal permissions apply for an organization's membership, for example, a restricted user will still have normal permissions in organizations based on the teams that they are members of. + +*Default:* `False` + +| xref:configuring-restricted-users-whitelist[**RESTRICTED_USERS_WHITELIST**] | String | When set with `FEATURE_RESTRICTED_USERS: true`, specific users are excluded from the `FEATURE_RESTRICTED_USERS` setting. +|=== diff --git a/modules/config-updates-39.adoc b/modules/config-updates-39.adoc new file mode 100644 index 000000000..72acd5778 --- /dev/null +++ b/modules/config-updates-39.adoc @@ -0,0 +1,169 @@ +:_content-type: REFERENCE +[id="config-updates-39"] += Configuration updates for {productname} 3.9 + +The following sections detail new configuration fields added in {productname} 3.9. 
+ +[id="tracking-audit-logins"] +== Action log audit configuration + +With {productname} 3.9, audit logins are tracked by default. + +.Audit logs configuration field +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **ACTION_LOG_AUDIT_LOGINS** | Boolean | When set to `True`, tracks advanced events such as logging into, and out of, the UI, and logging in using Docker for regular users, robot accounts, and for application-specific token accounts. + + + +**Default:** `True` +|=== + +[id="splunk-action-log-field"] +== Addition of Splunk action logs + +With {productname} 3.9, Splunk can be configured under the *LOGS_MODEL* parameter. + +.Splunk configuration fields +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| **LOGS_MODEL** | String | Specifies the preferred method for handling log data. + + + +**Values:** One of `database`, `transition_reads_both_writes_es`, `elasticsearch`, `splunk` + +**Default:** `database` +|=== + +[id="new-model-config-options"] +=== LOGS_MODEL_CONFIG additions + +The following *LOGS_MODEL_CONFIG* options are available when configuring Splunk. + +* **LOGS_MODEL_CONFIG** [object]: Logs model config for action logs +** **producer** [string]: `splunk` +** **splunk_config** [object]: Logs model configuration for Splunk action logs or the Splunk cluster configuration +*** **host** [string]: Splunk cluster endpoint. +*** **port** [integer]: Splunk management cluster endpoint port. +*** **bearer_token** [string]: The bearer token for Splunk. +*** **verify_ssl** [boolean]: Enable (`True`) or disable (`False`) TLS/SSL verification for HTTPS connections. +*** **index_prefix** [string]: Splunk's index prefix. +*** **ssl_ca_path** [string]: The relative container path to a single `.pem` file containing a certificate authority (CA) for SSL validation. + +[id="splunk-example-yaml"] +=== Example configuration for Splunk + +The following YAML entry provides an example configuration for Splunk. 
+ +.Splunk config.yaml example +[source,yaml] +---- +--- +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: http://.remote.csb + port: 8089 + bearer_token: + url_scheme: + verify_ssl: False + index_prefix: + ssl_ca_path: +--- +---- + +[id="quota-management-config-fields"] +== Quota management configuration fields + +The following configuration fields have been added to enhance the {productname} quota management feature. + +.{productname} 3.9 quota management configuration fields +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description + +| **QUOTA_BACKFILL** | Boolean | Enables the quota backfill worker to calculate the size of pre-existing blobs. + + + +**Default**: `True` + +|**QUOTA_TOTAL_DELAY_SECONDS** |String | The time delay for starting the quota backfill. Rolling deployments can cause incorrect totals. This field *must* be set to a time longer than it takes for the rolling deployment to complete. + + + +**Default**: `1800` + +|**PERMANENTLY_DELETE_TAGS** |Boolean | Enables functionality related to the removal of tags from the time machine window. + + + +**Default**: `False` + +|**RESET_CHILD_MANIFEST_EXPIRATION** |Boolean |Resets the expirations of temporary tags targeting the child manifests. With this feature set to `True`, child manifests are immediately garbage collected. + + + +**Default**: `False` + +|=== + +[id="quota-management-config-settings-39"] +=== Possible quota management configuration settings + +The following table explains possible quota management configuration settings in {productname} 3.9. + +.Quota management configuration options +[cols="2a,1a,2a",options="header"] +|=== +|*FEATURE_QUOTA_MANAGEMENT* |*QUOTA_BACKFILL* |*OUTCOME* +|`true` |`true` | With these features configured as `true`, quota management is enabled and working for {productname} 3.9. For more information about configuring quota management for {productname} 3.9, see "Quota management for {productname} 3.9". 
+|`true` |`false` |With `FEATURE_QUOTA_MANAGEMENT` set to `true`, and `QUOTA_BACKFILL` set to `false`, the quota management feature has been enabled. However, pre-existing images from a prior (N-1) y-stream version of {productname} (for example, 3.8), must be backfilled before quota calculation can continue. To backfill image sizes, set `QUOTA_BACKFILL` to `true`. +|`false` |`false` | With these features configured as `false`, the quota management feature is disabled. +|`false` |`true` | With `FEATURE_QUOTA_MANAGEMENT` set to `false`, and `QUOTA_BACKFILL` set to `true`, the quota management feature is disabled. +|=== + +[id="suggested-management-config-settings-39-quota"] +=== Suggested quota management configuration settings + +The following YAML is the suggested configuration when enabling quota management. + +.Suggested quota management configuration +[source,yaml] +---- +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_GARBAGE_COLLECTION: true +PERMANENTLY_DELETE_TAGS: true +QUOTA_TOTAL_DELAY_SECONDS: 1800 +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +[id=postgresql-pvc-backup-config-fields] +== PostgreSQL PVC backup environment variable + +The following environment variable has been added to configure whether {productname} automatically removes old persistent volume claims (PVCs) when upgrading from version 3.8 -> 3.9: + +.{productname} 3.9 PostgreSQL backup environment variable +[cols="2a,1a,2a",options="header"] +|=== +|Field | Type |Description +| *POSTGRES_UPGRADE_DELETE_BACKUP* |Boolean | When set to `True`, removes old persistent volume claims (PVCs) after upgrading. ++ +**Default**: `False` + +|=== + +[id="pvc-backup-example-yaml"] +=== Example configuration for PostgreSQL PVC backup + +The following `Subscription` object provides an example configuration for backing up PostgreSQL 10 PVCs. 
+ +.`Subscription` object for PostgreSQL 10 PVCs +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.8 + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_DELETE_BACKUP + value: "false" +---- diff --git a/modules/configuring-api-calls.adoc b/modules/configuring-api-calls.adoc new file mode 100644 index 000000000..922bfb314 --- /dev/null +++ b/modules/configuring-api-calls.adoc @@ -0,0 +1,17 @@ +[id="configuring-api-calls"] += Configuring {productname} to accept API calls + +Prior to using the {productname} API, you must disable `BROWSER_API_CALLS_XHR_ONLY` in your `config.yaml` file. This allows you to avoid such errors as `API calls must be invoked with an X-Requested-With header if called from a browser`. + +.Procedure + +. In your {productname} `config.yaml` file, set `BROWSER_API_CALLS_XHR_ONLY` to `false`. For example: ++ +[source,yaml] +---- +# ... +BROWSER_API_CALLS_XHR_ONLY: false +# ... +---- + +. Restart your {productname} deployment. \ No newline at end of file diff --git a/modules/configuring-aws-sts-quay.adoc b/modules/configuring-aws-sts-quay.adoc new file mode 100644 index 000000000..87d9c3c03 --- /dev/null +++ b/modules/configuring-aws-sts-quay.adoc @@ -0,0 +1,125 @@ +:_content-type: PROCEDURE +[id="configuring-aws-sts-quay"] += Configuring AWS STS for {productname} + +Support for Amazon Web Services (AWS) Security Token Service (STS) is available for standalone {productname} deployments and {productname-ocp}. AWS STS is a web service for requesting temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users and for users that you authenticate, or _federated users_. 
This feature is useful for clusters using Amazon S3 as an object storage, allowing {productname} to use STS protocols to authenticate with Amazon S3, which can enhance the overall security of the cluster and help to ensure that access to sensitive data is properly authenticated and authorized. + +Configuring AWS STS is a multi-step process that requires creating an AWS IAM user, creating an S3 role, and configuring your {productname} `config.yaml` file to include the proper resources. + +Use the following procedures to configure AWS STS for {productname}. + +[id="creating-am-user"] +== Creating an IAM user + +Use the following procedure to create an IAM user. + +.Procedure + +. Log in to the Amazon Web Services (AWS) console and navigate to the Identity and Access Management (IAM) console. + +. In the navigation pane, under *Access management* click *Users*. + +. Click *Create User* and enter the following information: + +.. Enter a valid username, for example, `quay-user`. + +.. For *Permissions options*, click *Add user to group*. + +. On the *review and create* page, click *Create user*. You are redirected to the *Users* page. + +. Click the username, for example, *quay-user*. + +. Copy the ARN of the user, for example, `arn:aws:iam::123492922789:user/quay-user`. + +. On the same page, click the *Security credentials* tab. + +. Navigate to *Access keys*. + +. Click *Create access key*. + +. On the *Access key best practices & alternatives* page, click *Command Line Interface (CLI)*, then, check the confirmation box. Then click *Next*. + +. Optional. On the *Set description tag - optional* page, enter a description. + +. Click *Create access key*. + +. Copy and store the access key and the secret access key. ++ +[IMPORTANT] +==== +This is the only time that the secret access key can be viewed or downloaded. You cannot recover it later. However, you can create a new access key any time. +==== + +. Click *Done*. 
+ +[id="creating-s3-role"] +== Creating an S3 role + +Use the following procedure to create an S3 role for AWS STS. + +.Prerequisites + +* You have created an IAM user and stored the access key and the secret access key. + +.Procedure + +. If you are not already, navigate to the IAM dashboard by clicking *Dashboard*. + +. In the navigation pane, click *Roles* under *Access management*. + +. Click *Create role*. + +* Click *Custom Trust Policy*, which shows an editable JSON policy. By default, it shows the following information: ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Statement1", + "Effect": "Allow", + "Principal": {}, + "Action": "sts:AssumeRole" + } + ] +} +---- + +. Under the `Principal` configuration field, add your AWS ARN information. For example: ++ +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Statement1", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::123492922789:user/quay-user" + }, + "Action": "sts:AssumeRole" + } + ] +} +---- + +. Click *Next*. + +. On the *Add permissions* page, type `AmazonS3FullAccess` in the search box. Check the box to add that policy to the S3 role, then click *Next*. + +. On the *Name, review, and create* page, enter the following information: + +.. Enter a role name, for example, `example-role`. + +.. Optional. Add a description. + +. Click the *Create role* button. You are navigated to the *Roles* page. Under *Role name*, the newly created S3 should be available. + +//// +[id="configuring-quay-operator-use-aws-sts"] +== Configuring the {productname} to use AWS STS + +Depending on your deployment type, whether standalone or on {ocp}, you can use one of the following procedures to edit your `config.yaml` file to use AWS STS. 
+//// diff --git a/modules/configuring-cert-based-auth-quay-cloudsql.adoc b/modules/configuring-cert-based-auth-quay-cloudsql.adoc new file mode 100644 index 000000000..e7bad5c12 --- /dev/null +++ b/modules/configuring-cert-based-auth-quay-cloudsql.adoc @@ -0,0 +1,116 @@ +:_content-type: PROCEDURE +[id="configuring-cert-based-auth-quay-sql"] += Configuring certificate-based authentication with SQL + +The following procedure demonstrates how to connect {productname} with an SQL database using secure client-side certificates. This method ensures both connectivity and authentication through Certificate Trust Verification, as it verifies the SQL server's certificate against a trusted Certificate Authority (CA). This enhances the security of the connection between {productname} and your SQL server while simplifying automation for your deployment. Although the example uses Google Cloud Platform's CloudSQL, the procedure also applies to PostgreSQL and other supported databases. + +.Prerequisites + +* You have generated custom Certificate Authorities (CAs) and your SSL/TLS certificates and keys are available in `PEM` format that will be used to generate an SSL connection with your CloudSQL database. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. +* You have `base64 decoded` the original config bundle into a `config.yaml` file. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli-download[Downloading the existing configuration]. +* You are using an externally managed PostgreSQL or CloudSQL database. 
For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-unmanaged-postgres[Using and existing PostgreSQL database] with the `DB_URI` variable set. +* Your externally managed PostgreSQL or CloudSQL database is configured for SSL/TLS. +* The `postgres` component of your `QuayRegistry` CRD is set to `managed: false`, and your CloudSQL database is set with the `DB_URI` configuration variable. The following procedure uses `postgresql://:@:/`. + +.Procedure + +. After you have generated the CAs and SSL/TLS certificates and keys for your CloudSQL database and ensured that they are in `.pem` format, test the SSL connection to your CloudSQL server: + +.. Initiate a connection to your CloudSQL server by entering the following command: ++ +[source,terminal] +---- +$ psql "sslmode=verify-ca sslrootcert=.pem sslcert=.pem sslkey=.pem hostaddr= port=<5432> user= dbname=" +---- + +. In your {productname} directory, create a new YAML file, for example, `quay-config-bundle.yaml`, by running the following command: ++ +[source,terminal] +---- +$ touch quay-config-bundle.yaml +---- + +. Create a `postgresql-client-certs` resource by entering the following command: ++ +[source,terminal] +---- +$ oc -n create secret generic postgresql-client-certs \ +--from-file config.yaml= <1> +--from-file=tls.crt= <2> +--from-file=tls.key= <3> +--from-file=ca.crt= <4> +---- +<1> Where` ` is your `base64 decoded` `config.yaml` file. +<2> Where `ssl_client_certificate.pem` is your SSL certificate in `.pem` format. +<3> Where `ssl_client_key.pem` is your SSL key in `.pem` format. +<4> Where `ssl_server_certificate.pem` is your SSL root CA in `.pem` format. + +. 
Edit your ``quay-config-bundle.yaml` file to include the following database connection settings: ++ +[IMPORTANT] +==== +* The information included in the `DB_CONNECTION_ARGS` variable, for example, `sslmode`, `sslrootcert`, `sslcert`, and `sslkey` *must* match the information appended to the `DB_URI` variable. Failure to match might result in a failed connection. +* You cannot specify custom filenames or paths. Certificate file paths for `sslrootcert`, `sslcert`, and `sslkey` are hardcoded defaults and mounted into the `Quay` pod from the Kubernetes secret. You must adhere to the following naming conventions or it will result in a failed connection. +==== ++ +[source,yaml] +---- +DB_CONNECTION_ARGS: + autorollback: true + sslmode: verify-ca <1> + sslrootcert: /.postgresql/root.crt <2> + sslcert: /.postgresql/postgresql.crt <3> + sslkey: /.postgresql/postgresql.key <4> + threadlocals: true <5> +DB_URI: postgresql://:@:/?sslmode=verify-full&sslrootcert=/.postgresql/root.crt&sslcert=/.postgresql/postgresql.crt&sslkey=/.postgresql/postgresql.key <6> +---- +<1> Using `verify-ca` ensures that the database connection uses SSL/TLS and verifies the server certificate against a trusted CA. This can work with both trusted CA and self-signed CA certificates. However, this mode does not verify the hostname of the server. For full hostname and certificate verification, use `verify-full`. For more information about the configuration options available, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-postgres[PostgreSQL SSL/TLS connection arguments]. +<2> The `root.crt` file contains the root certificate used to verify the SSL/TLS connection with your CloudSQL database. This file is mounted in the `Quay` pod from the Kubernetes secret. +<3> The `postgresql.crt` file contains the client certificate used to authenticate the connection to your CloudSQL database. 
This file is mounted in the `Quay` pod from the Kubernetes secret. +<4> The `postgresql.key` file contains the private key associated with the client certificate. This file is mounted in the `Quay` pod from the Kubernetes secret. +<5> Enables auto-rollback for connections. +<6> The URI that accesses your CloudSQL database. Must be appended with the `sslmode` type, your `root.crt`, `postgresql.crt`, and `postgresql.key` files. The SSL/TLS information included in `DB_URI` must match the information provided in `DB_CONNECTION_ARGS`. If you are using CloudSQL, you must include your database username and password in this variable. + +. Create the `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc create -n -f quay-config-bundle.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +secret/quay-config-bundle created +---- + +. Update the `QuayRegistry` YAML file to reference the `quay-config-bundle` object by entering the following command: ++ +[source,terminal] +---- +$ oc patch quayregistry -n --type=merge -p '{"spec":{"configBundleSecret":"quay-config-bundle"}}' +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry patched +---- + +. Ensure that your `QuayRegistry` YAML file has been updated to use the extra CA certificate `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml +---- ++ +.Example output ++ +[source,terminal] +---- +# ... + configBundleSecret: quay-config-bundle +# ... 
+---- diff --git a/modules/configuring-clair-disconnected-environment.adoc b/modules/configuring-clair-disconnected-environment.adoc new file mode 100644 index 000000000..139d8d602 --- /dev/null +++ b/modules/configuring-clair-disconnected-environment.adoc @@ -0,0 +1,39 @@ +:_content-type: PROCEDURE +[id="configuring-clair-disconnected-environment"] += Configuring Clair for disconnected environments + +Use the following procedure to configure Clair for a disconnected environment. + +.Prerequisites + +* You have installed the `clairctl` tool to be run as a binary, or by the Clair container image. + +.Procedure + +. In your `config.yaml` file, set your Clair configuration to disable updaters from running: ++ +.config.yaml +[source,yaml] +---- +matcher: + disable_updaters: true +---- + +. Export the latest updater data to a local archive. The following command assumes that your Clair configuration is in `/etc/clairv4/config/config.yaml` ++ +[subs="verbatim,attributes"] +---- +$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml export-updaters /updaters/updaters.gz +---- ++ +[NOTE] +==== +You must explicitly reference the Clair configuration. This creates the updater archive in `/etc/clairv4/updaters/updaters.gz`. To ensure that the archive was created without any errors from the source databases, you can use the `--strict` flag with `clairctl`. The archive file should be copied over to a volume that is accessible from the disconnected host running Clair. +==== + +. 
From the disconnected host, use the following command to import the archive into Clair: ++ +[subs="verbatim,attributes"] +---- +$ podman run -it --rm -v /etc/clairv4/config:/cfg:Z -v /path/to/output/directory:/updaters:Z --entrypoint /bin/clairctl {productrepo}/{clairimage}:{productminv} --config /cfg/config.yaml import-updaters /updaters/updaters.gz +---- diff --git a/modules/configuring-clair-updaters.adoc b/modules/configuring-clair-updaters.adoc new file mode 100644 index 000000000..95d4cd30c --- /dev/null +++ b/modules/configuring-clair-updaters.adoc @@ -0,0 +1,594 @@ + +[id="configuring-updaters"] += Configuring updaters + +Updaters can be configured by the `updaters.sets` key in your `clair-config.yaml` file. + +[IMPORTANT] +==== +* If the `sets` field is not populated, it defaults to using all sets. In using all sets, Clair tries to reach the URL or URLs of each updater. If you are using a proxy environment, you must add these URLs to your proxy allowlist. +* If updaters are being run automatically within the matcher process, which is the default setting, the period for running updaters is configured under the matcher's configuration field. +==== + +[id="selecting-updater-sets"] +== Selecting specific updater sets + +Use the following references to select one, or multiple, updaters for your {productname} deployment. + +[discrete] +[id="configuring-clair-multiple-updaters"] +=== Configuring Clair for multiple updaters + +.Multiple specific updaters +[source,yaml] +---- +#... +updaters: + sets: + - alpine + - aws + - osv +#... +---- + +[discrete] +[id="configuring-clair-alpine"] +=== Configuring Clair for Alpine + +.Alpine config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - alpine +#... +---- + +[discrete] +[id="configuring-clair-aws"] +=== Configuring Clair for AWS + +.AWS config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - aws +#... 
+---- + +[discrete] +[id="configuring-clair-debian"] +=== Configuring Clair for Debian + +.Debian config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - debian +#... +---- + +[discrete] +[id="configuring-clair-clair-cvss"] +=== Configuring Clair for Clair CVSS + +.Clair CVSS config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - clair.cvss +#... +---- + +[discrete] +[id="configuring-clair-oracle"] +=== Configuring Clair for Oracle + +.Oracle config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - oracle +#... +---- + +[discrete] +[id="configuring-clair-photon"] +=== Configuring Clair for Photon + +.Photon config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - photon +#... +---- + +[discrete] +[id="configuring-clair-suse"] +=== Configuring Clair for SUSE + +.SUSE config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - suse +#... +---- + +[discrete] +[id="configuring-clair-ubuntu"] +=== Configuring Clair for Ubuntu + +.Ubuntu config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - ubuntu +#... +---- + +[discrete] +[id="configuring-clair-osv"] +=== Configuring Clair for OSV + +.OSV config.yaml example +[source,yaml] +---- +#... +updaters: + sets: + - osv +#... +---- + +[id="full-rhel-coverage"] +== Selecting updater sets for full {rhel} coverage + +For full coverage of vulnerabilities in {rhel}, you must use the following updater sets: + +* `rhel`. This updater ensures that you have the latest information on the vulnerabilities that affect {rhel-short}. +* `rhcc`. This updater keeps track of vulnerabilities related to Red Hat's container images. +* `clair.cvss`. This updater offers a comprehensive view of the severity and risk assessment of vulnerabilities by providing Common Vulnerabilities and Exposures (CVE) scores. +* `osv`. This updater focuses on tracking vulnerabilities in open-source software components.
This updater is recommended due to how common the use of Java and Go are in {rhel-short} products. + +.{rhel-short} updaters example +[source,yaml] +---- +#... +updaters: + sets: + - rhel + - rhcc + - clair.cvss + - osv +#... +---- + +[id="configuring-specific-updaters"] +== Advanced updater configuration + +In some cases, users might want to configure updaters for specific behavior, for example, if you want to allowlist specific ecosystems for the Open Source Vulnerabilities (OSV) updaters. + +Advanced updater configuration might be useful for proxy deployments or air gapped deployments. Configuration for specific updaters in these scenarios can be passed by putting a key underneath the `config` environment variable of the `updaters` object. Users should examine their Clair logs to double-check names. + +The following YAML snippets detail the various settings available to some Clair updaters. + +[IMPORTANT] +==== +For most users, advanced updater configuration is unnecessary. +==== + +[discrete] +=== Configuring the alpine updater + +[source,yaml] +---- +#... +updaters: + sets: + - alpine + config: + alpine: + url: https://secdb.alpinelinux.org/ +#... +---- + +[discrete] +=== Configuring the debian updater + +[source,yaml] +---- +#... +updaters: + sets: + - debian + config: + debian: + mirror_url: https://deb.debian.org/ + json_url: https://security-tracker.debian.org/tracker/data/json +#... +---- + +[discrete] +=== Configuring the clair.cvss updater + +[source,yaml] +---- +#... +updaters: + config: + clair.cvss: + url: https://nvd.nist.gov/feeds/json/cve/1.1/ +#... +---- + +[discrete] +=== Configuring the oracle updater + +[source,yaml] +---- +#... +updaters: + sets: + - oracle + config: + oracle-2023-updater: + url: + - https://linux.oracle.com/security/oval/com.oracle.elsa-2023.xml.bz2 + oracle-2022-updater: + url: + - https://linux.oracle.com/security/oval/com.oracle.elsa-2022.xml.bz2 +#...
+---- + +[discrete] +=== Configuring the photon updater + +[source,yaml] +---- +#... +updaters: + sets: + - photon + config: + photon: + url: https://packages.vmware.com/photon/photon_oval_definitions/ +#... +---- + +[discrete] +=== Configuring the rhel updater + +[source,yaml] +---- +#... +updaters: + sets: + - rhel + config: + rhel: + url: https://access.redhat.com/security/data/oval/v2/PULP_MANIFEST + ignore_unpatched: true <1> +#... +---- +<1> Boolean. Whether to include information about vulnerabilities that do not have corresponding patches or updates available. + +[discrete] +=== Configuring the rhcc updater + +[source,yaml] +---- +#... +updaters: + sets: + - rhcc + config: + rhcc: + url: https://access.redhat.com/security/data/metrics/cvemap.xml +#... +---- + +[discrete] +=== Configuring the suse updater + +[source,yaml] +---- +#... +updaters: + sets: + - suse + config: + suse: + url: https://support.novell.com/security/oval/ +#... +---- + +[discrete] +=== Configuring the ubuntu updater + +[source,yaml] +---- +#... +updaters: + config: + ubuntu: + url: https://api.launchpad.net/1.0/ + name: ubuntu + force: <1> + - name: focal <2> + version: 20.04 <3> +#... +---- +<1> Used to force the inclusion of specific distribution and version details in the resulting UpdaterSet, regardless of their status in the API response. Useful when you want to ensure that particular distributions and versions are consistently included in your updater configuration. +<2> Specifies the distribution name that you want to force to be included in the UpdaterSet. +<3> Specifies the version of the distribution you want to force into the UpdaterSet. + +[discrete] +=== Configuring the osv updater + +[source,yaml] +---- +#... +updaters: + sets: + - osv + config: + osv: + url: https://osv-vulnerabilities.storage.googleapis.com/ + allowlist: <1> + - npm + - pypi +#... +---- +<1> The list of ecosystems to allow. When left unset, all ecosystems are allowed. Must be lowercase. 
For a list of supported ecosystems, see the documentation for link:https://ossf.github.io/osv-schema/#affectedpackage-field[defined ecosystems]. + +[id="disabling-clair-updater-component-managed-db"] +== Disabling the Clair Updater component + +In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment. + +In the following example, Clair updaters are disabled: + +[source,yaml] +---- +#... +matcher: + disable_updaters: true +#... +---- + + +//// + + +The following sections outline how one might configure specific updaters in Clair when it is being used in a proxy environment. + +[IMPORTANT] +==== +These are examples, and depending on how your proxy server is configured to route requests might impact how your `clair-config.yaml` file structure is determined. +==== + +[discrete] +=== Configuring the alpine updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - apline + config: + alpine: + url: https:///secdb/alpine/ <1> +---- +<1> Based on the `alpine` updater URL `\https://secdb.alpinelinux.org/`. + +[discrete] +=== Configuring the aws updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - aws + config: + aws: + url: https:///updates/x86_64/mirror.list <1> + url: https:///core/latest/x86_64/mirror.list <2> + url: https:///al2023/core/mirrors/latest/x86_64/mirror.list <3> +---- +<1> Based on the `aws` updater URL `\http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list`. +<2> Based on the `aws` updater URL `\https://cdn.amazonlinux.com/2/core/latest/x86_64/mirror.list`. +<3> Based on the `aws` updater URL `\https://cdn.amazonlinux.com/al2023/core/mirrors/latest/x86_64/mirror.list`. + +[discrete] +=== Configuring the debian updater for proxy environments + +[source,yaml] +---- +#... 
+updaters: + sets: + - debian + config: + debian: + mirror_url: https:///debian-archive/ <1> + json_url: https:///debian-json/ <2> +---- +<1> Based on the `debian` updater URL `\https://deb.debian.org/`. +<2> Based on the `debian` updater URL `\https://security-tracker.debian.org/tracker/data/json`. + +[discrete] +=== Configuring the clair.cvss updater for proxy environments + +[source,yaml] +---- +#... +updaters: + config: + clair.cvss: + url: https:///feeds/json/cve/1.1/ <1> +---- +<1> Based on the `clair.cvss` updater URL `\https://nvd.nist.gov/feeds/json/cve/1.1/`. + +[discrete] +=== Configuring the oracle updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - oracle + config: + oracle: + url: https:///security/oval/com.oracle.elsa-*.xml.bz2 <1> +#... +---- +<1> Based on the `oracle` updater URL `\https://linux.oracle.com/security/oval/com.oracle.elsa-*.xml.bz2`. + +[discrete] +=== Configuring the photon updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - photon + config: + photon: + url: https:///photon/photon_oval_definitions/ <1> +#... +---- +<1> Based on the `photon` updater URL `\https://packages.vmware.com/photon/photon_oval_definitions/`. + + +[discrete] +=== Configuring the rhel updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - rhel + config: + rhel: + url: https:///mirror/oval/PULP_MANIFEST <1> + url: https:///security/cve/ <2> + ignore_unpatched: true <3> +#... +---- +<1> Based on the `rhel` updater URL `\https://access.redhat.com/security/data/oval/v2/PULP_MANIFEST`. +<2> Based on the `rhel` updater URL `\https://access.redhat.com/security/cve/`. +<3> Boolean. Whether to include information about vulnerabilities that do not have corresponding patches or updates available. + +[discrete] +=== Configuring the rhcc updater for proxy environments + +[source,yaml] +---- +#... 
+updaters: + sets: + - rhcc + config: + rhcc: + url: https:///security/data/metrics/cvemap.xml <1> +#... +---- +<1> Based on the `rhcc` updater URL `\https://access.redhat.com/security/data/metrics/cvemap.xml`. + +[discrete] +=== Configuring the suse updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - suse + config: + suse: + url: https:///security/oval/ <1> +#... +---- +<1> Based on the `suse` updater URL `\https://support.novell.com/security/oval/`. + +[discrete] +=== Configuring the ubuntu updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - ubuntu + config: + ubuntu: + - url: https:///ubuntu-cve-oval/ <1> + - url: https:///ubuntu-launchpad-api/ <2> + name: ubuntu + force: <3> + - name: focal <4> + version: 20.04 <5> +#... +---- +<1> Based on the `ubuntu` updater URL `\https://security-metadata.canonical.com/oval/com.ubuntu.*.cve.oval.xml`. +<2> Based on the `ubuntu` updater URL `\https://api.launchpad.net/1.0/`. +<3> Used to force the inclusion of specific distribution and version details in the resulting UpdaterSet, regardless of their status in the API response. Useful when you want to ensure that particular distributions and versions are consistently included in your updater configuration. +<4> Specifies the distribution name that you want to force to be included in the UpdaterSet. In this case, it's set to `focal` to specify the Ubuntu distribution with the name `focal`. +<5> Specifies the version of the distribution you want to force into the UpdaterSet. Here, it's set to `20.04` to indicate that the specific version of the `focal` distribution to be included is `20.04`. + +[discrete] +=== Configuring the osv updater for proxy environments + +[source,yaml] +---- +#... +updaters: + sets: + - osv + config: + osv: + url: https:///osv-vulnerabilities/ <1> + allowlist: <2> + - npm + - PyPI +#... +---- +<1> Based on the `osv` updater URL `\https://osv-vulnerabilities.storage.googleapis.com/`. 
+<2> The list of ecosystems to allow. When left unset, all ecosystems are allowed. For a list of supported ecosystems, see the documentation for link:https://ossf.github.io/osv-schema/#affectedpackage-field[defined ecosystems]. + +[id="disabling-clair-updater-component-managed-db"] +== Disabling the Clair Updater component + +In some scenarios, users might want to disable the Clair updater component. Disabling updaters is required when running {productname} in a disconnected environment. + +In the following example, Clair updaters are disabled: + +[source,yaml] +---- +#... +matcher: + disable_updaters: true +#... +---- +//// \ No newline at end of file diff --git a/modules/configuring-custom-clair-database-managed.adoc b/modules/configuring-custom-clair-database-managed.adoc new file mode 100644 index 000000000..245070fb9 --- /dev/null +++ b/modules/configuring-custom-clair-database-managed.adoc @@ -0,0 +1,77 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="configuring-custom-clair-database-managed"] += Configuring a custom Clair database with a managed Clair configuration + +{productname} on {ocp} allows users to provide their own Clair database. + +Use the following procedure to create a custom Clair database. + +.Procedure + +. 
Create a Quay configuration bundle secret that includes the `clair-config.yaml` by entering the following command: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml config-bundle-secret +---- ++ +.Example Clair `config.yaml` file ++ +[source,yaml] +---- +indexer: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable + layer_scan_concurrency: 6 + migrations: true + scanlock_retry: 11 +log_level: debug +matcher: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable + migrations: true +metrics: + name: prometheus +notifier: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslmode=disable + migrations: true +---- ++ +[NOTE] +==== +* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. +* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. +==== + +. Add the `clair-config.yaml` file to your bundle secret, for example: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: config-bundle-secret + namespace: quay-enterprise +data: + config.yaml: + clair-config.yaml: +---- ++ +[NOTE] +==== +* When updated, the provided `clair-config.yaml` file is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module. +==== + +. 
You can check the status of your Clair pod by clicking the commit in the *Build History* page, or by running `oc get pods -n `. For example: ++ +---- +$ oc get pods -n +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- \ No newline at end of file diff --git a/modules/configuring-custom-clair-database.adoc b/modules/configuring-custom-clair-database.adoc new file mode 100644 index 000000000..b082934fb --- /dev/null +++ b/modules/configuring-custom-clair-database.adoc @@ -0,0 +1,85 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="configuring-custom-clair-database"] += Configuring a custom Clair database with an unmanaged Clair database + +{productname} on {ocp} allows users to provide their own Clair database. + +Use the following procedure to create a custom Clair database. + +[NOTE] +==== +The following procedure sets up Clair with SSL/TLS certifications. To view a similar procedure that does not set up Clair with SSL/TLS certifications, see "Configuring a custom Clair database with a managed Clair configuration". +==== + +.Procedure + +. 
Create a Quay configuration bundle secret that includes the `clair-config.yaml` by entering the following command: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml --from-file extra_ca_cert_rds-ca-2019-root.pem=./rds-ca-2019-root.pem --from-file clair-config.yaml=./clair-config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret +---- ++ +.Example Clair `config.yaml` file ++ +[source,yaml] +---- +indexer: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca + layer_scan_concurrency: 6 + migrations: true + scanlock_retry: 11 +log_level: debug +matcher: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca + migrations: true +metrics: + name: prometheus +notifier: + connstring: host=quay-server.example.com port=5432 dbname=quay user=quayrdsdb password=quayrdsdb sslrootcert=/run/certs/rds-ca-2019-root.pem sslmode=verify-ca + migrations: true +---- ++ +[NOTE] +==== +* The database certificate is mounted under `/run/certs/rds-ca-2019-root.pem` on the Clair application pod in the `clair-config.yaml`. It must be specified when configuring your `clair-config.yaml`. +* An example `clair-config.yaml` can be found at link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Clair on OpenShift config]. +==== + +. 
Add the `clair-config.yaml` file to your bundle secret, for example: ++ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: config-bundle-secret + namespace: quay-enterprise +data: + config.yaml: + clair-config.yaml: + extra_ca_cert_: + ssl.crt: + ssl.key: +---- ++ +[NOTE] +==== +When updated, the provided `clair-config.yaml` file is mounted into the Clair pod. Any fields not provided are automatically populated with defaults using the Clair configuration module. +==== + +. You can check the status of your Clair pod by clicking the commit in the *Build History* page, or by running `oc get pods -n `. For example: ++ +---- +$ oc get pods -n +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- \ No newline at end of file diff --git a/modules/configuring-oci-media-types.adoc b/modules/configuring-oci-media-types.adoc new file mode 100644 index 000000000..dc16a1c62 --- /dev/null +++ b/modules/configuring-oci-media-types.adoc @@ -0,0 +1,106 @@ +// Document included in the following assemblies: + +// unused + +:_content-type: REFERENCE +[id="supported-oci-media-types"] += Configuring artifact types + +As a {productname} administrator, you can configure Open Container Initiative (OCI) artifact types and other experimental artifact types through the `FEATURE_GENERAL_OCI_SUPPORT` and `ALLOWED_OCI_ARTIFACT_TYPES` configuration fields. 
+ +The following Open Container Initiative (OCI) artifact types are built into {productname} by default and are enabled through the *FEATURE_GENERAL_OCI_SUPPORT* configuration field: + +[cols="3a,1a,2a",options="header"] +|=== +| Field | Media Type | Supported content types + +| *Helm* | `application/vnd.cncf.helm.config.v1+json` | `application/tar+gzip`, `application/vnd.cncf.helm.chart.content.v1.tar+gzip` + +| *Cosign* | `application/vnd.oci.image.config.v1+json` | `application/vnd.dev.cosign.simplesigning.v1+json`, `application/vnd.dsse.envelope.v1+json` + +| *SPDX* | `application/vnd.oci.image.config.v1+json` | `text/spdx`, `text/spdx+xml`, `text/spdx+json` + +| *Syft* | `application/vnd.oci.image.config.v1+json` | `application/vnd.syft+json` + +| *CycloneDX* | `application/vnd.oci.image.config.v1+json` | `application/vnd.cyclonedx`, `application/vnd.cyclonedx+xml`, `application/vnd.cyclonedx+json` + +| *In-toto* | `application/vnd.oci.image.config.v1+json` | `application/vnd.in-toto+json` + +| *Unknown* | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego` | `application/vnd.cncf.openpolicyagent.policy.layer.v1+rego`, `application/vnd.cncf.openpolicyagent.data.layer.v1+json` + +|=== + +Additionally, {productname} uses the _ZStandard_, or _zstd_, compression format to reduce the size of container images or other related artifacts. Zstd helps optimize storage and improve transfer speeds when working with container images. + +Use the following procedures to configure support for the default and experimental OCI media types. + +[id="configuring-oci-media-types-proc"] +== Configuring OCI artifact types + +Use the following procedure to configure artifact types that are embedded in {productname} by default. + +.Prerequisites + +* You have {productname} administrator privileges. + +.Procedure + +* In your {productname} `config.yaml` file, enable general OCI support by setting the `FEATURE_GENERAL_OCI_SUPPORT` field to `true`.
For example: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +---- ++ +With `FEATURE_GENERAL_OCI_SUPPORT` set to true, {productname} users can now push and pull charts of the default artifact types to their {productname} deployment. + +[id="configuring-additional-oci-media-types-proc"] +== Configuring additional artifact types + +Use the following procedure to configure additional, and specific, artifact types for your {productname} deployment. + +[NOTE] +==== +Using the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field, you can restrict which artifact types are accepted by your {productname} registry. If you want your {productname} deployment to accept all artifact types, see "Configuring unknown media types". +==== + +.Prerequisites + +* You have {productname} administrator privileges. + +.Procedure + +* Add the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field, along with the configuration and layer types: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +ALLOWED_OCI_ARTIFACT_TYPES: + : + - + - + + : + - + - +---- ++ +For example, you can add Singularity Image Format (SIF) support by adding the following to your `config.yaml` file: ++ +[source,yaml] +---- +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.dev.cosign.simplesigning.v1+json + application/vnd.cncf.helm.config.v1+json: + - application/tar+gzip + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +---- ++ +[NOTE] +==== +When adding OCI artifact types that are not configured by default, {productname} administrators will also need to manually add support for Cosign and Helm if desired. +==== ++ +Now, users can tag SIF images for their {productname} registry.
\ No newline at end of file diff --git a/modules/configuring-oidc-authentication.adoc b/modules/configuring-oidc-authentication.adoc new file mode 100644 index 000000000..9ebecb2b7 --- /dev/null +++ b/modules/configuring-oidc-authentication.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="configuring-oidc-authentication"] += Configuring OIDC for {productname} + +Configuring OpenID Connect (OIDC) for {productname} can provide several benefits to your deployment. For example, OIDC allows users to authenticate to {productname} using their existing credentials from an OIDC provider, such as link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/7.0[Red Hat Single Sign-On], Google, Github, Microsoft, or others. Other benefits of OIDC include centralized user management, enhanced security, and single sign-on (SSO). Overall, OIDC configuration can simplify user authentication and management, enhance security, and provide a seamless user experience for {productname} users. + +The following procedures show you how to configure Microsoft Entra ID on a standalone deployment of {productname}, and how to configure Red Hat Single Sign-On on an Operator-based deployment of {productname}. These procedures are interchangeable depending on your deployment type. + +[NOTE] +==== +By following these procedures, you will be able to add any OIDC provider to {productname}, regardless of which identity provider you choose to use. +==== + +[id="configuring-entra-oidc"] +== Configuring Microsoft Entra ID OIDC on a standalone deployment of {productname} + +By integrating Microsoft Entra ID authentication with {productname}, your organization can take advantage of the centralized user management and security features offered by Microsoft Entra ID. 
Some features include the ability to manage user access to {productname} repositories based on their Microsoft Entra ID roles and permissions, and the ability to enable multi-factor authentication and other security features provided by Microsoft Entra ID. + +Azure Active Directory (Microsoft Entra ID) authentication for {productname} allows users to authenticate and access {productname} using their Microsoft Entra ID credentials. + +Use the following procedure to configure Microsoft Entra ID by updating the {productname} `config.yaml` file directly. + +.Procedure + +[NOTE] +==== +* Using the following procedure, you can add any OIDC provider to {productname}, regardless of which identity provider is being added. +* If your system has a firewall in use, or proxy enabled, you must whitelist all Azure API endpoints for each OAuth application that is created. Otherwise, the following error is returned: `x509: certificate signed by unknown authority`. +==== + +. Use the following reference and update your `config.yaml` file with your desired OIDC provider's credentials: ++ +[source,yaml] +---- +AUTHENTICATION_TYPE: OIDC +# ... +AZURE_LOGIN_CONFIG: <1> + CLIENT_ID: <2> + CLIENT_SECRET: <3> + OIDC_SERVER: <4> + SERVICE_NAME: Microsoft Entra ID <5> + VERIFIED_EMAIL_CLAIM_NAME: <6> +# ... +---- +<1> The parent key that holds the OIDC configuration settings. In this example, the parent key used is `AZURE_LOGIN_CONFIG`, however, the string `AZURE` can be replaced with any arbitrary string based on your specific needs, for example `ABC123`. However, the following strings are not accepted: `GOOGLE`, `GITHUB`. These strings are reserved for their respective identity platforms and require a specific `config.yaml` entry contingent upon which platform you are using. +<2> The client ID of the application that is being registered with the identity provider. +<3> The client secret of the application that is being registered with the identity provider.
+<4> The address of the OIDC server that is being used for authentication. In this example, you must use `sts.windows.net` as the issuer identifier. Using `https://login.microsoftonline.com` results in the following error: `Could not create provider for AzureAD. Error: oidc: issuer did not match the issuer returned by provider, expected "https://login.microsoftonline.com/73f2e714-xxxx-xxxx-xxxx-dffe1df8a5d5" got "https://sts.windows.net/73f2e714-xxxx-xxxx-xxxx-dffe1df8a5d5/"`. +<5> The name of the service that is being authenticated. +<6> The name of the claim that is used to verify the email address of the user. + +. Proper configuration of Microsoft Entra ID results in three redirects with the following format: ++ +* `\https://QUAY_HOSTNAME/oauth2//callback` +* `\https://QUAY_HOSTNAME/oauth2//callback/attach` +* `\https://QUAY_HOSTNAME/oauth2//callback/cli` + +. Restart your {productname} deployment. \ No newline at end of file diff --git a/modules/configuring-openshift-tls-component-builds.adoc b/modules/configuring-openshift-tls-component-builds.adoc new file mode 100644 index 000000000..8f474bd93 --- /dev/null +++ b/modules/configuring-openshift-tls-component-builds.adoc @@ -0,0 +1,35 @@ +:_content-type: CONCEPT +[id="configuring-openshift-tls-component-builds"] += Configuring the {ocp} TLS component for builds + +The `tls` component of the `QuayRegistry` custom resource definition (CRD) allows you to control whether SSL/TLS are managed by the {productname} Operator, or self managed. In its current state, {productname} does not support the _builds_ feature, or the _builder_ workers, when the `tls` component is managed by the {productname} Operator. + +When setting the `tls` component to `unmanaged`, you must supply your own `ssl.cert` and `ssl.key` files.
Additionally, if you want your cluster to support _builders_, or the worker nodes that are responsible for building images, you must add both the `Quay` route and the `builder` route name to the SAN list in the certificate. Alternatively, however, you could use a wildcard. + +The following procedure shows you how to add the _builder_ route. + +.Prerequisites + +* You have set the `tls` component to `unmanaged` and uploaded custom SSL/TLS certificates to the {productname} Operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. + +.Procedure + +* In the configuration file that defines your SSL/TLS certificate parameters, for example, `openssl.cnf`, add the following information to the certificate's Subject Alternative Name (SAN) field. For example: ++ +[source,yaml] +---- +# ... +[alt_names] +-quay-builder-.:443 +# ... +---- ++ +For example: ++ +[source,yaml] +---- +# ... +[alt_names] +example-registry-quay-builder-quay-enterprise.apps.cluster-new.gcp.quaydev.org:443 +# ... +---- \ No newline at end of file diff --git a/modules/configuring-port-mapping.adoc b/modules/configuring-port-mapping.adoc new file mode 100644 index 000000000..7331a5f26 --- /dev/null +++ b/modules/configuring-port-mapping.adoc @@ -0,0 +1,38 @@ +:_content-type: PROCEDURE +[id="configuring-port-mapping"] += Configuring port mapping for {productname} + +You can use port mappings to expose ports on the host and then use these ports in combination with the host IP address or host name to navigate to the {productname} endpoint. + +.Procedure + +. 
Enter the following command to obtain your static IP address for your host system: ++ +[source,terminal] +---- +$ ip a +---- ++ +.Example output ++ +[source,terminal] +---- +--- + link/ether 6c:6a:77:eb:09:f1 brd ff:ff:ff:ff:ff:ff + inet 192.168.1.132/24 brd 192.168.1.255 scope global dynamic noprefixroute wlp82s0 +--- +---- + +. Add the IP address and a local hostname, for example, `quay-server.example.com` to your `/etc/hosts` file that will be used to reach the {productname} endpoint. You can confirm that the IP address and hostname have been added to the `/etc/hosts` file by entering the following command: ++ +[source,terminal] +---- +$ cat /etc/hosts +---- ++ +.Example output ++ +[source,terminal] +---- +192.168.1.138 quay-server.example.com +---- \ No newline at end of file diff --git a/modules/configuring-quay-ocp-aws-sts.adoc b/modules/configuring-quay-ocp-aws-sts.adoc new file mode 100644 index 000000000..935aca749 --- /dev/null +++ b/modules/configuring-quay-ocp-aws-sts.adoc @@ -0,0 +1,83 @@ +[id="configuring-quay-ocp-aws-sts"] += Configuring {productname-ocp} to use AWS STS + +Use the following procedure to edit your {productname-ocp} `config.yaml` file to use AWS STS. + +[NOTE] +==== +You can also edit and re-deploy your {productname-ocp} `config.yaml` file directly instead of using the {ocp} UI. +==== + +.Prerequisites + +* You have configured a Role ARN. +* You have generated a User Access Key. +* You have generated a User Secret Key. + +.Procedure + +. On the *Home* page of your {ocp} deployment, click *Operators* -> *Installed Operators*. + +. Click *Red Hat Quay*. + +. Click *Quay Registry* and then the name of your {productname} registry. + +. Under *Config Bundle Secret*, click the name of your registry configuration bundle, for example, *quay-registry-config-bundle-qet56*. + +. On the configuration bundle page, click *Actions* to reveal a drop-down menu. Then click *Edit Secret*. + +. 
Update the `DISTRIBUTED_STORAGE_CONFIG` fields of your `config.yaml` file with the following information:
diff --git a/modules/configuring-quay-standalone-aws-sts.adoc b/modules/configuring-quay-standalone-aws-sts.adoc new file mode 100644 index 000000000..c00c4b556 --- /dev/null +++ b/modules/configuring-quay-standalone-aws-sts.adoc @@ -0,0 +1,62 @@ +[id="configuring-quay-standalone-aws-sts"] += Configuring {productname} to use AWS STS + +Use the following procedure to edit your {productname} `config.yaml` file to use AWS STS. + +.Procedure + +. Update your `config.yaml` file for {productname} to include the following information: ++ +[source,yaml] +---- +# ... +DISTRIBUTED_STORAGE_CONFIG: + default: + - STSS3Storage + - sts_role_arn: <1> + s3_bucket: <2> + storage_path: <3> + s3_region: <4> + sts_user_access_key: <5> + sts_user_secret_key: <6> +# ... +---- +<1> The unique Amazon Resource Name (ARN) required when configuring AWS STS +<2> The name of your s3 bucket. +<3> The storage path for data. Usually `/datastorage`. +<4> Optional. The Amazon Web Services region. Defaults to `us-east-1`. +<5> The generated AWS S3 user access key required when configuring AWS STS. +<6> The generated AWS S3 user secret key required when configuring AWS STS. + +. Restart your {productname} deployment. + +.Verification + +. Tag a sample image, for example, `busybox`, that will be pushed to the repository. For example: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- + +. Push the sample image by running the following command: ++ +[source,terminal] +---- +$ podman push //busybox:test +---- + +. Verify that the push was successful by navigating to the Organization that you pushed the image to in your {productname} registry -> *Tags*. + +. Navigate to the Amazon Web Services (AWS) console and locate your s3 bucket. + +. Click the name of your s3 bucket. + +. On the *Objects* page, click *datastorage/*. + +. 
On the *datastorage/* page, the following resources should be seen:
+* You have configured link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploying_the_red_hat_quay_operator_on_openshift_container_platform/operator-config-cli#operator-custom-ssl-certs-config-bundle[SSL/TLS for your {productname-ocp} deployment] _and_ for Red Hat Single Sign-On. +* You have generated a single Certificate Authority (CA) and uploaded it to your Red Hat Single Sign-On Operator _and_ to your {productname} configuration. + +.Procedure + +. Navigate to the Red Hat Single Sign-On *Admin Console*. + +.. On the {ocp} *Web Console*, navigate to *Network* -> *Route*. + +.. Select the *Red Hat Single Sign-On* project from the drop-down list. + +.. Find the Red Hat Single Sign-On *Admin Console* in the *Routes* table. + +. Select the Realm that you will use to configure {productname}. + +. Click *Clients* under the *Configure* section of the navigation panel, and then click the *Create* button to add a new OIDC for {productname}. + +. Enter the following information. ++ +* **Client ID:** `quay-enterprise` +* **Client Protocol:** `openid-connect` +* **Root URL:** `\https:///` + +. Click *Save*. This results in a redirect to the *Clients* setting panel. + +. Navigate to *Access Type* and select *Confidential*. + +. Navigate to *Valid Redirect URIs*. You must provide three redirect URIs. The value should be the fully qualified domain name of the {productname} registry appended with `/oauth2/redhatsso/callback`. For example: ++ +* `\https:///oauth2/redhatsso/callback` +* `\https:///oauth2/redhatsso/callback/attach` +* `\https:///oauth2/redhatsso/callback/cli` + +. Click *Save* and navigate to the new *Credentials* setting. + +. Copy the value of the Secret. + +[id="configuring-quay-operator-use-redhat-sso"] +=== Configuring the {productname} Operator to use Red Hat Single Sign-On + +Use the following procedure to configure Red Hat Single Sign-On with the {productname} Operator. 
+ +.Prerequisites + +* You have set up the Red Hat Single Sign-On Operator. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/7.6/html-single/server_installation_and_configuration_guide/index#operator[Red Hat Single Sign-On Operator]. +* You have configured link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploying_the_red_hat_quay_operator_on_openshift_container_platform/operator-config-cli#operator-custom-ssl-certs-config-bundle[SSL/TLS for your {productname-ocp} deployment] _and_ for Red Hat Single Sign-On. +* You have generated a single Certificate Authority (CA) and uploaded it to your Red Hat Single Sign-On Operator _and_ to your {productname} configuration. + +.Procedure + +. Edit your {productname} `config.yaml` file by navigating to *Operators* -> *Installed Operators* -> *Red Hat Quay* -> *Quay Registry* -> *Config Bundle Secret*. Then, click *Actions* -> *Edit Secret*. Alternatively, you can update the `config.yaml` file locally. + +. Add the following information to your {productname-ocp} `config.yaml` file: ++ +[source,yaml] +---- +# ... +RHSSO_LOGIN_CONFIG: <1> + CLIENT_ID: <2> + CLIENT_SECRET: <3> + OIDC_SERVER: <4> + SERVICE_NAME: <5> + SERVICE_ICON: <6> + VERIFIED_EMAIL_CLAIM_NAME: <7> + PREFERRED_USERNAME_CLAIM_NAME: <8> + LOGIN_SCOPES: <9> + - 'openid' +# ... +---- +<1> The parent key that holds the OIDC configuration settings. In this example, the parent key used is `AZURE_LOGIN_CONFIG`, however, the string `AZURE` can be replaced with any arbitrary string based on your specific needs, for example `ABC123`.However, the following strings are not accepted: `GOOGLE`, `GITHUB`. These strings are reserved for their respective identity platforms and require a specific `config.yaml` entry contingent upon when platform you are using. +<2> The client ID of the application that is being registered with the identity provider, for example, `quay-enterprise`. 
+<3> The Client Secret from the *Credentials* tab of the `quay-enterprise` OIDC client settings. +<4> The fully qualified domain name (FQDN) of the Red Hat Single Sign-On instance, appended with `/auth/realms/` and the Realm name. You must include the forward slash at the end, for example, `\https://sso-redhat.example.com//auth/realms//`. +<5> The name that is displayed on the {productname} login page, for example, `Red hat Single Sign On`. +<6> Changes the icon on the login screen. For example, `/static/img/RedHat.svg`. +<7> The name of the claim that is used to verify the email address of the user. +<8> The name of the claim that is used to verify the email address of the user. +<9> The scopes to send to the OIDC provider when performing the login flow, for example, `openid`. + +. Restart your {productname-ocp} deployment with Red Hat Single Sign-On enabled. \ No newline at end of file diff --git a/modules/configuring-resources-managed-components.adoc b/modules/configuring-resources-managed-components.adoc new file mode 100644 index 000000000..097afe83a --- /dev/null +++ b/modules/configuring-resources-managed-components.adoc @@ -0,0 +1,139 @@ +:_content-type: PROCEDURE +[id="configuring-resources-managed-components"] += Configuring resources for managed components on {ocp} + +You can manually adjust the resources on {productname-ocp} for the following components that have running pods: + +* `quay` +* `clair` +* `mirroring` +* `clairpostgres` +* `postgres` + +This feature allows users to run smaller test clusters, or to request more resources upfront in order to avoid partially degraded `Quay` pods. Limitations and requests can be set in accordance with link:https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes[Kubernetes resource units]. + +The following components should not be set lower than their minimum requirements. 
This can cause issues with your deployment and, in some cases, result in failure of the pod's deployment. + +* `quay`: Minimum of 6 GB, 2vCPUs +* `clair`: Recommended of 2 GB memory, 2 vCPUs +* `clairpostgres`: Minimum of 200 MB + +You can configure resource requests on the {ocp} UI, or by directly by updating the `QuayRegistry` YAML. + +[IMPORTANT] +==== +The default values set for these components are the suggested values. Setting resource requests too high or too low might lead to inefficient resource utilization, or performance degradation, respectively. +==== + +[id="configuring-resources-ocp-ui"] +== Configuring resource requests by using the {ocp} UI + +Use the following procedure to configure resources by using the {ocp} UI. + +.Procedure + +. On the {ocp} developer console, click *Operators* -> *Installed Operators* -> *Red Hat Quay*. + +. Click *QuayRegistry*. + +. Click the name of your registry, for example, *example-registry*. + +. Click *YAML*. + +. In the `spec.components` field, you can override the resource of the `quay`, `clair`, `mirroring` `clairpostgres`, and `postgres` resources by setting values for the `.overrides.resources.limits` and the `overrides.resources.requests` fields. 
For example: ++ +[source,yaml] +---- +spec: + components: + - kind: clair + managed: true + overrides: + resources: + limits: + cpu: "5" # Limiting to 5 CPU (equivalent to 5000m or 5000 millicpu) + memory: "18Gi" # Limiting to 18 Gibibytes of memory + requests: + cpu: "4" # Requesting 4 CPU + memory: "4Gi" # Requesting 4 Gibibytes of memory + - kind: postgres + managed: true + overrides: + resources: + limits: {} <1> + requests: + cpu: "700m" # Requesting 700 millicpu or 0.7 CPU + memory: "4Gi" # Requesting 4 Gibibytes of memory + - kind: mirror + managed: true + overrides: + resources: + limits: <2> + requests: + cpu: "800m" # Requesting 800 millicpu or 0.8 CPU + memory: "1Gi" # Requesting 1 Gibibyte of memory + - kind: quay + managed: true + overrides: + resources: + limits: + cpu: "4" # Limiting to 4 CPU + memory: "10Gi" # Limiting to 10 Gibibytes of memory + requests: + cpu: "4" # Requesting 4 CPU + memory: "10Gi" # Requesting 10 Gibi of memory + - kind: clairpostgres + managed: true + overrides: + resources: + limits: + cpu: "800m" # Limiting to 800 millicpu or 0.8 CPU + memory: "3Gi" # Limiting to 3 Gibibytes of memory + requests: {} +---- +<1> Setting the `limits` or `requests` fields to `{}` uses the default values for these resources. +<2> Leaving the `limits` or `requests` field empty puts no limitations on these resources. + +[id="configuring-resources-ocp-yaml"] +== Configuring resource requests by editing the QuayRegistry YAML + +You can re-configure {productname} to configure resource requests after you have already deployed a registry. This can be done by editing the `QuayRegistry` YAML file directly and then re-deploying the registry. + +.Procedure + +. Optional: If you do not have a local copy of the `QuayRegistry` YAML file, enter the following command to obtain it: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml > quayregistry.yaml +---- + +. 
Open the `quayregistry.yaml` created from Step 1 of this procedure and make the desired changes. For example: ++ +[source,yaml] +---- + - kind: quay + managed: true + overrides: + resources: + limits: {} + requests: + cpu: "0.7" # Requesting 0.7 CPU (equivalent to 500m or 500 millicpu) + memory: "512Mi" # Requesting 512 Mebibytes of memory +---- + +. Save the changes. + +. Apply the {productname} registry using the updated configurations by running the following command: ++ +[source,terminal] +---- +$ oc replace -f quayregistry.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry replaced +---- \ No newline at end of file diff --git a/modules/configuring-ssl-tls.adoc b/modules/configuring-ssl-tls.adoc new file mode 100644 index 000000000..58b4765f9 --- /dev/null +++ b/modules/configuring-ssl-tls.adoc @@ -0,0 +1,5 @@ +:_content-type: PROCEDURE +[id="configuring-ssl-tls"] += Configuring SSL/TLS for standalone {productname} deployments + +For standalone {productname} deployments, SSL/TLS certificates must be configured by using the command-line interface and by updating your `config.yaml` file manually. \ No newline at end of file diff --git a/modules/connecting-s3-timeout.adoc b/modules/connecting-s3-timeout.adoc new file mode 100644 index 000000000..2865995f4 --- /dev/null +++ b/modules/connecting-s3-timeout.adoc @@ -0,0 +1,32 @@ +:_content-type: PROCEDURE +[id="connecting-s3-timeout"] += Connection to AWS s3 bucket errors out + +In some cases, {productname} attempts to connect to the s3 bucket that is described in a `config.yaml` file and errors out. Running {productname} in debug might reveal the following error: `gunicorn-registry stdout | 2022-10-21 14:38:36,892 [253] [DEBUG] [urllib3.connectionpool] https://s3.ap-south-1.amazonaws.com:443 "POST /quay-bucket-1/storage/quayregistrystorage/uploads/23cd6e62-264c-48e4-94a7-21061b0e4ef1?uploads HTTP/1.1" 400 None`. 
+ +This error occurs because the URL format of a bucket is one of two options. For example: + +* `\http://s3.amazonaws.com/[bucket_name]/` +* `http://[bucket_name].s3.amazonaws.com/` + +To resolve this issue, you must add the `s3_region` configuration parameter to your `config.yaml` file. This field is not currently embedded in the {productname} config editor, so it must be manually added. If this field is not present in your `config.yaml` file, the Authorization header explicitly mentions a different region and not the region set in the `hostname` field of your `config.yaml` file. + +The following `config.yaml` excerpt is the correct configuration when using AWS: + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - S3Storage + - host: s3.ap-south-1.amazonaws.com + s3_access_key: ***************** + s3_bucket: quay-bucket-1 + s3_secret_key: ********************************* + storage_path: /storage/quayregistrystorage + s3_region: ap-south-1 +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6993082[Quay Errors out while connecting to AWS S3 Bucket]. \ No newline at end of file diff --git a/modules/connection-issues-clair-quay-db.adoc b/modules/connection-issues-clair-quay-db.adoc new file mode 100644 index 000000000..85df3f599 --- /dev/null +++ b/modules/connection-issues-clair-quay-db.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="connection-issues-clair-quay-db"] += Clair and Quay database connection issues + +When attempting to connect to an external PostgreSQL database for {productname} and Clair, attempting to connect to the "public" schema might not be allowed. The public schema is only accessible by PostgreSQL administrators. Use the following procedure to troubleshoot connection issues. + +.Procedure + +. If you are an individual user accessing separate databases, use the following steps. + +.. 
Specify a `search_path`, for example, `\https://postgresqlco.nf/doc/en/param/search_path/` to the `Quay` and `Clair` database for the service account or user. + +... Enter the following command on the `Quay` database: ++ +[source,terminal] +---- +ALTER ROLE "role_name" SET search_path TO quay_username; +---- + +.. Enter the following command on the `Clair` database: ++ +[source,terminal] +---- +ALTER ROLE "role_name" SET search_path TO clair_username; +---- ++ +[NOTE] +==== +This will not allow one user to access the `Quay` and `Clair` database at the same time. +==== + +.. Optional. The `quay-app` pod requires the database to be created in advance to that the `quay-app-upgrade` pod assists in setting it up. As a result, schemas and tables are already populated. This causes an issue with the `search_path` setting for {productname}. To resolve this issue, add a `search_path` in your {productname} and Clair `config.yaml` files. + +... Add the following line to your {productname} `config.yaml` file: ++ +[source,yaml] +---- +DB_URI: postgresql://:@quayhostname.example.com:/quay_username?options=-csearch_path%3Dquay_username +---- + +... Reset the `Quay` database by cleaning it up and restarting the `quay-upgrade-app-pod`. + +... Configure the Clair connstring to use a `search_path` by adding the following line to your Clair `config.yaml` file: ++ +[source,yamnl] +---- +indexer: +connstring: host= port=5402 dbname=db_name user= password= sslmode=disable options=--search_path=clair_username +---- + +. In some cases, you might have a single service account or be a user that can access both databases. A database contains one or more named schemas, which in turn contains tables. Unlike databases, schemas are not rigidly separated; that is, a user can access objects in any of the schemas in the database that they are connected to if they have proper privileges. ++ +In this case, you must ensure that the tables of your {productname} and Clair are part of the same schema. 
Otherwise, unqualified queries are unable to find the tables. The queries from {productname} and Clair are all unqualified, as they expect the tables to be accessible from the database connection by default. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7004240[Issue connecting to external Clair/Quay databases]. \ No newline at end of file diff --git a/modules/content-distrib-intro.adoc b/modules/content-distrib-intro.adoc new file mode 100644 index 000000000..e6242a880 --- /dev/null +++ b/modules/content-distrib-intro.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="content-distrib-intro"] += Content distribution with {productname} + +Content distribution features in {productname} include: + + +* xref:arch-mirroring-intro[Repository mirroring] +* xref:georepl-intro[Geo-replication] +* xref:arch-airgap-intro[Deployment in air-gapped environments] + diff --git a/modules/core-distinct-registries.adoc b/modules/core-distinct-registries.adoc new file mode 100644 index 000000000..6f5444e70 --- /dev/null +++ b/modules/core-distinct-registries.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="core-distinct-registries"] += Single compared to multiple registries + +Many users consider running multiple, distinct registries. The preferred approach with {productname} is to have a single, shared registry: + +- If you want a clear separation between development and production images, or a clear separation by content origin, for example, keeping third-party images distinct from internal ones, you can use organizations and repositories, combined with role-based access control (RBAC), to achieve the desired separation. + +- Given that the image registry is a critical component in an enterprise environment, you may be tempted to use distinct deployments to test upgrades of the registry software to newer versions. 
The {productname} Operator updates the registry for patch releases as well as minor or major updates. This means that any complicated procedures are automated and, as a result, there is no requirement for you to provision multiple instances of the registry to test the upgrade. + +- With {productname}, there is no need to have a separate registry for each cluster you deploy. {productname} is proven to work at scale at link:https://quay.io[Quay.io], and can serve content to thousands of clusters. + +- Even if you have deployments in multiple data centers, you can still use a single {productname} instance to serve content to multiple physically-close data centers, or use the HA functionality with load balancers to stretch across data centers. Alternatively, you can use the {productname} geo-replication feature to stretch across physically distant data centers. This requires the provisioning of a global load balancer or DNS-based geo-aware load balancing. + +- One scenario where it may be appropriate to run multiple distinct registries, is when you want to specify different configuration for each registry. + +In summary, running a shared registry helps you to save storage, infrastructure and operational costs, but a dedicated registry might be needed in specific circumstances. 
diff --git a/modules/core-example-deployment.adoc b/modules/core-example-deployment.adoc new file mode 100644 index 000000000..161666078 --- /dev/null +++ b/modules/core-example-deployment.adoc @@ -0,0 +1,12 @@ +:_content-type: CONCEPT +[id="core-example-deployment"] += {productname} example deployments + +The following image shows three possible deployments for {productname}: + +.Deployment examples +image:178_Quay_architecture_0821_deployment_ex1.png[{productname} deployment example] + +Proof of Concept:: Running {productname}, Clair, and mirroring on a single node, with local image storage and local database +Single data center:: Running highly available {productname}, Clair ,and mirroring, on multiple nodes, with HA database and image storage +Multiple data centers:: Running highly available {productname}, Clair, and mirroring, on multiple nodes in multiple data centers, with HA database and image storage \ No newline at end of file diff --git a/modules/core-infrastructure.adoc b/modules/core-infrastructure.adoc new file mode 100644 index 000000000..8fe7f6561 --- /dev/null +++ b/modules/core-infrastructure.adoc @@ -0,0 +1,55 @@ + +:_content-type: CONCEPT +[id="arch-quay-infrastructure"] += {productname} infrastructure + +{productname} runs on any physical or virtual infrastructure, both on premise or public cloud. Deployments range from simple to massively scaled, like the following: + +* All-in-one setup on a developer notebook +* Highly available on virtual machines or on {ocp} +* Geographically dispersed across multiple availability zones and regions + +[id="arch-quay-standalone-hosts"] +== Running {productname} on standalone hosts + +You can automate the standalone deployment process by using Ansible or another automation suite. All standalone hosts require valid a {rhel} subscription. + +Proof of Concept deployment:: {productname} runs on a machine with image storage, containerized database, Redis, and optionally, Clair security scanning. 
+ +Highly available setups:: {productname} and Clair run in containers across multiple hosts. You can use `systemd` units to ensure restart on failure or reboot. ++ +High availability setups on standalone hosts require customer-provided load balancers, either low-level TCP load balancers or application load balancers, capable of terminating TLS. + +[id="arch-quay-openshift"] +== Running {productname} on OpenShift + +The {productname} Operator for {ocp} provides the following features: + +* Automated deployment and management of {productname} with customization options +* Management of {productname} and all of its dependencies +* Automated scaling and updates +* Integration with existing {ocp} processes like GitOps, monitoring, alerting, logging +* Provision of object storage with limited availability, backed by the multi-cloud object gateway (NooBaa), as part of the Red Hat OpenShift Data Foundation (ODF) Operator. This service does not require an additional subscription. +* Scaled-out, high availability object storage provided by the ODF Operator. This service requires an additional subscription. + +{productname} can run on {ocp} infrastructure nodes. As a result, no further subscriptions are required. 
Running {productname} on {ocp} has the following benefits: + +* **Zero to Hero:** Simplified deployment of {productname} and associated components means that you can start using the product immediately +* **Scalability:** Use cluster compute capacity to manage demand through automated scaling, based on actual load +* **Simplified Networking:** Automated provisioning of load balancers and traffic ingress secured through HTTPS using {ocp} TLS certificates and Routes +* **Declarative configuration management:** Configurations stored in CustomResource objects for GitOps-friendly lifecycle management +* **Repeatability:** Consistency regardless of the number of replicas of {productname} and Clair +* **OpenShift integration:** Additional services to use {ocp} Monitoring and Alerting facilities to manage multiple {productname} deployments on a single cluster + +[id="arch-integrating-standalone-ocp"] +== Integrating standalone {productname} with {ocp} + +While the {productname} Operator ensures seamless deployment and management of {productname} running on {ocp}, it is also possible to run {productname} in standalone mode and then serve content to one or many {ocp} clusters, wherever they are running. 
+ +.Integrating standalone {productname} with {ocp} +image:178_Quay_architecture_0821_deployment_ex2.png[Integrating standalone {productname} with {ocp}] + +Several Operators are available to help integrate standalone and Operator based deployments of{productname} with {ocp}, like the following: + +{productname} Cluster Security Operator:: Relays {productname} vulnerability scanning results into the {ocp} console +{productname} Bridge Operator:: Ensures seamless integration and user experience by using {productname} with {ocp} in conjunction with {ocp} Builds and ImageStreams \ No newline at end of file diff --git a/modules/core-prereqs-db.adoc b/modules/core-prereqs-db.adoc new file mode 100644 index 000000000..d77c38263 --- /dev/null +++ b/modules/core-prereqs-db.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="arch-core-prereqs-db"] += Database backend + +{productname} stores all of its configuration information in the `config.yaml` file. Registry metadata, for example, user information, robot accounts, team, permissions, organizations, images, tags, manifests, etc. are stored inside of the database backend. Logs can be pushed to ElasticSearch if required. PostgreSQL is the preferred database backend because it can be used for both {productname} and Clair. + +A future version of {productname} will remove support for using MySQL and MariaDB as the database backend, which has been deprecated since the {productname} 3.6 release. Until then, MySQL is still supported according to the link:https://access.redhat.com/articles/4067991[support matrix], but will not receive additional features or explicit testing coverage. The {productname} Operator supports only PostgreSQL deployments when the database is managed. If you want to use MySQL, you must deploy it manually and set the database component to `managed: false`. + +Deploying {productname} in a highly available (HA) configuration requires that your database services are provisioned for high availability. 
If {productname} is running on public cloud infrastructure, it is recommended that you use the PostgreSQL services provided by your cloud provider, however MySQL is also supported. + +Geo-replication requires a single, shared database that is accessible from all regions. diff --git a/modules/core-prereqs-redis.adoc b/modules/core-prereqs-redis.adoc new file mode 100644 index 000000000..ab24591aa --- /dev/null +++ b/modules/core-prereqs-redis.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="core-prereqs-redis"] += Redis + +{productname} stores builder logs inside a Redis cache. Because the data stored is ephemeral, Redis does not need to be highly available even though it is stateful. + +If Redis fails, you will lose access to build logs, builders, and the garbage collector service. Additionally, user events will be unavailable. + +You can use a Redis image from the Red Hat Software Collections or from any other source you prefer. diff --git a/modules/core-prereqs-storage.adoc b/modules/core-prereqs-storage.adoc new file mode 100644 index 000000000..bcfba3718 --- /dev/null +++ b/modules/core-prereqs-storage.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="core-prereqs-storage"] += Image storage backend + +{productname} stores all binary blobs in its storage backend. + +Local storage:: {productname} can work with local storage, however this should only be used for proof of concept or test setups, as the durability of the binary blobs cannot be guaranteed. + +HA storage setup:: For a {productname} HA deployment, you must provide HA image storage, for example: ++ +- **Red Hat OpenShift Data Foundation**, previously known as Red Hat OpenShift Container Storage, is software-defined storage for containers. Engineered as the data and storage services platform for {ocp}, Red Hat OpenShift Data Foundation helps teams develop and deploy applications quickly and efficiently across clouds. 
More information can be found at link:https://www.redhat.com/en/technologies/cloud-computing/openshift-data-foundation[]. +- **Ceph Object Gateway** (also called RADOS Gateway) is an example of a storage solution that can provide the object storage needed by {productname}. +Detailed instructions on how to use Ceph storage as a highly available storage backend can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_-_high_availability/preparing_for_red_hat_quay_high_availability#set_up_ceph[Quay High Availability Guide]. +Further information about Red Hat Ceph Storage and HA setups can be found in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/pdf/architecture_guide/Red_Hat_Ceph_Storage-3-Architecture_Guide-en-US.pdf[Red Hat Ceph Storage Architecture Guide] + +Geo-replication:: Local storage cannot be used for geo-replication, so a supported on premise or cloud based object storage solution must be deployed. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the {productname} instance, and will then be replicated, in the background, to the other storage engines. This requires the image storage to be accessible from all regions. 
+ +[id="arch-supported-image-storage-types"] +== Supported image storage engines + +{productname} supports the following on premise storage types: + +* Ceph/Rados RGW +* OpenStack Swift +* Red Hat OpenShift Data Foundation 4 (through NooBaa) + +{productname} supports the following public cloud storage engines: + +* Amazon Web Services (AWS) S3 +* Google Cloud Storage +* Azure Blob Storage +* Hitachi Content Platform (HCP) diff --git a/modules/core-sample-quay-on-prem.adoc b/modules/core-sample-quay-on-prem.adoc new file mode 100644 index 000000000..bd9594ad0 --- /dev/null +++ b/modules/core-sample-quay-on-prem.adoc @@ -0,0 +1,12 @@ +:_content-type: CONCEPT +[id="sample-quay-on-prem-intro"] += Deploying {productname} on premise + +The following image shows examples for on premise configuration, for the following types of deployments: + +* Standalone Proof of Concept +* Highly available deployment on multiple hosts +* Deployment on an {ocp} cluster by using the {productname} Operator + +.On premise example configurations +image:178_Quay_architecture_0821_on-premises_config.png[On premise example configuration] diff --git a/modules/cosign-oci-intro.adoc b/modules/cosign-oci-intro.adoc new file mode 100644 index 000000000..fa6ce0e2e --- /dev/null +++ b/modules/cosign-oci-intro.adoc @@ -0,0 +1,115 @@ +:_content-type: CONCEPT +[id="cosign-oci-intro"] += Cosign OCI support + +Cosign is a tool that can be used to sign and verify container images. It uses the `ECDSA-P256` signature algorithm and Red Hat's Simple Signing payload format to create public keys that are stored in PKIX files. Private keys are stored as encrypted PEM files. + +Cosign currently supports the following: + +* Hardware and KMS Signing +* Bring-your-own PKI +* OIDC PKI +* Built-in binary transparency and timestamping service + +Use the following procedure to directly install Cosign. + +.Prerequisites + +* You have installed Go version 1.16 or later. 
+ifeval::["{context}" == "use-quay"] +* You have set `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file. +endif::[] + +.Procedure + +. Enter the following `go` command to directly install Cosign: ++ +[source,terminal] +---- +$ go install github.com/sigstore/cosign/cmd/cosign@v1.0.0 +---- ++ +.Example output ++ +[source,terminal] +---- +go: downloading github.com/sigstore/cosign v1.0.0 +go: downloading github.com/peterbourgon/ff/v3 v3.1.0 +---- + +. Generate a key-value pair for Cosign by entering the following command: ++ +[source,terminal] +---- +$ cosign generate-key-pair +---- ++ +.Example output ++ +[source,terminal] +---- +Enter password for private key: +Enter again: +Private key written to cosign.key +Public key written to cosign.pub +---- + +. Sign the key-value pair by entering the following command: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ cosign sign -key cosign.key quay.io/user1/busybox:test +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ cosign sign -key cosign.key quay-server.example.com/user1/busybox:test +---- +endif::[] ++ +.Example output ++ +[source,terminal] +---- +Enter password for private key: +Pushing signature to: quay-server.example.com/user1/busybox:sha256-ff13b8f6f289b92ec2913fa57c5dd0a874c3a7f8f149aabee50e3d01546473e3.sig +---- ++ +If you experience the `error: signing quay-server.example.com/user1/busybox:test: getting remote image: GET https://quay-server.example.com/v2/user1/busybox/manifests/test: UNAUTHORIZED: access to the requested resource is not authorized; map[]` error, which occurs because Cosign relies on `~./docker/config.json` for authorization, you might need to execute the following command: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman login --authfile ~/.docker/config.json quay.io +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ podman login --authfile ~/.docker/config.json 
quay-server.example.com +---- +endif::[] ++ +.Example output ++ +[source,terminal] +---- +Username: +Password: +Login Succeeded! +---- + +. Enter the following command to see the updated authorization configuration: ++ +[source,terminal] +---- +$ cat ~/.docker/config.json +{ + "auths": { + "quay-server.example.com": { + "auth": "cXVheWFkbWluOnBhc3N3b3Jk" + } + } +---- \ No newline at end of file diff --git a/modules/cosign-oci-with-quay.adoc b/modules/cosign-oci-with-quay.adoc new file mode 100644 index 000000000..a172a5fcb --- /dev/null +++ b/modules/cosign-oci-with-quay.adoc @@ -0,0 +1,104 @@ +:_content-type: CONCEPT +[id="cosign-oci-with-quay"] += Installing and using Cosign + +Use the following procedure to directly install Cosign. + +.Prerequisites + +* You have installed Go version 1.16 or later. +* You have set `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file. + +.Procedure + +. Enter the following `go` command to directly install Cosign: ++ +[source,terminal] +---- +$ go install github.com/sigstore/cosign/cmd/cosign@v1.0.0 +---- ++ +.Example output ++ +[source,terminal] +---- +go: downloading github.com/sigstore/cosign v1.0.0 +go: downloading github.com/peterbourgon/ff/v3 v3.1.0 +---- + +. Generate a key-value pair for Cosign by entering the following command: ++ +[source,terminal] +---- +$ cosign generate-key-pair +---- ++ +.Example output ++ +[source,terminal] +---- +Enter password for private key: +Enter again: +Private key written to cosign.key +Public key written to cosign.pub +---- + +. 
Sign the key-value pair by entering the following command: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ cosign sign -key cosign.key quay.io/user1/busybox:test +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ cosign sign -key cosign.key quay-server.example.com/user1/busybox:test +---- +endif::[] ++ +.Example output ++ +[source,terminal] +---- +Enter password for private key: +Pushing signature to: quay-server.example.com/user1/busybox:sha256-ff13b8f6f289b92ec2913fa57c5dd0a874c3a7f8f149aabee50e3d01546473e3.sig +---- ++ +If you experience the `error: signing quay-server.example.com/user1/busybox:test: getting remote image: GET https://quay-server.example.com/v2/user1/busybox/manifests/test: UNAUTHORIZED: access to the requested resource is not authorized; map[]` error, which occurs because Cosign relies on `~./docker/config.json` for authorization, you might need to execute the following command: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman login --authfile ~/.docker/config.json quay.io +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ podman login --authfile ~/.docker/config.json quay-server.example.com +---- +endif::[] ++ +.Example output ++ +[source,terminal] +---- +Username: +Password: +Login Succeeded! +---- + +. 
Enter the following command to see the updated authorization configuration: ++ +[source,terminal] +---- +$ cat ~/.docker/config.json +{ + "auths": { + "quay-server.example.com": { + "auth": "cXVheWFkbWluOnBhc3N3b3Jk" + } + } +---- \ No newline at end of file diff --git a/modules/creating-a-team-api.adoc b/modules/creating-a-team-api.adoc new file mode 100644 index 000000000..35f86d547 --- /dev/null +++ b/modules/creating-a-team-api.adoc @@ -0,0 +1,35 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: PROCEDURE +[id="creating-a-team-api"] += Creating a team by using the API + +When you create a team for your organization with the API you can select the team name, +choose which repositories to make available to the team, and decide the +level of access to the team. + +Use the following procedure to create a team for your organization repository. + +.Prerequisites + +* You have created an organization. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}
Team members have no permission to the repository. +* *Read*. Team members can view and pull from the repository. +* *Write*. Team members can read (pull) from and write (push) to the repository. +* *Admin*. Full access to pull from, and push to, the repository, plus the ability to do administrative tasks associated with the repository. + +. Optional. Add a team member or robot account. To add a team member, enter the name of their {productname} account. + +. Review and finish the information, then click *Review and Finish*. The new team appears under the *Teams and membership page*. \ No newline at end of file diff --git a/modules/creating-an-image-repository-via-docker.adoc b/modules/creating-an-image-repository-via-docker.adoc new file mode 100644 index 000000000..2742697f1 --- /dev/null +++ b/modules/creating-an-image-repository-via-docker.adoc @@ -0,0 +1,131 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT + +[id="creating-an-image-repository-via-docker"] += Creating a repository by using Podman + +With the proper credentials, you can _push_ an image to a repository using Podman that does not yet exist in your +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +instance. Pushing an image refers to the process of uploading a container image from your local system or development environment to a container registry like +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +After pushing an image to your registry, a repository is created. If you push an image through the command-line interface (CLI) without first creating a repository on the UI, the created repository is set to *Private*. 
+ +ifeval::["{context}" == "quay-io"] +If you push an image through the command-line interface (CLI) without first creating a repository on the UI, the created repository is set to *Private*, regardless of the plan you have. + +[NOTE] +==== +It is recommended that you create a repository on the {quayio} UI before pushing an image. {quayio} checks the plan status and does not allow creation of a private repository if a plan is not active. +==== +endif::[] + +Use the following procedure to create an image repository by pushing an image. + +.Prerequisites + +* You have download and installed the `podman` CLI. +* You have logged into your registry. +* You have pulled an image, for example, busybox. + +.Procedure + +ifeval::["{context}" == "quay-io"] +. Pull a sample page from an example registry. For example: ++ +[source,terminal] +---- +$ podman pull busybox +---- ++ +.Example output ++ +[source,terminal] +---- +Trying to pull docker.io/library/busybox... +Getting image source signatures +Copying blob 4c892f00285e done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 +---- +endif::[] + +ifeval::["{context}" == "use-quay"] +. Pull a sample page from an example registry. For example: ++ +[source,terminal] +---- +$ sudo podman pull busybox +---- ++ +.Example output ++ +[source,terminal] +---- +Trying to pull docker.io/library/busybox... +Getting image source signatures +Copying blob 4c892f00285e done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 +---- +endif::[] + + +. Tag the image on your local system with the new repository and image name. 
For example: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay.io/quayadmin/busybox:test +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ sudo podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- +endif::[] + +. Push the image to the registry. Following this step, you can use your browser to see the tagged image in your repository. ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman push --tls-verify=false quay.io/quayadmin/busybox:test +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- +endif::[] ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 6b245f040973 done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +---- \ No newline at end of file diff --git a/modules/creating-an-image-repository-via-skopeo-copy.adoc b/modules/creating-an-image-repository-via-skopeo-copy.adoc new file mode 100644 index 000000000..89c6747c9 --- /dev/null +++ b/modules/creating-an-image-repository-via-skopeo-copy.adoc @@ -0,0 +1,72 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT + +[id="creating-an-image-repository-via-skopeo-copy"] += Creating a repository by using Skopeo + +In some cases, the `podman` CLI tool is unable to pull certain artifact types, for example, `application/x-mlmodel`. Attempting to use `podman pull` with this artifact type results in the following error: + +[source,terminal] +---- +Error: parsing image configuration: unsupported image-specific operation on artifact with type "application/x-mlmodel" +---- + +As an alternative, you can use `skopeo copy` to copy an artifact from one location to your {productname} repository. 
+ +.Prerequisites + +* You have installed the `skopeo` CLI. +* You have logged in to a source registry (in this example, `\registry.redhat.io`) and have a valid authentication file (`~/.docker/config.json`). Alternatively, you can provide credentials by using the `--src-username` and `--src-password` parameters when running a command with the `skopeo` CLI. +* You have logged in to your {productname} repository. + +.Procedure + +* Use the `skopeo copy` command on an artifact to copy the artifact to your {productname} repository. For example: ++ +[source,terminal] +---- +$ sudo skopeo copy --dest-tls-verify=false --all \ <1> <2> + --src-username --src-password \ <3> + --src-authfile ~/.docker/config.json \ <4> + --dest-username --dest-password \ <5> + docker://registry.redhat.io/rhelai1/granite-8b-code-instruct:1.4-1739210683 \ <6> + docker://quay-server.example.com//granite-8b-code-instruct:latest <7> +---- +<1> Optional. `--dest-tls-verify=false` disables SSL/TLS verification for the destination registry. +<2> Optional. The `--all` flag optionally copies all image manifests, including multi-architecture images. +<3> Optional. If you are not logged into a registry, you can pass in the source registry credentials with these parameters. +<4> Optional. The path to your Docker authentication file. Typically located at `~/.docker/config.json`. +<5> Your {productname} registry username and password. +<6> The source image or artifact from the Red{nbsp}Hat container registry. Ensure that you are logged in to the registry and that you can pull the image. +<7> The URL of your {productname} repository appended with a namespace and the name of the image. 
++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Checking if image destination supports signatures +Copying blob 9538fa2b8ad9 done | +Copying blob 491ae95f59a2 done | +Copying blob 01196d075d77 done | +Copying blob e53a4633c992 done | +Copying blob c266e9cfa731 done | +Copying blob dae0e701d9b2 done | +Copying blob 1e227a2c78d8 done | +Copying blob 94ff9338861b done | +Copying blob 2f2bba45146f done | +Copying blob d3b4df07a0ce done | +Copying blob f533a8dbb852 done | +Copying config 44136fa355 done | +Writing manifest to image destination +Storing signatures +---- + +.Next steps + +* After you have pushed a machine learning artifact to your {productname} repository, you can link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-and-modifying-tags[View tag information by using the UI] or link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[View model card information by using the UI]. \ No newline at end of file diff --git a/modules/creating-an-image-repository-via-the-api.adoc b/modules/creating-an-image-repository-via-the-api.adoc new file mode 100644 index 000000000..a7421355d --- /dev/null +++ b/modules/creating-an-image-repository-via-the-api.adoc @@ -0,0 +1,43 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="creating-an-image-repository-via-the-api"] += Creating a repository by using the API + +ifeval::["{context}" == "quay-security"] +Private repositories can be created by using the API and specifying the the `"visibility": `private`` option. +endif::[] + +Use the following procedure to create an image repository using the {productname} API. 
+ +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepo[`POST /api/v1/repository`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." + }' \ + "https://quay-server.example.com/api/v1/repository" +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "", "kind": "image"} +---- diff --git a/modules/creating-an-image-repository-via-the-ui.adoc b/modules/creating-an-image-repository-via-the-ui.adoc new file mode 100644 index 000000000..f4b2e33bf --- /dev/null +++ b/modules/creating-an-image-repository-via-the-ui.adoc @@ -0,0 +1,47 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-an-image-repository-via-the-ui"] += Creating a repository by using the UI + +Use the following procedure to create a repository using the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. + +.Procedure + +Use the following procedure to create a repository using the v2 UI. + +.Procedure + +. Click *Repositories* on the navigation pane. + +. Click *Create Repository*. + +. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`. 
++ +[IMPORTANT] +==== +Do not use the following words in your repository name: +* `build` +* `trigger` +* `tag` +* `notification` + +When these words are used for repository names, users are unable access the repository, and are unable to permanently delete the repository. Attempting to delete these repositories returns the following error: `Failed to delete repository , HTTP404 - Not Found.` +==== + +. Click *Create*. ++ +Now, your example repository should populate under the *Repositories* page. + +. Optional. Click *Settings* -> *Repository visibility* -> *Make private* to set the repository to private. \ No newline at end of file diff --git a/modules/creating-custom-ssl-certs-config-bundle.adoc b/modules/creating-custom-ssl-certs-config-bundle.adoc new file mode 100644 index 000000000..ada9de273 --- /dev/null +++ b/modules/creating-custom-ssl-certs-config-bundle.adoc @@ -0,0 +1,239 @@ +:_content-type: PROCEDURE +[id="creating-custom-ssl-certs-config-bundle"] += Creating a custom SSL/TLS configBundleSecret resource + +After creating your custom SSL/TLS certificates, you can create a custom `configBundleSecret` resource for {productname-ocp}, which allows you to upload `ssl.cert` and `ssl.key` files. + +.Prerequisites + +* You have base64 decoded the original config bundle into a `config.yaml` file. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-config-cli-download[Downloading the existing configuration]. +* You have generated custom SSL certificates and keys. + +.Procedure + +. Create a new YAML file, for example, `custom-ssl-config-bundle-secret.yaml`: ++ +[source,terminal] +---- +$ touch custom-ssl-config-bundle-secret.yaml +---- + +. Create the `custom-ssl-config-bundle-secret` resource. + +.. 
Create the resource by entering the following command: ++ +[source,terminal] +---- +$ oc -n create secret generic custom-ssl-config-bundle-secret \ + --from-file=config.yaml= \ <1> + --from-file=ssl.cert= \ <2> + --from-file=extra_ca_cert_.crt=ca-certificate-bundle.crt + \ <3> + --from-file=ssl.key= \ <4> + --dry-run=client -o yaml > custom-ssl-config-bundle-secret.yaml +---- +<1> Where `` is your `base64 decoded` `config.yaml` file. +<2> Where `` is your `ssl.cert` file. +<3> Optional. The `--from-file=extra_ca_cert_.crt=ca-certificate-bundle.crt` field allows {productname} to recognize custom Certificate Authority (CA) files. If you are using LDAP, OIDC, or another service that uses custom CAs, you must add them via the `extra_ca_cert` path. For more information, see "Adding additional Certificate Authorities to {productname-ocp}." +<4> Where `` is your `ssl.key` file. + +. Optional. You can check the content of the `custom-ssl-config-bundle-secret.yaml` file by entering the following command: ++ +[source,terminal] +---- +$ cat custom-ssl-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +apiVersion: v1 +data: + config.yaml: QUxMT1dfUFVMTFNfV0lUSE9VVF9TVFJJQ1RfTE9HR0lORzogZmFsc2UKQVVUSEVOVElDQVRJT05fVFlQRTogRGF0YWJhc2UKREVGQVVMVF9UQUdfRVhQSVJBVElPTjogMncKRElTVFJJQlVURURfU1R... + ssl.cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVYakNDQTBhZ0F3SUJBZ0lVTUFBRk1YVWlWVHNoMGxNTWI3U1l0eFV5eTJjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2dZZ3hDekFKQmdOVkJBWVR... + extra_ca_cert_:LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVYakNDQTBhZ0F3SUJBZ0lVTUFBRk1YVWlWVHNoMGxNTWI3U1l0eFV5eTJjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd2dZZ3hDe... + ssl.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQ2c0VWxZOVV1SVJPY1oKcFhpZk9MVEdqaS9neUxQMlpiMXQ... +kind: Secret +metadata: + creationTimestamp: null + name: custom-ssl-config-bundle-secret + namespace: +---- + +. 
Create the `configBundleSecret` resource by entering the following command: ++ +[source,terminal] +---- +$ oc create -n -f custom-ssl-config-bundle-secret.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +secret/custom-ssl-config-bundle-secret created +---- + +. Update the `QuayRegistry` YAML file to reference the `custom-ssl-config-bundle-secret` object by entering the following command: ++ +[source,terminal] +---- +$ oc patch quayregistry -n --type=merge -p '{"spec":{"configBundleSecret":"custom-ssl-config-bundle-secret"}}' +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry patched +---- + +. Set the `tls` component of the `QuayRegistry` YAML to `false` by entering the following command: ++ +[source,terminal] +---- +$ oc patch quayregistry -n --type=merge -p '{"spec":{"components":[{"kind":"tls","managed":false}]}}' +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry patched +---- + +. Ensure that your `QuayRegistry` YAML file has been updated to use the custom SSL `configBundleSecret` resource, and that your and `tls` resource is set to `false` by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry -n -o yaml +---- ++ +.Example output ++ +[source,terminal] +---- +# ... + configBundleSecret: custom-ssl-config-bundle-secret +# ... +spec: + components: + - kind: tls + managed: false +# ... +---- + +//// +. Set the `route` component of the `QuayRegistry` YAML to `false` by entering the following command: ++ +[source,terminal] +---- +$ oc patch quayregistry -n --type=merge -p '{"spec":{"components":[{"kind":"route","managed":false}]}}' +---- ++ +.Example output ++ +[source,terminal] +---- +quayregistry.quay.redhat.com/example-registry patched +---- + +. You must set the `Routes` to `Passthrough`. This can be done on the {ocp} web console. + +.. Click *Networking* -> *Routes*. + +.. 
Click the menu kebab for your registry, then click *Edit Route*. + +.. For *Hostname*, include the URL of your {productname} registry. + +.. For *Service*, select *<_registry_quay_app*. + +.. For *Target port*, select *443 -> 8443 (TCP)*. + +.. For *TLS termination* select *Passthrough*. + +.. For *Insecure traffic* select *Redirect*. Then, click *Save*. + +. Monitor your {productname} registry deployment: + +.. On the {ocp} web console click *Operators* -> *Installed Operators* -> *{productname}*. + +.. Click *Quay Registry*, then click the name of your registry. + +.. Click *Events* to monitor the progress of the reconfiguration. If necessary, you can restart all pods by deleting them. For example: ++ +[source,terminal] +---- +$ oc get pods -n | grep quay +---- ++ +.Example output ++ +[source,terminal] +---- +example-registry-quay-app-6c5bc8ffb7-4qr5v 1/1 Running 0 18m +example-registry-quay-app-6c5bc8ffb7-xwswd 1/1 Running 0 20m +example-registry-quay-database-5f64c9db49-bmg9v 1/1 Running 0 156m +example-registry-quay-mirror-797458dcc7-ktw9v 1/1 Running 0 19m +example-registry-quay-mirror-797458dcc7-tmcxd 1/1 Running 0 19m +example-registry-quay-redis-5f6b6cc597-rltc5 1/1 Running 0 20m +quay-operator.v3.12.1-5b7dbd57df-xrs87 1/1 Running 0 24h +---- ++ +[source,terminal] +---- +$ oc delete pods -n example-registry-quay-app-6c5bc8ffb7-4qr5v example-registry-quay-app-6c5bc8ffb7-xwswd example-registry-quay-database-5f64c9db49-bmg9v example-registry-quay-mirror-797458dcc7-ktw9v example-registry-quay-mirror-797458dcc7-tmcxd example-registry-quay-redis-5f6b6cc597-rltc5 quay-operator.v3.12.1-5b7dbd57df-xrs87 +---- + + +... Create an SSL/TLS bundle by concatenating the SSL certificate and the CA certificate. For example: ++ +[source,terminal] +---- +$ cat ssl.cert ca.cert > ssl-bundle.cert +---- ++ +[NOTE] +==== +Depending on your needs, you might also include `intermediateCA.pem` CAs, `rootCA.pem` CAs, or other CAs into the `ssl-bundle.cert` as necessary. 
Do not include private keys in your configuration bundle. +==== + +. If not already set, update your `config.yaml` file to include the `PREFERRED_URL_SCHEME: https`, `EXTERNAL_TLS_TERMINATION: false`, and `SERVER_HOSTNAME` fields: ++ +[source,yaml] +---- +PREFERRED_URL_SCHEME: https +EXTERNAL_TLS_TERMINATION: false +SERVER_HOSTNAME: +---- +//// + +.Verification + +* Confirm a TLS connection to the server and port by entering the following command: ++ +[source,terminal] +---- +$ openssl s_client -connect :443 +---- ++ +.Example output ++ +[source,terminal] +---- +# ... +SSL-Session: + Protocol : TLSv1.3 + Cipher : TLS_AES_256_GCM_SHA384 + Session-ID: 0E995850DC3A8EB1A838E2FF06CE56DBA81BD8443E7FA05895FBD6FBDE9FE737 + Session-ID-ctx: + Resumption PSK: 1EA68F33C65A0F0FA2655BF9C1FE906152C6E3FEEE3AEB6B1B99BA7C41F06077989352C58E07CD2FBDC363FA8A542975 + PSK identity: None + PSK identity hint: None + SRP username: None + TLS session ticket lifetime hint: 7200 (seconds) + +# ... +---- + diff --git a/modules/creating-image-expiration-notification.adoc b/modules/creating-image-expiration-notification.adoc new file mode 100644 index 000000000..195df1b3b --- /dev/null +++ b/modules/creating-image-expiration-notification.adoc @@ -0,0 +1,83 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-image-expiration-notification"] += Creating an image expiration notification + +Image expiration event triggers can be configured to notify users through email, Slack, webhooks, and so on, and can be configured at the repository level. Triggers can be set for images expiring in any amount of days, and can work in conjunction with the auto-pruning feature. + +Image expiration notifications can be set by using the {productname} v2 UI or by using the `createRepoNotification` API endpoint. + +.Prerequisites + +* `FEATURE_GARBAGE_COLLECTION: true` is set in your `config.yaml` file. +* Optional. 
`FEATURE_AUTO_PRUNE: true` is set in your `config.yaml` file. + +.Procedure + +. On the {productname} v2 UI, click *Repositories*. + +. Select the name of a repository. + +. Click *Settings* -> *Events and notifications*. + +. Click *Create notification*. The *Create notification* popup box appears. + +. Click the *Select event...* box, then click *Image expiry trigger*. + +. In the *When the image is due to expiry in days* box, enter the number of days before the image's expiration when you want to receive an alert. For example, use `1` for 1 day. + +. In the *Select method...* box, click one of the following: ++ +* E-mail +* Webhook POST +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification + +. Depending on which method you chose, include the necessary data. For example, if you chose *Webhook POST*, include the `Webhook URL`. + +. Optional. Provide a *POST JSON body template*. + +. Optional. Provide a *Title* for your notification. + +. Click *Submit*. You are returned to the *Events and notifications* page, and the notification now appears. + +. Optional. You can set the `NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES` variable in your config.yaml file. with this field set, if there are any expiring images notifications will be sent automatically. By default, this is set to `300`, or 5 hours, however it can be adjusted as warranted. ++ +[source,yaml] +---- +NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES: 300 <1> +---- +<1> By default, this field is set to `300`, or 5 hours. + +.Verification + +. Click the menu kebab -> *Test Notification*. The following message is returned: ++ +[source,text] +---- +Test Notification Queued +A test version of this notification has been queued and should appear shortly +---- + +. Depending on which method you chose, check your e-mail, webhook address, Slack channel, and so on. 
The information sent should look similar to the following example: ++ +[source,json] +---- +{ + "repository": "sample_org/busybox", + "namespace": "sample_org", + "name": "busybox", + "docker_url": "quay-server.example.com/sample_org/busybox", + "homepage": "http://quay-server.example.com/repository/sample_org/busybox", + "tags": [ + "latest", + "v1" + ], + "expiring_in": "1 days" +} +---- \ No newline at end of file diff --git a/modules/creating-notifications-api.adoc b/modules/creating-notifications-api.adoc new file mode 100644 index 000000000..994ebf296 --- /dev/null +++ b/modules/creating-notifications-api.adoc @@ -0,0 +1,94 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="managing-notifications-api"] += Creating notifications by using the API + +Use the following procedure to add notifications. + +.Prerequisites + +* You have created a repository. +* You have administrative privileges for the repository. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createreponotification[`POST /api/v1/repository/{repository}/notification`] command to create a notification on your repository: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "event": "", + "method": "", + "config": { + "": "" + }, + "eventConfig": { + "": "" + } + }' \ + https:///api/v1/repository///notification/ +---- ++ +This command does not return output in the CLI. 
Instead, you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getreponotification[`GET /api/v1/repository/{repository}/notification/{uuid}`] command to obtain information about the repository notification: ++ +[source,terminal] +---- +{"uuid": "240662ea-597b-499d-98bb-2b57e73408d6", "title": null, "event": "repo_push", "method": "quay_notification", "config": {"target": {"name": "quayadmin", "kind": "user", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}}}, "event_config": {}, "number_of_failures": 0} +---- + +. You can test your repository notification by entering the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#testreponotification[`POST /api/v1/repository/{repository}/notification/{uuid}/test`] command: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification//test +---- ++ +.Example output ++ +[source,terminal] +---- +{} +---- + +. You can reset repository notification failures to 0 by entering the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#resetrepositorynotificationfailures[`POST /api/v1/repository/{repository}/notification/{uuid}`] command: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + https:///api/v1/repository//notification/ +---- + +. 
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletereponotification[`DELETE /api/v1/repository/{repository}/notification/{uuid}`] command to delete a repository notification: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository///notification/ +---- ++ +This command does not return output in the CLI. Instead, you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listreponotifications[`GET /api/v1/repository/{repository}/notification/`] command to retrieve a list of all notifications: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" https:///api/v1/repository///notification +---- ++ +.Example output ++ +[source,terminal] +---- +{"notifications": []} +---- \ No newline at end of file diff --git a/modules/creating-notifications.adoc b/modules/creating-notifications.adoc new file mode 100644 index 000000000..d5cabff12 --- /dev/null +++ b/modules/creating-notifications.adoc @@ -0,0 +1,51 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-notifications"] += Creating notifications by using the UI + +Use the following procedure to add notifications. + +.Prerequisites + +* You have created a repository. +* You have administrative privileges for the repository. + +.Procedure + +ifeval::["{context}" == "quay-io"] +. Navigate to a repository on {quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +. Navigate to a repository on {productname}. +endif::[] + +. In the navigation pane, click *Settings*. + +. In the *Events and Notifications* category, click *Create Notification* to add a new notification for a repository event. The *Create notification* popup box appears. + +. 
On the *Create notification* popup box, click the *When this event occurs* box to select an event. You can select a notification for the following types of events: ++ +* Push to Repository +* Image build failed +* Image build queued +* Image build started +* Image build success +* Image build cancelled +* Image expiry trigger + +. After you have selected the event type, select the notification method. The following methods are supported: ++ +* Quay Notification +* E-mail Notification +* Webhook POST +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification ++ +Depending on the method that you choose, you must include additional information. For example, if you select *E-mail*, you are required to include an e-mail address and an optional notification title. + +. After selecting an event and notification method, click *Create Notification*. \ No newline at end of file diff --git a/modules/creating-oauth-access-token.adoc b/modules/creating-oauth-access-token.adoc new file mode 100644 index 000000000..9a5ad10c1 --- /dev/null +++ b/modules/creating-oauth-access-token.adoc @@ -0,0 +1,51 @@ +:_content-type: PROCEDURE +[id="creating-oauth-access-token"] += Creating an OAuth 2 access token + +With {productname}, you must create an OAuth 2 access token before you can access the API endpoints of your organization. An OAuth 2 access token can only be generated by using the {productname} UI; the CLI cannot be used to generate an OAuth 2 access token. + +Use the following procedure to create an OAuth2 access token. + +.Prerequisites + +* You have logged in to {productname} as an administrator. +* You have created an OAuth 2 application. + +.Procedure + +. On the main page, select an Organization. + +. In the navigation pane, select *Applications*. + +. Click the name of your application, for example, *Test application*. + +. In the navigation pane, select *Generate Token*. + +. Check the boxes for the following options: + +.. *Administer Organization*. 
When selected, allows the user to be able to administer organizations, including creating robots, creating teams, adjusting team membership, and changing billing settings. + +.. *Administer Repositories*. When selected, provides the user administrator access to all repositories to which the granting user has access. + +.. *Create Repositories*. When selected, provides the user the ability to create repositories in any namespaces that the granting user is allowed to create repositories. + +.. *View all visible repositories*. When selected, provides the user the ability to view and pull all repositories visible to the granting user. + +.. *Read/Write to any accessible repositories*. When selected, provides the user the ability to view, push and pull to all repositories to which the granting user has write access. + +.. *Super User Access*. When selected, provides the user the ability to administer your installation including managing users, managing organizations and other features found in the superuser panel. + +.. *Administer User* When selected, provides the user the ability to administer your account including creating robots and granting them permissions to your repositories. + +.. *Read User Information*. When selected, provides the user the ability to read user information such as username and email address. + +. Click *Generate Access Token*. You are redirected to a new page. + +. Review the permissions that you are allowing, then click *Authorize Application*. Confirm your decision by clicking *Authorize Application*. + +. You are redirected to the *Access Token* page. Copy and save the access token. ++ +[IMPORTANT] +==== +This is the only opportunity to copy and save the access token. It cannot be reobtained after leaving this page. 
+==== \ No newline at end of file diff --git a/modules/creating-oauth-application-api.adoc b/modules/creating-oauth-application-api.adoc new file mode 100644 index 000000000..33cfb207c --- /dev/null +++ b/modules/creating-oauth-application-api.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="creating-oauth-application-api"] += Managing a user application by using the API + +{productname} users can create, list information about, and delete a _user application_ that can be used as an alternative to using your password for Docker, Podman, or other service providers. User application tokens work like your username and password, but are encrypted and do not provide any information to third parties regarding who is accessing {productname}. + +[NOTE] +==== +After creation via the CLI, the user application token is listed under *User Settings* of the {productname} UI. Note that this differs from an application token that is created under user settings, and should be considered a different application entirely. +==== + +Use the following procedure to create a user application token. + +.Prerequisites + +* You have created an OAuth 2 access token. 
+ +.Procedure + +* Create a user application by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#appspecifictokens[`POST /api/v1/user/apptoken`] API call: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "title": "MyAppToken" + }' \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- ++ +.Example output ++ +[source,terminal] +---- +{"token": {"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null, "token_code": "K2YQB1YO0ABYV5OBUYOMF9MCUABN12Y608Q9RHFXBI8K7IE8TYCI4WEEXSVH1AXWKZCKGUVA57PSA8N48PWED9F27PXATFUVUD9QDNCE9GOT9Q8ACYPIN0HL"}} +---- + +* You can obtain information about your application, including when the application expires, by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listapptokens[`GET /api/v1/user/apptoken`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken" +---- ++ +[source,terminal] +---- +{"tokens": [{"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null}], "only_expiring": null} +---- + +* You can obtain information about a specific user application by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getapptoken[`GET /api/v1/user/apptoken/{token_uuid}`] command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"token": {"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null, "token_code": "K2YQB1YO0ABYV5OBUYOMF9MCUABN12Y608Q9RHFXBI8K7IE8TYCI4WEEXSVH1AXWKZCKGUVA57PSA8N48PWED9F27PXATFUVUD9QDNCE9GOT9Q8ACYPIN0HL"}} +---- + +* You can delete or revoke a user application token by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#revokeapptoken[`DELETE /api/v1/user/apptoken/{token_uuid}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "http://quay-server.example.com/api/v1/user/apptoken/" +---- ++ +This command does not return output in the CLI. You can return a list of tokens by entering one of the aforementioned commands. 
\ No newline at end of file diff --git a/modules/creating-oauth-application.adoc b/modules/creating-oauth-application.adoc new file mode 100644 index 000000000..c98e6934c --- /dev/null +++ b/modules/creating-oauth-application.adoc @@ -0,0 +1,34 @@ +:_content-type: PROCEDURE +[id="creating-oauth-application"] += Creating an OAuth 2 application by using the UI + +{productname} administrators can define an application by specifying a unique name, a homepage URL, a description of the application's uses, an e-mail, or a redirect/callback URL. + +[NOTE] +==== +The following application token is created under an Organization. This differs from an application token that is created under user settings, and should be considered a different application entirely. +==== + +Use the following procedure to create an OAuth2 application. + +.Prerequisites + +* You have logged in to {productname} as an administrator. + +.Procedure + +. On the main page, select an Organization. + +. In the navigation pane, select *Applications*. + +. Click *Create New Application* and provide a new application name, then press *Enter*. + +. On the *OAuth Applications* page, select the name of your application. + +. Optional. Enter the following information: + +.. *Application Name* +.. *Homepage URL* +.. *Description* +.. *Avatar E-mail* +.. *Redirect/Callback URL prefix* \ No newline at end of file diff --git a/modules/creating-org-policy-api.adoc b/modules/creating-org-policy-api.adoc new file mode 100644 index 000000000..f89995abe --- /dev/null +++ b/modules/creating-org-policy-api.adoc @@ -0,0 +1,92 @@ +[id="creating-org-policy-api"] += Creating an auto-prune policy for a namespace by using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for a namespace. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. 
+ +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationautoprunepolicy[`POST /api/v1/organization/{orgname}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in an organization: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ +"method": "creation_date", "value": "7d"}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output +[source,terminal] +---- +{"uuid": "73d64f05-d587-42d9-af6d-e726a4a80d6e"} +---- + +. Optional. You can add an additional policy to an organization and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/organization//autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `true` makes it so that tags that match the given regex pattern will be pruned. In this example, tags that match `^v*` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0"} +---- + +. You can update your organization's auto-prune policy by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationautoprunepolicy[`PUT /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid}`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' "/api/v1/organization//autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0", "method": "creation_date", "value": "4d", "tagPattern": "^v*", "tagPatternMatches": true}, {"uuid": "da4d0ad7-3c2d-4be8-af63-9c51f9a501bc", "method": "number_of_tags", "value": 10, "tagPattern": null, "tagPatternMatches": true}, {"uuid": "17b9fd96-1537-4462-a830-7f53b43f94c2", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true}]} +---- + +. You can delete the auto-prune policy for your organization by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/73d64f05-d587-42d9-af6d-e726a4a80d6e +---- \ No newline at end of file diff --git a/modules/creating-policy-api-current-user.adoc b/modules/creating-policy-api-current-user.adoc new file mode 100644 index 000000000..1407aa297 --- /dev/null +++ b/modules/creating-policy-api-current-user.adoc @@ -0,0 +1,66 @@ +[id="creating-policy-api-current-user"] += Creating an auto-prune policy for a namespace for the current user by using the API + +You can use {productname} API endpoints to manage auto-pruning policies for your account. + +[NOTE] +==== +The use of `/user/` in the following commands represents the user that is currently logged into {productname}. 
+==== + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following `POST` command create a new policy that limits the number of tags for the current user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/user/autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. 
++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- diff --git a/modules/creating-policy-api-other-user.adoc b/modules/creating-policy-api-other-user.adoc new file mode 100644 index 000000000..c92c9d98d --- /dev/null +++ b/modules/creating-policy-api-other-user.adoc @@ -0,0 +1,100 @@ + +[id="creating-policy-api-other-user"] += Creating an auto-prune policy on a repository for a user with the API + +You can use {productname} API endpoints to manage auto-pruning policies on a repository for user accounts that are not your own, so long as you have `admin` privileges on the repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. +* You have `admin` privileges on the repository that you are creating the policy for. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserautoprunepolicy[`POST /api/v1/repository///autoprunepolicy/`] command create a new policy that limits the number of tags for the user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' https:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- + +. Optional. You can add an additional policy for the current user and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' \ + "http:///api/v1/repository///autoprunepolicy/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b3797bcd-de72-4b71-9b1e-726dabc971be"} +---- + +. You can update your policy for the current user by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateuserautoprunepolicy[`PUT /api/v1/repository///autoprunepolicy/`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^test.", + "tagPatternMatches": true + }' "https:///api/v1/repository///autoprunepolicy/" +---- ++ +Updating a policy does not return output in the CLI. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/7726f79c-cbc7-490e-98dd-becdc6fefce7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "81ee77ec-496a-4a0a-9241-eca49437d15b", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. 
++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- diff --git a/modules/creating-repository-policy-api.adoc b/modules/creating-repository-policy-api.adoc new file mode 100644 index 000000000..b0a7d64fa --- /dev/null +++ b/modules/creating-repository-policy-api.adoc @@ -0,0 +1,110 @@ +[id="creating-repository-policy-api"] += Creating an auto-prune policy for a repository using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for an repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationautoprunepolicy[`POST /api/v1/repository/{repository}/autoprunepolicy/`] command create a new policy that limits the number of tags allowed in an organization: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "creation_date", "value": "7d"}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- + +. Optional. You can add an additional policy and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "", + "value": "<7d>", + "tagPattern": "<^test.>*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/repository///autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `false` makes it so that all tags that _do not_ match the given regex pattern are pruned. In this example, all tags _but_ `^test.` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b53d8d3f-2e73-40e7-96ff-736d372cd5ef"} +---- + +. You can update your policy for the repository by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepositoryautoprunepolicy[`PUT /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid}`] command and passing in the UUID. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "5", + "tagPattern": "^test.*", + "tagPatternMatches": true + }' \ + "https://quay-server.example.com/api/v1/repository///autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step to check your auto-prune policy. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. 
Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- \ No newline at end of file diff --git a/modules/creating-robot-account-api.adoc b/modules/creating-robot-account-api.adoc new file mode 100644 index 000000000..d380f3a0b --- /dev/null +++ b/modules/creating-robot-account-api.adoc @@ -0,0 +1,45 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="creating-robot-account-api"] += Creating a robot account by using the {productname} API + +Use the following procedure to create a robot account using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. 
+ +.Procedure + +* Enter the following command to create a new robot account for an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorgrobot[`PUT /api/v1/organization/{orgname}/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/organization//robots/" +---- ++ +Example output ++ +[source,terminal] +---- +{"name": "orgname+robot-name", "created": "Fri, 10 May 2024 15:11:00 -0000", "last_accessed": null, "description": "", "token": "", "unstructured_metadata": null} +---- + +* Enter the following command to create a new robot account for the current user with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserrobot[`PUT /api/v1/user/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " "https:///api/v1/user/robots/" +---- ++ +Example output ++ +[source,terminal] +---- +{"name": "quayadmin+robot-name", "created": "Fri, 10 May 2024 15:24:57 -0000", "last_accessed": null, "description": "", "token": "", "unstructured_metadata": null} +---- \ No newline at end of file diff --git a/modules/creating-robot-account-v2-ui.adoc b/modules/creating-robot-account-v2-ui.adoc new file mode 100644 index 000000000..36f7a0846 --- /dev/null +++ b/modules/creating-robot-account-v2-ui.adoc @@ -0,0 +1,44 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="creating-robot-account-v2-ui"] += Creating a robot account by using the UI + +Use the following procedure to create a robot account using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. 
Click the *Robot accounts* tab -> *Create robot account*. + +. In the *Provide a name for your robot account* box, enter a name, for example, `robot1`. The name of your Robot Account becomes a combination of your username plus the name of the robot, for example, `quayadmin+robot1` + +. Optional. The following options are available if desired: + +.. Add the robot account to a team. + +.. Add the robot account to a repository. + +.. Adjust the robot account's permissions. + +. On the *Review and finish* page, review the information you have provided, then click *Review and finish*. The following alert appears: *Successfully created robot account with robot name: + *. ++ +Alternatively, if you tried to create a robot account with the same name as another robot account, you might receive the following error message: *Error creating robot account*. + +. Optional. You can click *Expand* or *Collapse* to reveal descriptive information about the robot account. + +. Optional. You can change permissions of the robot account by clicking the kebab menu -> *Set repository permissions*. The following message appears: *Successfully updated repository permission*. + +. Optional. You can click the name of your robot account to obtain the following information: + +* *Robot Account*: Select this obtain the robot account token. You can regenerate the token by clicking *Regenerate token now*. +* *Kubernetes Secret*: Select this to download credentials in the form of a Kubernetes pull secret YAML file. +* *Podman*: Select this to copy a full `podman login` command line that includes the credentials. +* *Docker Configuration*: Select this to copy a full `docker login` command line that includes the credentials. 
\ No newline at end of file diff --git a/modules/creating-user-account-quay-api.adoc b/modules/creating-user-account-quay-api.adoc new file mode 100644 index 000000000..f60bed00e --- /dev/null +++ b/modules/creating-user-account-quay-api.adoc @@ -0,0 +1,70 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available + +:_content-type: CONCEPT +[id="creating-user-account-quay-api"] += Creating a user account by using the {productname} API + +Use the following procedure to create a new user for your {productname} repository by using the API. + +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a new user using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createinstalluser[`POST /api/v1/superuser/users/`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "username": "newuser", + "email": "newuser@example.com" +}' "https:///api/v1/superuser/users/" +---- ++ +Example output ++ +[source,terminal] +---- +{"username": "newuser", "email": "newuser@example.com", "password": "123456789", "encrypted_password": "/JKY9pnDcsw="} +---- + +. Navigate to your {productname} registry endpoint, for example, `quay-server.example.com` and login with the username and password generated from the API call. In this scenario, the username is `newuser` and the password is `123456789`. Alternatively, you can log in to the registry with the CLI. 
For example: ++ +[source,terminal] +---- +$ podman login +---- ++ +.Example output ++ +[source,terminal] +---- +username: newuser +password: 123456789 +---- + +. Optional. You can obtain a list of all users, including superusers, by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listallusers[`GET /api/v1/superuser/users/`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/users/" +---- ++ +[NOTE] +==== +The `GET /api/v1/superuser/users/` endpoint only returns users and superusers if `AUTHENTICATION_TYPE: Database` is set in your `config.yaml` file. It does not work for `LDAP` authentication types. +==== ++ +Example output ++ +[source,terminal] +---- +{"users": [{"kind": "user", "name": "quayadmin", "username": "quayadmin", "email": "quay@quay.com", "verified": true, "avatar": {"name": "quayadmin", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}, "super_user": true, "enabled": true}, {"kind": "user", "name": "newuser", "username": "newuser", "email": "newuser@example.com", "verified": true, "avatar": {"name": "newuser", "hash": "f338a2c83bfdde84abe2d3348994d70c34185a234cfbf32f9e323e3578e7e771", "color": "#9edae5", "kind": "user"}, "super_user": false, "enabled": true}]} +---- \ No newline at end of file diff --git a/modules/creating-user-account-quay-ui.adoc b/modules/creating-user-account-quay-ui.adoc new file mode 100644 index 000000000..f3788e2a9 --- /dev/null +++ b/modules/creating-user-account-quay-ui.adoc @@ -0,0 +1,43 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available:_content-type: CONCEPT + +[id="creating-user-account-quay-ui"] += Creating a user account by using the UI + +Use the following procedure to create a new user for your {productname} repository using the UI. 
+ +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. + +.Procedure + +. Log in to your {productname} repository as the superuser. + +. In the navigation pane, select your account name, and then click *Super User Admin Panel*. + +. Click the *Users* icon in the column. + +. Click the *Create User* button. + +. Enter the new user's Username and Email address, and then click the *Create User* button. + +. You are redirected to the *Users* page, where there is now another {productname} user. ++ +[NOTE] +==== +You might need to refresh the *Users* page to show the additional user. +==== + +. On the *Users* page, click the *Options* cogwheel associated with the new user. A drop-down menu appears, as shown in the following figure: ++ +image:user-options.png[Select Options drop-down to change user passwords] + +. Click *Change Password*. + +. Add the new password, and then click *Change User Password*. ++ +The new user can now use that username and password to log in using the web UI or through their preferred container client, like Podman. \ No newline at end of file diff --git a/modules/creating-v2-oauth-access-token.adoc b/modules/creating-v2-oauth-access-token.adoc new file mode 100644 index 000000000..f409d2234 --- /dev/null +++ b/modules/creating-v2-oauth-access-token.adoc @@ -0,0 +1,46 @@ +:_content-type: PROCEDURE +[id="creating-v2-oauth-access-token"] += Creating an OCI referrers OAuth access token + +This OCI referrers OAuth access token is used to list OCI referrers of a manifest under a repository. + +.Procedure + +. Update your `config.yaml` file to include the `FEATURE_REFERRERS_API: true` field. For example: ++ +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: true +# ... +---- + +. Enter the following command to Base64 encode your credentials: ++ +[source,terminal] +---- +$ echo -n ':' | base64 +---- ++ +.Example output ++ +[source,terminal] +---- +abcdeWFkbWluOjE5ODlraWROZXQxIQ== +---- + +. 
Enter the following command to use the base64 encoded string and modify the URL endpoint to your {productname} server: ++ +[source,terminal] +---- +$ curl --location '/v2/auth?service=&scope=repository:quay/listocireferrs:pull,push' --header 'Authorization: Basic ' -k | jq +---- ++ +.Example output ++ +[source,terminal] +---- +{ + "token": " +} +---- \ No newline at end of file diff --git a/modules/custom-clair-configuration-managed-database.adoc b/modules/custom-clair-configuration-managed-database.adoc new file mode 100644 index 000000000..2fd6b1960 --- /dev/null +++ b/modules/custom-clair-configuration-managed-database.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="custom-clair-configuration-managed-database"] += Running a custom Clair configuration with a managed Clair database + +In some cases, users might want to run a custom Clair configuration with a managed Clair database. This is useful in the following scenarios: + +* When a user wants to disable specific updater resources. +* When a user is running {productname} in a disconnected environment. For more information about running Clair in a disconnected environment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#clair-disconnected-environments[Clair in disconnected environments]. ++ +[NOTE] +==== +* If you are running {productname} in a disconnected environment, the `airgap` parameter of your `clair-config.yaml` must be set to `true`. +* If you are running {productname} in a disconnected environment, you should disable all updater components. 
+==== diff --git a/modules/database-troubleshooting-issues.adoc b/modules/database-troubleshooting-issues.adoc new file mode 100644 index 000000000..b638e3483 --- /dev/null +++ b/modules/database-troubleshooting-issues.adoc @@ -0,0 +1,261 @@ +:_content-type: CONCEPT +[id="database-troubleshooting-issues"] += Troubleshooting {productname} database issues + +Use the following procedures to troubleshoot the PostgreSQL database. + +//// +[id="checking-deployment-type"] +== Checking the type of deployment + +Check whether your database is deployed as a container on a virtual machine, or deployed on {ocp} as a pod. + +[id="checking-container-pod-status"] +== Checking the container or pod status + +Use the following procedure to check the status of the database pod or container. + +.Procedure + +. Enter the following command to check the status of the pod or container. + +.. If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc get pods +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman ps -a +---- +//// + +[id="interact-with-database"] +== Interacting with the {productname} database + +Use the following procedure to interact with the PostgreSQL database. + +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== + +[NOTE] +==== +Interacting with the PostgreSQL database can also be used to troubleshoot authorization and authentication issues. +==== + +.Procedure + +. Exec into the {productname} database. + +.. Enter the following commands to exec into the {productname} database pod on {ocp}: ++ +[source,terminal] +---- +$ oc exec -it -- psql +---- + +.. 
Enter the following command to exec into the {productname} database on a standalone deployment: ++ +[source,terminal] +---- +$ sudo podman exec -it /bin/bash +---- + +. Enter the PostgreSQL shell. ++ +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== + +.. If you are using the {productname} Operator, enter the following command to enter the PostgreSQL shell: ++ +[source,terminal] +---- +$ oc rsh psql -U your_username -d your_database_name +---- + +.. If you are on a standalone {productname} deployment, enter the following command to enter the PostgreSQL shell: ++ +[source,terminal] +---- +bash-4.4$ psql -U your_username -d your_database_name +---- + +[id="troubleshooting-crashloop-backoff-state"] +== Troubleshooting crashloopbackoff states + +Use the following procedure to troubleshoot `crashloopbackoff` states. + +.Procedure + +. If your container or pod is in a `crashloopbackoff` state, you can enter the following commands. + +.. Enter the following command to scale down the {productname} Operator: ++ +[source,terminal] +---- +$ oc scale deployment/quay-operator.v3.8.z --replicas=0 +---- ++ +.Example output ++ +[source,terminal] +---- +deployment.apps/quay-operator.v3.8.z scaled +---- + +.. Enter the following command to scale down the {productname} database: ++ +[source,terminal] +---- +$ oc scale deployment/ --replicas=0 +---- ++ +.Example output ++ +[source,terminal] +---- +deployment.apps/ scaled +---- + +.. Enter the following command to edit the {productname} database: ++ +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== ++ +[source,terminal] +---- +$ oc edit deployment +---- ++ +[source,yaml] +---- +... 
+ template: + metadata: + creationTimestamp: null + labels: + quay-component: + quay-operator/quayregistry: quay-operator.v3.8.z + spec: + containers: + - env: + - name: POSTGRESQL_USER + value: postgres + - name: POSTGRESQL_DATABASE + value: postgres + - name: POSTGRESQL_PASSWORD + value: postgres + - name: POSTGRESQL_ADMIN_PASSWORD + value: postgres + - name: POSTGRESQL_MAX_CONNECTIONS + value: "1000" + image: registry.redhat.io/rhel8/postgresql-10@sha256:a52ad402458ec8ef3f275972c6ebed05ad64398f884404b9bb8e3010c5c95291 + imagePullPolicy: IfNotPresent + name: postgres + command: ["/bin/bash", "-c", "sleep 86400"] <1> +... +---- +<1> Add this line in the same indentation. ++ +.Example output ++ +[source,terminal] +---- +deployment.apps/ edited +---- + +.. Execute the following command inside of your ``: ++ +[source,terminal] +---- +$ oc exec -it -- cat /var/lib/pgsql/data/userdata/postgresql/logs/* /path/to/desired_directory_on_host +---- + +[id="connectivity-networking"] +== Checking the connectivity between {productname} and the database pod + +Use the following procedure to check the connectivity between {productname} and the database pod + +.Procedure + +. Check the connectivity between {productname} and the database pod. + +.. If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it _quay_pod_name_ -- curl -v telnet://:5432 +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it curl -v telnet://:5432 +---- + +[id="check-resource-allocation"] +== Checking resource allocation + +Use the following procedure to check resource allocation. + +.Procedure + +. Obtain a list of running containers. + +. Monitor disk usage of your {productname} deployment. + +.. If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it -- df -ah +---- + +.. 
If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it df -ah +---- + +. Monitor other resource usage. + +.. Enter the following command to check resource allocation on a {productname} Operator deployment: ++ +[source,terminal] +---- +$ oc adm top pods +---- + +.. Enter the following command to check the status of a specific pod on a standalone deployment of {productname}: ++ +[source,terminal] +---- +$ podman pod stats +---- + +.. Enter the following command to check the status of a specific container on a standalone deployment of {productname}: ++ +[source,terminal] +---- +$ podman stats +---- ++ +The following information is returned: ++ +* *CPU %*. The percentage of CPU usage by the container since the last measurement. This value represents the container's share of the available CPU resources. +* *MEM USAGE / LIMIT*. The current memory usage of the container followed by its memory limit. The values are displayed in the format `current_usage / memory_limit`. For example, `300.4MiB / 7.795GiB` indicates that the container is currently using 300.4 megabytes of memory out of a limit of 7.795 gigabytes. +* *MEM %*. The percentage of memory usage by the container in relation to its memory limit. +* *NET I/O*. The network I/O (input/output) statistics of the container. It displays the amount of data transmitted and received by the container over the network. The values are displayed in the format: `transmitted_bytes / received_bytes`. +* *BLOCK I/O*. The block I/O (input/output) statistics of the container. It represents the amount of data read from and written to the block devices (for example, disks) used by the container. The values are displayed in the format `read_bytes / written_bytes`. 
\ No newline at end of file diff --git a/modules/database-troubleshooting.adoc b/modules/database-troubleshooting.adoc new file mode 100644 index 000000000..642976490 --- /dev/null +++ b/modules/database-troubleshooting.adoc @@ -0,0 +1,33 @@ +:_content-type: CONCEPT +[id="database-troubleshooting"] += Troubleshooting the {productname} database + +The PostgreSQL database used for {productname} stores various types of information related to container images and their management. Some of the key pieces of information that the PostgreSQL database stores include: + +* *Image Metadata*. The database stores metadata associated with container images, such as image names, versions, creation timestamps, and the user or organization that owns the image. This information allows for easy identification and organization of container images within the registry. + +* *Image Tags*. {productname} allows users to assign tags to container images, enabling convenient labeling and versioning. The PostgreSQL database maintains the mapping between image tags and their corresponding image manifests, allowing users to retrieve specific versions of container images based on the provided tags. + +* *Image Layers*. Container images are composed of multiple layers, which are stored as individual objects. The database records information about these layers, including their order, checksums, and sizes. This data is crucial for efficient storage and retrieval of container images. + +* *User and Organization Data*. {productname} supports user and organization management, allowing users to authenticate and manage access to container images. The PostgreSQL database stores user and organization information, including usernames, email addresses, authentication tokens, and access permissions. + +* *Repository Information*. {productname} organizes container images into repositories, which act as logical units for grouping related images. 
The database maintains repository data, including names, descriptions, visibility settings, and access control information, enabling users to manage and share their repositories effectively. + +* *Event Logs*. {productname} tracks various events and activities related to image management and repository operations. These event logs, including image pushes, pulls, deletions, and repository modifications, are stored in the PostgreSQL database, providing an audit trail and allowing administrators to monitor and analyze system activities. + +The content in this section covers the following procedures: + +* *Checking the type of deployment*: Determine if the database is deployed as a container on a virtual machine or as a pod on {ocp}. + +* *Checking the container or pod status*: Verify the status of the `database` pod or container using specific commands based on the deployment type. + +* *Examining the database container or pod logs*: Access and examine the logs of the database pod or container, including commands for different deployment types. + +* *Checking the connectivity between {productname} and the database pod*: Check the connectivity between {productname} and the `database` pod using relevant commands. + +* *Checking the database configuration*: Check the database configuration at various levels ({ocp} or PostgreSQL level) based on the deployment type. + +* *Checking resource allocation*: Monitor resource allocation for the {productname} deployment, including disk usage and other resource usage. + +* *Interacting with the {productname} database*: Learn how to interact with the PostgreSQL database, including commands to access and query databases. 
diff --git a/modules/default-permissions-api.adoc b/modules/default-permissions-api.adoc new file mode 100644 index 000000000..4ddff5cda --- /dev/null +++ b/modules/default-permissions-api.adoc @@ -0,0 +1,87 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="default-permissions-api"] += Creating and managing default permissions by using the API + +Use the following procedures to manage default permissions using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a default permission with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationprototypepermission[`POST /api/v1/organization/{orgname}/prototypes`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" --data '{ + "role": "", + "delegate": { + "name": "", + "kind": "user" + }, + "activating_user": { + "name": "" + } + }' https:///api/v1/organization//prototypes +---- ++ +.Example output ++ +[source,terminal] +---- +{"activating_user": {"name": "test-org+test", "is_robot": true, "kind": "user", "is_org_member": true, "avatar": {"name": "test-org+test", "hash": "aa85264436fe9839e7160bf349100a9b71403a5e9ec684d5b5e9571f6c821370", "color": "#8c564b", "kind": "robot"}}, "delegate": {"name": "testuser", "is_robot": false, "kind": "user", "is_org_member": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}}, "role": "admin", "id": "977dc2bc-bc75-411d-82b3-604e5b79a493"} 
+---- + +. Enter the following command to update a default permission using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationprototypepermission[`PUT /api/v1/organization/{orgname}/prototypes/{prototypeid}`] endpoint, for example, if you want to change the permission type. You must include the ID that was returned when you created the policy. ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "role": "write" + }' \ + https:///api/v1/organization//prototypes/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"activating_user": {"name": "test-org+test", "is_robot": true, "kind": "user", "is_org_member": true, "avatar": {"name": "test-org+test", "hash": "aa85264436fe9839e7160bf349100a9b71403a5e9ec684d5b5e9571f6c821370", "color": "#8c564b", "kind": "robot"}}, "delegate": {"name": "testuser", "is_robot": false, "kind": "user", "is_org_member": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}}, "role": "write", "id": "977dc2bc-bc75-411d-82b3-604e5b79a493"} +---- + +. You can delete the permission by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationprototypepermission[`DELETE /api/v1/organization/{orgname}/prototypes/{prototypeid}`] command: ++ +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes/ +---- ++ +This command does not return an output. 
Instead, you can obtain a list of all permissions by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationprototypepermissions[`GET /api/v1/organization/{orgname}/prototypes`] command: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/organization//prototypes +---- ++ +.Example output ++ +[source,terminal] +---- +{"prototypes": []} +---- \ No newline at end of file diff --git a/modules/default-permissions-v2-ui.adoc b/modules/default-permissions-v2-ui.adoc new file mode 100644 index 000000000..feaa61a40 --- /dev/null +++ b/modules/default-permissions-v2-ui.adoc @@ -0,0 +1,35 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="default-permissions-v2-ui"] += Creating and managing default permissions by using the UI + +Default permissions define permissions that should be granted automatically to a repository when it is created, in addition to the default of the repository's creator. Permissions are assigned based on the user who created the repository. + +Use the following procedure to create default permissions using the {productname} v2 UI. + +.Procedure + +. Click the name of an organization. + +. Click *Default permissions*. + +. Click *Create default permissions*. A toggle drawer appears. + +. Select either *Anyone* or *Specific user* to create a default permission when a repository is created. + +.. If selecting *Anyone*, the following information must be provided: ++ +* **Applied to**. Search, invite, or add a user/robot/team. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +.. If selecting *Specific user*, the following information must be provided: ++ +* **Repository creator**. Provide either a user or robot account. +* **Applied to**. 
Provide a username, robot account, or team name. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +. Click *Create default permission*. A confirmation box appears, returning the following alert: *Successfully created default permission for creator*. diff --git a/modules/deleting-a-tag-api.adoc b/modules/deleting-a-tag-api.adoc new file mode 100644 index 000000000..540bb025e --- /dev/null +++ b/modules/deleting-a-tag-api.adoc @@ -0,0 +1,40 @@ +:_content-type: PROCEDURE +[id="deleting-tags-api"] += Deleting an image by using the API + +You can delete an old image tag by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. You can delete an image tag by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletefulltag[`DELETE /api/v1/repository/{repository}/tag/{tag}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + https:///api/v1/repository///tag/ +---- ++ +This command does not return output in the CLI. Continue on to the next step to return a list of tags. + +. To see a list of tags after deleting a tag, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": false, "start_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715697708, "end_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:41:48 -0000", "expiration": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715695488, "end_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "expiration": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715631517, "end_ts": 1715695488, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Mon, 13 May 2024 20:18:37 -0000", "expiration": "Tue, 14 May 2024 14:04:48 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/deleting-a-tag.adoc b/modules/deleting-a-tag.adoc new file mode 100644 index 000000000..7615a5ce1 --- /dev/null +++ b/modules/deleting-a-tag.adoc @@ -0,0 +1,29 @@ +:_content-type: CONCEPT 
+[id="deleting-a-tag"] += Deleting an image tag + +Deleting an image tag removes that specific version of the image from the registry. + +To delete an image tag, use the following procedure. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. ++ +[NOTE] +==== +Deleting an image tag can be reverted based on the amount of time allotted to the _time machine_ feature. For more information, see "Reverting tag changes". +==== \ No newline at end of file diff --git a/modules/deleting-an-image-repository-via-the-api.adoc b/modules/deleting-an-image-repository-via-the-api.adoc new file mode 100644 index 000000000..e404cba65 --- /dev/null +++ b/modules/deleting-an-image-repository-via-the-api.adoc @@ -0,0 +1,38 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="deleting-repository-api"] += Deleting a repository by using the {productname} API + +Use the following procedure to delete a repository using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Enter the following command to delete a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleterepository[`DELETE /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- + +. The CLI does not return information when deleting a repository from the CLI. To confirm deletion, you can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] command to see if details are returned for the deleted repository: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http://quay-server.example.com/api/v1/error/not_found", "status": 404} +---- \ No newline at end of file diff --git a/modules/deleting-an-image-repository-via-ui.adoc b/modules/deleting-an-image-repository-via-ui.adoc new file mode 100644 index 000000000..b2dd4aac5 --- /dev/null +++ b/modules/deleting-an-image-repository-via-ui.adoc @@ -0,0 +1,27 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="deleting-repository-v2"] += Deleting a repository by using the UI + +You can delete a repository directly on the UI. + +.Prerequisites + +* You have created a repository. + +.Procedure + +. On the *Repositories* page of the v2 UI, check the box of the repository that you want to delete, for example, `quayadmin/busybox`. + +. Click the *Actions* drop-down menu. + +. Click *Delete*. + +. Type *confirm* in the box, and then click *Delete*. 
++ +After deletion, you are returned to the *Repositories* page. \ No newline at end of file diff --git a/modules/deleting-oauth-access-token.adoc b/modules/deleting-oauth-access-token.adoc new file mode 100644 index 000000000..e166139ed --- /dev/null +++ b/modules/deleting-oauth-access-token.adoc @@ -0,0 +1,22 @@ +:_content-type: PROCEDURE +[id="deleting-oauth-access-token"] += Deleting an OAuth 2 access token + +Because OAuth 2 access tokens are created through the OAuth application, they cannot be rotated or renewed. In the event that a token is compromised, or you need to delete a token, you must delete its associated application through the {productname} UI. + +[IMPORTANT] +==== +Deleting an application deletes all tokens that were made within that specific application. Use with caution. +==== + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +. On the {productname} UI, click the name of the organization hosting the application. Then, in the navigation pane, click *Applications*. + +. Click the application name, for example, *Test application*. + +. In the navigation pane, click *Delete Application*. You are redirected to a new page. Click *Delete application* and confirm your decision. \ No newline at end of file diff --git a/modules/deleting-robot-account-api.adoc b/modules/deleting-robot-account-api.adoc new file mode 100644 index 000000000..e898a299d --- /dev/null +++ b/modules/deleting-robot-account-api.adoc @@ -0,0 +1,65 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="deleting-robot-account-api"] += Deleting a robot account by using the {productname} API + +Use the following procedure to delete a robot account using the {productname} API. 
+ +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to delete a robot account for an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorgrobot[`DELETE /api/v1/organization/{orgname}/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots/" +---- + +. The CLI does not return information when deleting a robot account with the API. To confirm deletion, you can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorgrobots[`GET /api/v1/organization/{orgname}/robots`] command to see if details are returned for the robot account: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/organization//robots" +---- ++ +Example output ++ +[source,terminal] +---- +{"robots": []} +---- + +. Enter the following command to delete a robot account for the current user with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserrobot[`DELETE /api/v1/user/robots/{robot_shortname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- + +. The CLI does not return information when deleting a robot account for the current user with the API. 
To confirm deletion, you can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserrobot[`GET /api/v1/user/robots/{robot_shortname}`] command to see if details are returned for the robot account: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots/" +---- ++ +Example output ++ +[source,terminal] +---- +{"message":"Could not find robot with specified username"} +---- \ No newline at end of file diff --git a/modules/deleting-robot-account-v2-ui.adoc b/modules/deleting-robot-account-v2-ui.adoc new file mode 100644 index 000000000..fe26e2c7f --- /dev/null +++ b/modules/deleting-robot-account-v2-ui.adoc @@ -0,0 +1,26 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="deleting-robot-account-ui"] += Deleting a robot account by using the UI + +Use the following procedure to delete a robot account using the {productname} UI. + +.Procedure + +. Log into your {productname} registry: + +. Click the name of the Organization that has the robot account. + +. Click *Robot accounts*. + +. Check the box of the robot account to be deleted. + +. Click the kebab menu. + +. Click *Delete*. + +. Type `confirm` into the textbox, then click *Delete*. \ No newline at end of file diff --git a/modules/deleting-tag-permanently.adoc b/modules/deleting-tag-permanently.adoc new file mode 100644 index 000000000..8f6938876 --- /dev/null +++ b/modules/deleting-tag-permanently.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="deleting-tag-permanently"] += Permanently deleting an image tag + +In some cases, users might want to delete an image tag outside of the time machine window. Use the following procedure to manually delete an image tag permanently. + +[IMPORTANT] +==== +The results of the following procedure cannot be undone. 
Use with caution. +==== + +[id="permanently-deleting-image-tag-v2-ui"] +== Permanently deleting an image tag using the {productname} v2 UI + +Use the following procedure to permanently delete an image tag using the {productname} v2 UI. + +.Prerequisites + +* You have set `FEATURE_UI_V2` to `true` in your `config.yaml` file. + +.Procedure + +. Ensure that the `PERMANENTLY_DELETE_TAGS` and `RESET_CHILD_MANIFEST_EXPIRATION` parameters are set to `true` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. In the navigation pane, click *Repositories*. + +. Click the name of the repository, for example, *quayadmin/busybox*. + +. Check the box of the image tag that will be deleted, for example, *test*. + +. Click *Actions* -> *Permanently Delete*. ++ +[IMPORTANT] +==== +This action is permanent and cannot be undone. +==== + + +[id="permanently-deleting-image-tag-legacy-ui"] +== Permanently deleting an image tag using the {productname} legacy UI + +Use the following procedure to permanently delete an image tag using the {productname} legacy UI. + +.Procedure + +. Ensure that the `PERMANENTLY_DELETE_TAGS` and `RESET_CHILD_MANIFEST_EXPIRATION` parameters are set to `true` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. On the {productname} UI, click *Repositories* and the name of the repository that contains the image tag you will delete, for example, *quayadmin/busybox*. + +. In the navigation pane, click *Tags*. + +. Check the box of the name of the tag you want to delete, for example, *test*. + +. Click the *Actions* drop down menu and select *Delete Tags* -> *Delete Tag*. + +. Click *Tag History* in the navigation pane. + +. On the name of the tag that was just deleted, for example, `test`, click *Delete test* under the *Permanently Delete* category. 
For example: ++ +.Permanently delete image tag ++ +image:permanently-delete-image-tag.png[Permanently delete image tag] ++ +[IMPORTANT] +==== +This action is permanent and cannot be undone. +==== diff --git a/modules/deleting-team-within-organization-api.adoc b/modules/deleting-team-within-organization-api.adoc new file mode 100644 index 000000000..e4e51b8ea --- /dev/null +++ b/modules/deleting-team-within-organization-api.adoc @@ -0,0 +1,28 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="deleting-team-within-organization-api"] += Deleting a team within an organization by using the API + +Use the following procedure to delete a team within an organization by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* You can delete a team within an organization by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationteam[`DELETE /api/v1/organization/{orgname}/team/{teamname}`] command: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team/" +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/deleting-user-cli-api.adoc b/modules/deleting-user-cli-api.adoc new file mode 100644 index 000000000..1598bffda --- /dev/null +++ b/modules/deleting-user-cli-api.adoc @@ -0,0 +1,42 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="deleting-user-cli-api"] += Deleting a user by using the {productname} API + +Use the following procedure to delete a user from {productname} using the API. + +[IMPORTANT] +==== +After deleting the user, any repositories that this user had in their private account become unavailable. +==== + +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteinstalluser[`DELETE /api/v1/superuser/users/{username}`] command to delete a user from the command line: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " https:///api/v1/superuser/users/ +---- + +. The CLI does not return information when deleting a user from the CLI. To confirm deletion, you can check the {productname} UI by navigating to *Superuser Admin Panel* -> *Users*, or by entering the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listallusers[`GET /api/v1/superuser/users/`] command. You can then check to see if they are present. 
++ +[NOTE] +==== +The `GET /api/v1/superuser/users/` endpoint only returns users and superusers if `AUTHENTICATION_TYPE: Database` is set in your `config.yaml` file. It does not work for `LDAP` authentication types. +==== ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "https:///api/v1/superuser/users/" +---- diff --git a/modules/deleting-user-cli.adoc b/modules/deleting-user-cli.adoc new file mode 100644 index 000000000..66b858b7b --- /dev/null +++ b/modules/deleting-user-cli.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="deleting-user-cli"] += Deleting a {productname} user + +You can delete a user on the {productname} UI or by using the {productname} API. + +[NOTE] +==== +In some cases, when accessing the *Users* tab in the *Superuser Admin Panel* of the {productname} UI, you might encounter a situation where no users are listed. Instead, a message appears, indicating that {productname} is configured to use external authentication, and users can only be created in that system. + +This error occurs for one of two reasons: + +* The web UI times out when loading users. When this happens, users are not accessible to perform any operations on. +* On LDAP authentication. When a userID is changed but the associated email is not. Currently, {productname} does not allow the creation of a new user with an old email address. + +When this happens, you must delete the user using the {productname} API. +==== \ No newline at end of file diff --git a/modules/deleting-user-ui.adoc b/modules/deleting-user-ui.adoc new file mode 100644 index 000000000..d737ec66c --- /dev/null +++ b/modules/deleting-user-ui.adoc @@ -0,0 +1,38 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="deleting-user-ui"] += Deleting a user by using the UI + +Use the following procedure to delete a user from your {productname} repository using the UI. 
Note that after deleting the user, any repositories that the user had in their private account become unavailable. + +[NOTE] +==== +In some cases, when accessing the *Users* tab in the *Superuser Admin Panel* of the {productname} UI, you might encounter a situation where no users are listed. Instead, a message appears, indicating that {productname} is configured to use external authentication, and users can only be created in that system. + +This error occurs for one of two reasons: + +* The web UI times out when loading users. When this happens, users are not accessible to perform any operations on. +* On LDAP authentication. When a userID is changed but the associated email is not. Currently, {productname} does not allow the creation of a new user with an old email address. + +When this happens, you must delete the user using the {productname} API. +==== + +.Prerequisites + +* You are logged into your {productname} deployment as a superuser. + +.Procedure + +. Log in to your {productname} repository as the superuser. + +. In the navigation pane, select your account name, and then click *Super User Admin Panel*. + +. Click the *Users* icon in the navigation pane. + +. Click the *Options* cogwheel beside the user to be deleted. + +. Click *Delete User*, and then confirm deletion by clicking *Delete User*. \ No newline at end of file diff --git a/modules/deploy-local-quay-ipv6.adoc b/modules/deploy-local-quay-ipv6.adoc new file mode 100644 index 000000000..0ed30e136 --- /dev/null +++ b/modules/deploy-local-quay-ipv6.adoc @@ -0,0 +1,31 @@ +:_content-type: PROCEDURE +[id="deploy-local-quay-ipv6"] += Deploying a local instance of {productname} in IPv6 + +After you have created a new dual-stack container network, you can deploy a local instance of {productname}. + +.Prerequisites + +* You have configured Redis, your PostgreSQL database, local image storage, and created a `config.yaml` file with the desired settings. + +.Procedure + +. 
In your {productname} `config.yaml` file, set the `FEATURE_LISTEN_IP_VERSION` field to `IPv6`. For example: ++ +[source,yaml] +---- +# ... +FEATURE_LISTEN_IP_VERSION: IPv6 +# ... +---- + +. Enter the following command to start the `Quay` registry container, specifying the appropriate volumes for configuration data and local storage for image data. Note that if you are using dual-stack, you must specify explicit IPv6 port mapping when starting the container. ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p "[::]:80:8080" -p "[::]:443:8443" \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- \ No newline at end of file diff --git a/modules/deploying-the-operator-using-initial-configuration.adoc b/modules/deploying-the-operator-using-initial-configuration.adoc new file mode 100644 index 000000000..3306fd5b3 --- /dev/null +++ b/modules/deploying-the-operator-using-initial-configuration.adoc @@ -0,0 +1,43 @@ +:_content-type: PROCEDURE +[id="deploying-the-operator-using-initial-configuration"] +== Deploying the {productname} Operator using the initial configuration + +Use the following procedure to deploy {productname} on {ocp} using the initial configuration. + +.Prerequisites + +* You have installed the `oc` CLI. + +.Procedure + +. Create a secret using the configuration file: ++ +[source,terminal] +---- +$ oc create secret generic -n quay-enterprise --from-file config.yaml=./config.yaml init-config-bundle-secret +---- + +. Create a `quayregistry.yaml` file. Identify the unmanaged components and reference the created secret, for example: ++ + +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: init-config-bundle-secret +---- + +. 
Deploy the {productname} registry: ++ +[source,terminal] +---- +$ oc create -n quay-enterprise -f quayregistry.yaml +---- + +.Next Steps + +* xref:using-the-api-to-create-first-user[Using the API to create the first user] diff --git a/modules/deployment-topology-with-storage-proxy.adoc b/modules/deployment-topology-with-storage-proxy.adoc new file mode 100644 index 000000000..5e5f78717 --- /dev/null +++ b/modules/deployment-topology-with-storage-proxy.adoc @@ -0,0 +1,10 @@ +:_content-type: CONCEPT +[id="deployment-topology-with-storage-proxy"] += {productname} deployment topology with storage proxy + +The following image provides a high level overview of a {productname} deployment topology with storage proxy configured: + +.{productname} deployment topology with storage proxy +image:178_Quay_architecture_0821_deploy_topology_storage.png[{productname} deployment topology with storage proxy] + +With storage proxy configured, all traffic passes through the public {productname} endpoint. \ No newline at end of file diff --git a/modules/deployment-topology.adoc b/modules/deployment-topology.adoc new file mode 100644 index 000000000..571f1bdf7 --- /dev/null +++ b/modules/deployment-topology.adoc @@ -0,0 +1,10 @@ +:_content-type: CONCEPT +[id="deployment-topology"] += {productname} deployment topology + +The following image provides a high level overview of a {productname} deployment topology: + +.{productname} deployment topology +image:178_Quay_architecture_0821_deploy_topology.png[{productname} deployment topology] + +In this deployment, all pushes, user interface, and API requests are received by public {productname} endpoints. Pulls are served directly from `object storage`. 
\ No newline at end of file diff --git a/modules/disable-oci-artifacts-in-quay.adoc b/modules/disable-oci-artifacts-in-quay.adoc new file mode 100644 index 000000000..f27e6e510 --- /dev/null +++ b/modules/disable-oci-artifacts-in-quay.adoc @@ -0,0 +1,14 @@ +:_content-type: REFERENCE +[id="disable-oci-artifacts-in-quay"] += Disabling OCI artifacts in {productname} + +Use the following procedure to disable support for OCI artifacts. + +.Procedure + +* Disable OCI artifact support by setting `FEATURE_GENERAL_OCI_SUPPORT` to `false` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: false +---- \ No newline at end of file diff --git a/modules/disabling-robot-account.adoc b/modules/disabling-robot-account.adoc new file mode 100644 index 000000000..c0a2cf5cb --- /dev/null +++ b/modules/disabling-robot-account.adoc @@ -0,0 +1,123 @@ +:_content-type: CONCEPT +[id="disabling-robot-account"] += Disabling robot accounts by using the UI + +{productname} administrators can manage robot accounts by preventing users from creating new robot accounts. + +[IMPORTANT] +==== +Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` breaks mirroring configurations. Users mirroring repositories should not set `ROBOTS_DISALLOW` to `true` in their `config.yaml` file. This is a known issue and will be fixed in a future release of {productname}. +==== + +//// +Additionally, {productname} administrators can add robot accounts to allowlists when disallowing the creation of new robot accounts. This ensures operability of approved robot accounts and a seamless workflow in mirroring configurations. + + +[IMPORTANT] +==== +Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` without allowlisting supplementary robot accounts breaks mirroring configurations. 
You must allowlist robot accounts with the `ROBOTS_WHITELIST` variable when managing robot accounts with the `ROBOTS_DISALLOW` field. +==== +//// + +Use the following procedure to disable robot account creation. + +.Prerequisites + +* You have created multiple robot accounts. + +.Procedure + +. Update your `config.yaml` field to add the `ROBOTS_DISALLOW` variable, for example: ++ +[source,yaml] +---- +ROBOTS_DISALLOW: true +---- + +. Restart your {productname} deployment. + +.Verification: Creating a new robot account + +. Navigate to your {productname} repository. + +. Click the name of a repository. + +. In the navigation pane, click *Robot Accounts*. + +. Click *Create Robot Account*. + +. Enter a name for the robot account, for example, `+`. + +. Click *Create robot account* to confirm creation. The following message appears: `Cannot create robot account. Robot accounts have been disabled. Please contact your administrator.` + +//// +.Verification: Pushing an image with an allowlisted robot account + +. On the command-line interface (CLI) log in as one of the allowlisted robot accounts by entering the following command: ++ +[source,terminal] +---- +$ podman login -u="+" -p="KETJ6VN0WT8YLLNXUJJ4454ZI6TZJ98NV41OE02PC2IQXVXRFQ1EJ36V12345678" +---- + +. Enter the following command to pull an example image: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- + +. Push the image by entering the following command: ++ +[source,terminal] +---- +$ podman push --tls-verify=false //busybox:test +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 702a604e206f skipped: already exists +Copying config a416a98b71 done +Writing manifest to image destination +Storing signatures +---- +//// + +.Verification: Logging into a robot account + +. 
On the command-line interface (CLI), attempt to log in as one of the robot accounts by entering the following command: ++ +[source,terminal] +---- +$ podman login -u="+" -p="KETJ6VN0WT8YLLNXUJJ4454ZI6TZJ98NV41OE02PC2IQXVXRFQ1EJ36V12345678" +---- ++ +The following error message is returned: ++ +[source,terminal] +---- +Error: logging into "": invalid username/password +---- + +. You can pass in the `log-level=debug` flag to confirm that robot accounts have been deactivated: ++ +[source,terminal] +---- +$ podman login -u="+" -p="KETJ6VN0WT8YLLNXUJJ4454ZI6TZJ98NV41OE02PC2IQXVXRFQ1EJ36V12345678" --log-level=debug +---- ++ +[source,terminal] +---- +... +DEBU[0000] error logging into "quay-server.example.com": unable to retrieve auth token: invalid username/password: unauthorized: Robot accounts have been disabled. Please contact your administrator. +---- \ No newline at end of file diff --git a/modules/discovering-quay-api-endpoints.adoc b/modules/discovering-quay-api-endpoints.adoc new file mode 100644 index 000000000..f0ab60fb3 --- /dev/null +++ b/modules/discovering-quay-api-endpoints.adoc @@ -0,0 +1,30 @@ +:_content-type: PROCEDURE +[id="discovering-quay-api-endpoints"] += Discovering {productname} API endpoints + +{productname} API endpoints are discoverable by using the API. + +Use the following procedure to discover available API endpoints. + +.Prerequisites + +* You have created an OAuth 2 access token. 
+ +.Procedure + +* Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#discovery_2[`GET /api/v1/discovery`] command to list all of the API endpoints available in the swagger API format: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/discovery?query=true" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +--- +: "Manage the tags of a repository."}, {"name": "team", "description": "Create, list and manage an organization's teams."}, {"name": "trigger", "description": "Create, list and manage build triggers."}, {"name": "user", "description": "Manage the current user."}, {"name": "userfiles", "description": ""}]} +--- +---- diff --git a/modules/docker-failing-pulls.adoc b/modules/docker-failing-pulls.adoc new file mode 100644 index 000000000..b110ccab6 --- /dev/null +++ b/modules/docker-failing-pulls.adoc @@ -0,0 +1,34 @@ +:_content-type: CONCEPT +[id="docker-failing-pulls"] += Docker resulting in failing pulls + +In some cases, using `docker pull` might return the following error: `39cb5a2eab5d: Error pulling image (myimage) from quay.io/my/repository. . . Could not find repository on any of the indexed registries.` There are two reasons for receiving this error. + +* *Linux kernel bug on Ubuntu Precise Pangolin (12.04 LTS) (64-bit).* Precise has a Linux kernel bug that must be updated to use Docker. Use the following commands to update and reboot Precise. ++ +[source,terminal] +---- +$ sudo apt-get update +---- ++ +[source,terminal] +---- +$ sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring +---- ++ +[source,terminal] +---- +$ sudo reboot +---- + +* *Missing AUFS on Raring 13.04 and Saucy 13.10 (64-bit).* Not all installations of Ubuntu 13.04 or 13.10 include AUFS enabled. 
Enter the following commands to install additional Linux kernel modules: ++ +[source,terminal] +---- +$ sudo apt-get update +---- ++ +[source,terminal] +---- +$ sudo apt-get install linux-image-extra-`uname -r` +---- \ No newline at end of file diff --git a/modules/docker-io-timeout.adoc b/modules/docker-io-timeout.adoc new file mode 100644 index 000000000..ffeb7d17d --- /dev/null +++ b/modules/docker-io-timeout.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="docker-io-timeout"] += Docker is returning an i/o timeout + +In some cases, interacting with a remote repository yields an i/o timeout. For example: + +[source,terminal] +---- +$ sudo docker pull ... +---- +.Example output +[source,terminal] +---- +FATA[0021] Error response from daemon: v1 ping attempt failed with error: Get https://quay.io/v1/_ping: dial tcp: i/o timeout. +---- + +If you are running an older version of Docker, for example, 1.7 or before, this issue was related to DNS. Try restarting the Docker daemon process. If that solution does not work, you can try rebooting. For more information about this issue, see link:https://github.com/docker/docker/issues/13337[Ambiguous i/o timeouts] + +If you are running Docker 1.8 or greater, the issue is related to network infrastructure, and is likely the product of latency between the client and the registry. Ensure that there are no proxies in between the client and the registry, and that the two are geographically close, to resolve the issue. \ No newline at end of file diff --git a/modules/docker-login-error.adoc b/modules/docker-login-error.adoc new file mode 100644 index 000000000..81e7dd698 --- /dev/null +++ b/modules/docker-login-error.adoc @@ -0,0 +1,21 @@ +:_content-type: CONCEPT +[id="docker-login-error"] += Docker login is failing + +In some cases, Docker fails with the following error: `2014/01/01 12:00:00 Error: Invalid Registry endpoint: Get https://quay.io/v1/_ping: dial tcp: ping timeout`. 
This occurs for one of two reasons: + +* *You are on a high-latency, slow connection.* Docker has defined a maximum timeout of five seconds before timeout occurs. Currently, the only solution is to find a connection with better latency. + +* *Docker on OSX (through boot2docker) is out of sync.* If you are using Docker on OSX through link:https://github.com/boot2docker/boot2docker[boot2docker], the networking stack can get out of sync. To fix it, restart the `boot2docker` image. For example: ++ +[source,terminal] +---- +$ boot2docker restart +---- ++ +Alternatively, because Docker-machine supersedes boot2docker on OSX, you might need to restart Docker-machine: ++ +[source,terminal] +---- +$ docker-machine restart default +---- \ No newline at end of file diff --git a/modules/docker-timestamp-error.adoc b/modules/docker-timestamp-error.adoc new file mode 100644 index 000000000..dd6068466 --- /dev/null +++ b/modules/docker-timestamp-error.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="docker-timestamp-error"] += Incorrect timestamp + +In some cases, using `docker push` might show the incorrect timestamp for one or more images. In most cases, this means that your machine or virtual machine's time has become desynchronized. + +This occurs because the timestamp shown for the _Changed_ file is generated by the Docker client when the image is created. If the time on the machine on which the image was built is out of sync, the timestamp shown is different on {productname} as well. Usually, this means that your machine needs to be synced with the link:http://www.ntp.org/[Network Time Protocol]. 
+ +Enter the following command to force the Docker virtual machine to synchronize its clock: +[source,terminal] +---- +$ docker ssh -C 'sudo ntpclient -s -h pool.ntp.org' +---- \ No newline at end of file diff --git a/modules/downgrade-quay-deployment.adoc b/modules/downgrade-quay-deployment.adoc new file mode 100644 index 000000000..502d09e9a --- /dev/null +++ b/modules/downgrade-quay-deployment.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT + +[id="downgrade-quay-deployment"] += Downgrading {productname} + +{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, 3.12.3 -> 3.12.2. Rolling back to previous y-stream versions ({producty} -> {producty-n1}) is not supported. This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible. + +[IMPORTANT] +==== +Downgrading to previous z-streams is neither recommended nor supported by either Operator based deployments or virtual machine based deployments. Downgrading should only be done in extreme circumstances. The decision to rollback your {productname} deployment must be made in conjunction with the {productname} support and development teams. For more information, contact {productname} support. +==== \ No newline at end of file diff --git a/modules/enabling-team-sync-oidc.adoc b/modules/enabling-team-sync-oidc.adoc new file mode 100644 index 000000000..edcb0d444 --- /dev/null +++ b/modules/enabling-team-sync-oidc.adoc @@ -0,0 +1,214 @@ +:_content-type: PROCEDURE +[id="oidc-team-sync"] += Team synchronization for {productname} OIDC deployments + +Administrators can leverage an OpenID Connect (OIDC) identity provider that supports group or team synchronization to apply repository permissions to sets of users in {productname}. 
This allows administrators to avoid having to manually create and sync group definitions between {productname} and the OIDC group. + +:_content-type: PROCEDURE +[id="enabling-oidc-team-sync"] +== Enabling synchronization for {productname} OIDC deployments + +Use the following procedure to enable team synchronization when your {productname} deployment uses an OIDC authenticator. + +[IMPORTANT] +==== +The following procedure does not use a specific OIDC provider. Instead, it provides a general outline of how best to approach team synchronization between an OIDC provider and {productname}. Any OIDC provider can be used to enable team synchronization, however, setup might vary depending on your provider. +==== + +.Procedure + +. Update your `config.yaml` file with the following information: ++ +[source,yaml] +---- +AUTHENTICATION_TYPE: OIDC +# ... +OIDC_LOGIN_CONFIG: + CLIENT_ID: <1> + CLIENT_SECRET: <2> + OIDC_SERVER: <3> + SERVICE_NAME: <4> + PREFERRED_GROUP_CLAIM_NAME: <5> + LOGIN_SCOPES: [ 'openid', '' ] <6> + OIDC_DISABLE_USER_ENDPOINT: false <7> +# ... +FEATURE_TEAM_SYNCING: true <8> +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: true <9> +FEATURE_UI_V2: true +# ... +---- +<1> Required. The registered OIDC client ID for this {productname} instance. +<2> Required. The registered OIDC client secret for this {productname} instance. +<3> Required. The address of the OIDC server that is being used for authentication. This URL should be such that a `GET` request to `/.well-known/openid-configuration` returns the provider's configuration information. This configuration information is essential for the relying party (RP) to interact securely with the OpenID Connect provider and obtain necessary details for authentication and authorization processes. +<4> Required. The name of the service that is being authenticated. +<5> Required. The key name within the OIDC token payload that holds information about the user's group memberships. 
This field allows the authentication system to extract group membership information from the OIDC token so that it can be used with {productname}. +<6> Required. Adds additional scopes that {productname} uses to communicate with the OIDC provider. Must include `'openid'`. Additional scopes are optional. +<7> Whether to allow or disable the `/userinfo` endpoint. If using Azure Entra ID, set this field to `true`. Defaults to `false`. +<8> Required. Whether to allow for team membership to be synced from a backing group in the authentication engine. +<9> Optional. If enabled, non-superusers can set up team synchronization. + +. Restart your {productname} registry. + +[id="setting-up-quay-team-sync"] +== Setting up your {productname} deployment for team synchronization + +. Log in to your {productname} registry via your OIDC provider. + +. On the {productname} v2 UI dashboard, click *Create Organization*. + +. Enter an Organization name, for example, `test-org`. + +. Click the name of the Organization. + +. In the navigation pane, click *Teams and membership*. + +. Click *Create new team* and enter a name, for example, `testteam`. + +. On the *Create team* pop-up: + +.. Optional. Add this team to a repository. +.. Add a team member, for example, `user1`, by typing in the user's account name. +.. Add a robot account to this team. This page provides the option to create a robot account. + +. Click *Next*. + +. On the *Review and Finish* page, review the information that you have provided and click *Review and Finish*. + +. To enable team synchronization for your {productname} OIDC deployment, click *Enable Directory Sync* on the *Teams and membership* page. + +. You are prompted to enter the group Object ID if your OIDC authenticator is Azure Entra ID, or the group name if using a different provider. Note the message in the popup: ++ +[WARNING] +==== +Please note that once team syncing is enabled, the membership of users who are already part of the team will be revoked. 
OIDC group will be the single source of truth. This is a non-reversible action. Team's user membership from within Quay will be read-only. +==== + +. Click *Enable Sync*. + +. You are returned to the *Teams and membership* page. Note that users of this team are removed and are re-added upon logging back in. At this stage, only the robot account is still part of the team. ++ +A banner at the top of the page confirms that the team is synced: ++ +[source,text] +---- +This team is synchronized with a group in OIDC and its user membership is therefore read-only. +---- ++ +By clicking the *Directory Synchronization Config* accordion, the OIDC group that your deployment is synchronized with appears. + +. Log out of your {productname} registry and continue on to the verification steps. + +.Verification + +Use the following verification procedure to ensure that `user1` appears as a member of the team. + +. Log back in to your {productname} registry. + +. Click *Organizations* -> *test-org* -> *test-team* -> *Teams and memberships*. `user1` now appears as a team member for this team. + +.Verification + +Use the following procedure to remove `user1` from a group via your OIDC provider, and subsequently remove them from the team on {productname}. + +. Navigate to your OIDC provider's administration console. + +. Navigate to the *Users* page of your OIDC provider. The name of this page varies depending on your provider. + +. Click the name of the user associated with {productname}, for example, `user1`. + +. Remove the user from the group in the configured identity provider. + +. Remove, or unassign, the access permissions from the user. + +. Log in to your {productname} registry. + +. Click *Organizations* -> *test-org* -> *test-team* -> *Teams and memberships*. `user1` has been removed from this team. 
+ +//// +[id="setting-up-keycloak-oidc-team-sync"] +== Setting up Keycloak for OIDC team synchronization + +Keycloak is an open source software product to allow single sign-on with identity and access management. It can be leveraged with {productname} as an extra layer of security for your deployment. + +Use the following procedure to setup Keycloak for {productname} team synchronization. + +.Procedure + +. Log in to your Keycloak adminstration console. + +. In the navigation pane, click the drop down menu, and then click *Create realm*. + +. Provide a realm name, for example, `quayrealm`. + +. Click *Clients* -> *Create client*. + +. On the *General settings* page: + +.. Set the Client type to *OpenID Connect*. +.. Provide a Client ID, for example, `quaydev`. +.. Optional. Provide a name for the client. +.. Optional. Provide a description for the client. +.. Optional. Specify whether the client is always listed in the Account UI. + +. Click *Next*. + +. On the *Capability config* page: + +.. Ensure that *Client authentication* is on. +.. Optional. Turn *Authorization* on. +.. For *Authentication flow*, click *Standard flow* and *Direct access grants*. + +. Click *Next*. + +. On the *Login settings* page: + +.. Optional. Provide a Root URL. +.. Optional. Provide a Home URL. +.. Optional. Provide Valid redirect URIs. +.. Optional. Provide Valid post logout redirect URIs. +.. Optional. Provide Web origins. + +. Click *Save*. You are redirected to the *quaydev* *Settings* page. + +. In the navigation pane, click *Realm roles* -> *Create role*. + +. Enter a role name, for example, `test-team-sync`. Then, click *Save*. + +. In the navigation pane, click *Groups* -> *Create a group*. + +. Enter a name for the group, for example, `oidc-sync-test`. + +. In the navigation pane, click *Users* -> *Create new user*. + +. Enter a username, for example, `test`. + +. Click *Join Groups* and add this user to the `oidc-sync-test` group. + +. Click *Create*. + +. 
In the navigation pane, click *Clients*. + +. Click the name of the Client ID created earlier, for example, *quay-dev*. + +. On the *Client details* page, click *Client scopes*. + +. Click name of the client scope ID, for example, *quaydev-dedicated*. + +. Click *Configure a new mapper*. This mapper allows groups to be returned from the user information endpoint. + +. Select *User Realm Role*. + +. On the *Add mapper* page, provide the following information: + +.. Enter a name for the mapper, for example, `group`. +.. Enter a Token Claim Name, for example, `groupName`. User groups are returned under this key name. It is used in your {productname} configuration. +.. Click to turn Add to ID token `Off`. +.. Click to turn Add to access token `Off`. +.. Ensure that Add to userinfo is `On`. + +. Click *Save*. + + +[id="configuring-oidc-team-synchronization"] +== Configuring team synchronization for OIDC deployments +//// \ No newline at end of file diff --git a/modules/enabling-using-the-api.adoc b/modules/enabling-using-the-api.adoc new file mode 100644 index 000000000..6e573d44a --- /dev/null +++ b/modules/enabling-using-the-api.adoc @@ -0,0 +1,20 @@ +:_content-type: REFERENCE +[id="enabling-using-the-api"] += Enabling and using the {productname} API + +By leveraging the {productname} API, you can streamline container registry management, automate tasks, and integrate {productname}'s functionalities into your existing workflow. This can improve efficiency, offer enhanced flexibility (by way of repository management, user management, user permissions, image management, and so on), increase the stability of your organization, repository, or overall deployment, and more. + +ifeval::["{context}" == "use-quay"] +Detailed instructions for how to use the {productname} API can be found in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API guide]. 
In that guide, the following topics are covered: + +* {productname} token types, including OAuth 2 access tokens, robot account tokens, and OCI referrers tokens, and how to generate these tokens. +* Enabling the {productname} API by configuring your `config.yaml` file. +* How to use the {productname} API by passing in your OAuth 2 account token into the desired endpoint. +* API examples, including one generic example of how an administrator might automate certain tasks. + +See the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API guide] before attempting to use the API endpoints offered in this chapter. +endif::[] + +ifeval::["{context}" == "use-api"] +The following sections explain how to enable and use the {productname} API. +endif::[] diff --git a/modules/error-403-troubleshooting.adoc b/modules/error-403-troubleshooting.adoc new file mode 100644 index 000000000..e8e77b07b --- /dev/null +++ b/modules/error-403-troubleshooting.adoc @@ -0,0 +1,40 @@ +:_content-type: CONCEPT +[id="error-403-troubleshooting"] += Troubleshooting HTTP status code 403 + +HTTP status code `403` occurs when a user does not have the necessary permissions to access certain resources of the server, such as files, directories, API endpoints, or authenticated content. For some users, this might occur when attempting to push or pull content from {productname}, even after successfully logging in with Docker or Podman. + +Use the following sections to troubleshoot the various reasons for receiving an HTTP status code `403`. + +[id="centos-seven"] +== CentOS 7 + +CentOS 7, released 2014-07-07, introduced a custom build of Docker with a known issue that prevents logging into private registries. As a workaround for this issue, upgrade CentOS to version 8, or upgrade your version of Docker. 
+ +For more information, see link:https://bugzilla.redhat.com/show_bug.cgi?id=1209439[Docker fails to authenticate against additional registries]. + +[id="docker-zero-eight-one"] +== Docker version 0.8.1 + +Docker version 0.8.1 introduced a bug in its storage of authentication credentials in the `.dockercfg` file that resulted in no credentials being sent to Quay.io, despite a successful login. + +As a workaround for this issue, upgrade your version of Docker. + +For more information, see link:https://github.com/moby/moby/issues/4267[Dockercfg registry endpoint format invalid in v0.8.1]. + +[id="docker-execution-environment"] +== Docker is being executed in a different environment + +Docker stores the credentials that it uses for pushing and pulling in a file that is usually placed in the `$HOME/.docker/config.json` folder. If you are executing Docker in another environment, such as a scripted `docker build`, a virtual machine, `makefile`, `virtualenv`, and so on, Docker cannot find the `config.json` file and fails. + +As a workaround, verify that the `config.json` file is accessible to the environment which is performing the push or pull commands. + +[id="repository-permissions"] +== Insufficient repository permissions + +Ensure that your user, robot account, or token has the necessary permissions on the repository. Permissions on a repository can be edited from the *Settings* -> *Repository settings* page. + +[NOTE] +==== +If you are trying to pull or push an organization repository, your account must either have the correct permissions, or you must be a member of a team. 
+==== \ No newline at end of file diff --git a/modules/error-406-dockerfile.adoc b/modules/error-406-dockerfile.adoc new file mode 100644 index 000000000..446ef1000 --- /dev/null +++ b/modules/error-406-dockerfile.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="error-406-dockerfile"] += Base image pull in Dockerfile fails with HTTP error 403 + +In some cases, you might receive an HTTP error `403` when attempting to use a private base image as the `FROM` line in a Build Trigger. To use a private base image as the `FROM` lin in a Build Trigger, credentials for your robot account with _read access to the private image_ must be specified when setting up the Build Trigger. + +For more information about robot accounts and Build Triggers, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#github-build-triggers[Setting up Github Build Trigger tags]. \ No newline at end of file diff --git a/modules/error-429-troubleshooting.adoc b/modules/error-429-troubleshooting.adoc new file mode 100644 index 000000000..2e8e5e7a7 --- /dev/null +++ b/modules/error-429-troubleshooting.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="error-429-troubleshooting"] += Troubleshooting HTTP status code 429 + +HTTP status code `429` indicates that the user has sent too many requests in a given amount of time. If you are receiving this status code, it means that {productname} has reached its maximum capacity for requests per-second, per-IP address. To resolve this issue, you can take the following steps: + +* Reduce the frequency or pace at which you are sending requests to your {productname} registry. This helps ensure that you stay within the allowed limits and avoid triggering a `429` response. + +* Implement a back-off strategy to wait and retry the request after a certain period of time. Back-off strategies involve increasing the waiting time between subsequent requests. 
This gives the server enough time to process previous requests, which avoids overwhelming the server. + +* Use caching mechanisms to store and reuse frequently accessed data from the {productname} registry. This can help reduce the need for repeated requests and improve overall performance. \ No newline at end of file diff --git a/modules/error-500-troubleshooting.adoc b/modules/error-500-troubleshooting.adoc new file mode 100644 index 000000000..e6b4a0d3f --- /dev/null +++ b/modules/error-500-troubleshooting.adoc @@ -0,0 +1,31 @@ +:_content-type: CONCEPT +[id="error-500-troubleshooting"] += Troubleshooting HTTP status code 500 + +In some cases, users are unable to push or pull images from their {productname} registry, or cannot access the {productname} web UI. The received error message, HTTP error `500`, indicates that the database connections are exhausted. As a result, the database influences the service key renewal that is used for internal communication and the signing of requests made to the Docker v2 API. Consequently, the registry falls back to the Docker v1 API, which has been deprecated, and returns HTTP error `500` + +To resolve this issue, you can increase the database connection count by using the following procedure. + +.Procedure + +. Optional. For an immediate solution, you can force start the `Quay` container. Restarting the container helps resolve the issue because, on each restart, {productname} creates a new service key. These keys have a life of 2 hours and are regularly rotated. + +. Navigate to your `/var/lib/pgsql/data/postgresql.conf` file. + +. Increase the database connection count by updating the `max_connections` variable. It is recommended to set the number of connections on the database to at least `1000` for a development cluster, and `2000` for a production cluster. In some cases you might need more. 
For example: ++ +[source,yaml] +---- +max_connections = 1000 +---- ++ +[IMPORTANT] +==== +You should consult with your database team before making any changes to this field. +==== + + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6988741[Troubleshooting Quay Database]. diff --git a/modules/error-502-troubleshooting.adoc b/modules/error-502-troubleshooting.adoc new file mode 100644 index 000000000..0ef77839b --- /dev/null +++ b/modules/error-502-troubleshooting.adoc @@ -0,0 +1,172 @@ +:_content-type: CONCEPT +[id="error-502-troubleshooting"] += Troubleshooting HTTP status code 502 + +In some cases, {productname} users might receive the following HTTP status code when attempting to pull or push an image with Podman: `invalid status code from registry 502 (Bad Gateway)`. Code `502` indicates a problem with the communication between two servers. This error commonly occurs when a server acting as a gateway or a proxy receives an invalid response from an upstream server. + +The primary solution when receiving this error is to restart your {productname} deployment to clear locked-up worker nodes, clean up temporary files or caches, or to resolve other transient issues. Restarting {productname} can help resolve many problematic states. In some cases, more thorough troubleshooting must be done. + +[id="restart-standalone-quay"] +== Restarting a standalone {productname} deployment + +Use the following procedure to restart a standalone {productname} deployment. 
+ +.Procedure + +* Enter the following command to restart your {productname} container: ++ +[source,terminal] +---- +$ podman restart +---- + +[id="restart-quay-operator"] +== Restarting the {productname} Operator + +Use the following procedure to restart your {productname} Operator. + +* Enter the following command to restart your {productname} Operator: ++ +[source,terminal] +---- +$ oc delete pod quay-app +---- + +[id="integer-502-issue"] +== Integer out of range + +Some `502` error messages might occur because the garbage collection worker fails when collecting images whose tag expiration value is too high. Other `502` error messages might occur because PostgreSQL cannot handle an integer bigger than 2147483647, which causes the garbage collection worker to fail. Running {productname} in debug mode can reveal additional information about the `502` error. + +If debug mode reveals the error `peewee.DataError: integer out of range`, it means there is an issue with the range of an integer value in the context of the Peewee Object-Relational Mapping (ORM) library. This error occurs when an attempt is made to store an integer value that is outside of the valid range for the corresponding database column; this is often caused when a user sets their tag expiration too high, which causes the garbage collection worker to fail when collecting images. Each database has its own limits on the range of integer values it can store. PostgreSQL can store values from -2147483648 to 2147483647. + +If you run {productname} in debug mode and the error `peewee.DataError: integer out of range` is returned, use the following steps for troubleshooting. + +.Procedure + +. In most cases, a {productname} administrator can resolve this error by setting the `FEATURE_CHANGE_TAG_EXPIRATION` configuration field to `false` in their `config.yaml` file. ++ +[NOTE] +==== +This change affects all users of your organization and disables them from setting tag expirations themselves. +==== + +. 
Alternatively, you can request the user or owner of the repository in question to either remove, or change, the tag expiration manually. If they do not respond, you can execute the following steps: ++ +** Obtain information from the user table: ++ +[source,terminal] +---- +$ SELECT username, removed_tag_expiration_s FROM "user" WHERE id = (SELECT namespace_user_id FROM repository WHERE id = ); +---- ++ +** Update the user or owner of the repository in question and set the `default tag expiration` for that user to two weeks: ++ +[source,terminal] +---- +$ UPDATE "user" SET removed_tag_expiration_s = 1209600 WHERE id = (SELECT namespace_user_id FROM repository WHERE id = ); +---- + +[id="troubleshooting-502-pull"] +== Troubleshooting 502 Podman pull errors + +In some cases, the following error might be returned when using `podman pull`: `Error: error pulling image " /:": unable to pull /:: unable to pull image: Error parsing image configuration: Error fetching blob: invalid status code from registry 502 (Bad Gateway)`. This error primarily occurred in {productname} versions 3.7 and earlier. It has been resolved in {productname} 3.7.1 and later. + +If you are using an earlier version of {productname}, the error occurs because the installation script `cert_install.sh` do not have a new line at the end of the file. To resolve this issue, you can manually add a new line at the end of your `cert_install.sh` file by simply going to the end of the file, pressing `enter`, saving the file, and then reuploading it to {productname}. + +[id="troubleshooting-502-push"] +== Troubleshooting 502 Podman push errors + +In some cases, the following error might be returned when using `podman push`: `Error: Error writing blob: Error initiating layer upload to /v2/repo/image/blobs/uploads/ in : received unexpected HTTP status: 502 Bad Gateway`. This issue is caused by either the NooBaa certificate rotation, or the service signing root CA rotation. 
The workaround for this issue is to manually add a new certificate chain to {productname}'s deployment after it has rotated. + +.Procedure + +. Download the new certificate chain for your NooBaa endpoint by entering the following command: ++ +[source,terminal] +---- +$ oc exec -it quay-quay-pod-name -- openssl s_client -connect s3.openshift-storage.svc.cluster.local:443 -showcerts 2>/dev/null > extra_ca_certs_noobaa.crt +---- ++ +.Example output ++ +[source,terminal] +---- +-----BEGIN CERTIFICATE----- +MIIFRjCCBC6gAwIBAgIUKd8q... +-----END CERTIFICATE----- +---- + +. Locate the custom config bundle secret that the Operator is using to deploy {productname} by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry name-of-registry -o yaml | grep -i custom +---- + +. On the {ocp} console, locate the namespace where the {productname} Operator is deployed. Click *Workloads* -> *Secret* in the navigation pane to find the custom config bundle secret. + +. Open the secret and set it to *Editing* mode by clicking *Actions* -> *Edit* on the navigation pane. + +. Scroll to the end of the file and create a new key named `extra_ca_certs_noobaa.crt`. Paste the certificate generated in Step 1 of this procedure inside of the secret. + +. Save the file and let the {productname} Operator recycle the deployment. If reconciliation does not happen immediately, delete the Operator pod name and let it restart. For example: ++ +[source,terminal] +---- +$ oc delete pod quay-operator-xxxxx-xxxxxxxx -n <1> +---- + +For more information about this issue, see link:https://issues.redhat.com/browse/PROJQUAY-5174[PROJQUAY-5174]. + +[id="troubleshooting-502-unmanaged-storage"] +== Troubleshooting 502 errors when using unmanaged storage + +In some cases, pulling an image from a {productname} registry that is using RadosGW or Noobaa as an unmanaged object storage returns the following error: `parsing image configuration 502 (Bad Gateway):`. 
Use the following steps to resolve this issue. + +.Procedure + +. In your `config.yaml` file, update the `DISTRIBUTED_STORAGE_CONFIG` field. + +.. If you are using RadosGW storage: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + radosGWStorage: + - RadosGWStorage + - access_key: xxx + secret_key: xxx + bucket_name: xxx + hostname: rook-ceph-rgw-ocs-storagecluster-cephobjectstore.openshift-storage.svc.cluster.local + is_secure: true + port: 443 + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_PREFERENCE: + - radosGWStorage +---- + +.. If you are using NooBaa storage: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - RHOCSStorage + - access_key: xxx + bucket_name: xxx + hostname: s3.openshift-storage.svc.cluster.local + is_secure: true + port: "443" + secret_key: xxx + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- ++ +With these updates, you should be able to successfully pull images when using unmanaged object storage. 
+ +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6998878[ Podman pull/push fails with 502 http code in QUAY] diff --git a/modules/external-registry-config-api-example.adoc b/modules/external-registry-config-api-example.adoc new file mode 100644 index 000000000..3211fc31e --- /dev/null +++ b/modules/external-registry-config-api-example.adoc @@ -0,0 +1,25 @@ +:_content-type: CONCEPT +[id="external-registry-config-api-example"] + += external_registry_config object reference + +[source,yaml] +---- +{ + "is_enabled": True, + "external_reference": "quay.io/redhat/quay", + "sync_interval": 5000, + "sync_start_date": datetime(2020, 0o1, 0o2, 6, 30, 0), + "external_registry_username": "fakeUsername", + "external_registry_password": "fakePassword", + "external_registry_config": { + "verify_tls": True, + "unsigned_images": False, + "proxy": { + "http_proxy": "http://insecure.proxy.corp", + "https_proxy": "https://secure.proxy.corp", + "no_proxy": "mylocalhost", + }, + }, + } +---- \ No newline at end of file diff --git a/modules/fetching-images-and-tags.adoc b/modules/fetching-images-and-tags.adoc new file mode 100644 index 000000000..0bd5eedc0 --- /dev/null +++ b/modules/fetching-images-and-tags.adoc @@ -0,0 +1,41 @@ +:_content-type: CONCEPT +[id="fetching-images-and-tags"] += Fetching an image by tag or digest + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers multiple ways of pulling images using Docker and Podman clients. + +.Procedure + +. Navigate to the *Tags* page of a repository. + +. Under *Manifest*, click the *Fetch Tag* icon. + +. 
When the popup box appears, users are presented with the following options: ++ +* Podman Pull (by tag) +* Docker Pull (by tag) +* Podman Pull (by digest) +* Docker Pull (by digest) ++ +Selecting any one of the four options returns a command for the respective client that allows users to pull the image. + +. Click *Copy Command* to copy the command, which can be used on the command-line interface (CLI). For example: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman pull quay.io/quayadmin/busybox:test2 +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ podman pull quay-server.example.com/quayadmin/busybox:test2 +---- +endif::[] diff --git a/modules/fine-grained-access-control-intro.adoc b/modules/fine-grained-access-control-intro.adoc new file mode 100644 index 000000000..a88fe0c8d --- /dev/null +++ b/modules/fine-grained-access-control-intro.adoc @@ -0,0 +1,11 @@ +[[fine-grained-access-control]] += Fine-grained access control + +{productname} allows users to integrate their existing identity infrastructure and use a fine-grained permissions system to map their organizational structure and grant access to whole teams to manage specific repositories. 
+ +{productname} is supported by the following authentication providers: + +* Built-in database authentication +* Lightweight Directory Access Protocol (LDAP) authentication and sync +* External OpenID Connect (OIDC) provider +* OpenStack Keystone diff --git a/modules/fips-overview.adoc b/modules/fips-overview.adoc new file mode 100644 index 000000000..4f720da3f --- /dev/null +++ b/modules/fips-overview.adoc @@ -0,0 +1,41 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="fips-overview"] += Federal Information Processing Standard (FIPS) readiness and compliance + +The Federal Information Processing Standard (FIPS) developed by the National Institute of Standards and Technology (NIST) is regarded as the gold standard for securing and encrypting sensitive data, notably in highly regulated areas such as banking, healthcare, and the public sector. {rhel} and {ocp} support FIPS by providing a _FIPS mode_, in which the system only allows usage of specific FIPS-validated cryptographic modules like `openssl`. This ensures FIPS compliance. + +[id="enabling-fips-compliance"] +== Enabling FIPS compliance + +Use the following procedure to enable FIPS compliance on your {productname} deployment. + +.Prerequisites + +* If you are running a standalone deployment of {productname}, your {rhel} deployment is version 8 or later and FIPS-enabled. + +* If you are deploying {productname-ocp}, {ocp} is version 4.10 or later. + +* Your {productname} version is 3.5.0 or later. + +* If you are using the {productname-ocp} on an IBM Power or IBM Z cluster: +** {ocp} version 4.14 or later is required +** {productname} version 3.10 or later is required + +* You have administrative privileges for your {productname} deployment. + +.Procedure + +* In your {productname} `config.yaml` file, set the `FEATURE_FIPS` configuration field to `true`. 
For example: ++ +[source,yaml] +---- +FEATURE_FIPS: true +---- ++ +With `FEATURE_FIPS` set to `true`, {productname} runs using FIPS-compliant hash functions. \ No newline at end of file diff --git a/modules/first-user-api.adoc b/modules/first-user-api.adoc new file mode 100644 index 000000000..ff53b11de --- /dev/null +++ b/modules/first-user-api.adoc @@ -0,0 +1,82 @@ +:_content-type: PROCEDURE +[id="using-the-api-to-create-first-user"] += Using the API to create the first user + +Use the following procedure to create the first user in your {productname} organization. + +.Prerequisites + +* The config option `FEATURE_USER_INITIALIZE` must be set to `true`. +* No users can already exist in the database. + +.Procedure + +[NOTE] +==== +This procedure requests an OAuth token by specifying `"access_token": true`. +==== + +. Open your {productname} configuration file and update the following configuration fields: ++ +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +SUPER_USERS: + - quayadmin +---- + +. Stop the {productname} service by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop quay +---- + +. Start the {productname} service by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d -p 80:8080 -p 443:8443 --name=quay -v $QUAY/config:/conf/stack:Z -v $QUAY/storage:/datastorage:Z {productrepo}/{quayimage}:{productminv} +---- + +. Run the following `CURL` command to generate a new user with a username, password, email, and access token: ++ +[source,terminal] +---- +$ curl -X POST -k http://quay-server.example.com/api/v1/user/initialize --header 'Content-Type: application/json' --data '{ "username": "quayadmin", "password":"quaypass12345", "email": "quayadmin@example.com", "access_token": true}' +---- ++ +If successful, the command returns an object with the username, email, and encrypted password. 
For example: ++ +[source,yaml] +---- +{"access_token":"6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED", "email":"quayadmin@example.com","encrypted_password":"1nZMLH57RIE5UGdL/yYpDOHLqiNCgimb6W9kfF8MjZ1xrfDpRyRs9NUnUuNuAitW","username":"quayadmin"} # gitleaks:allow +---- ++ +If a user already exists in the database, an error is returned: ++ +[source,terminal] +---- +{"message":"Cannot initialize user in a non-empty database"} +---- ++ +If your password is not at least eight characters or contains whitespace, an error is returned: ++ +[source,terminal] +---- +{"message":"Failed to initialize user: Invalid password, password must be at least 8 characters and contain no whitespace."} +---- + +. Log in to your {productname} deployment by entering the following command: ++ +[source,terminal] +---- +$ sudo podman login -u quayadmin -p quaypass12345 http://quay-server.example.com --tls-verify=false +---- ++ +.Example output ++ +[source,terminal] +---- +Login Succeeded! +---- \ No newline at end of file diff --git a/modules/frequently-asked-questions.adoc b/modules/frequently-asked-questions.adoc new file mode 100644 index 000000000..cc73750da --- /dev/null +++ b/modules/frequently-asked-questions.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="frequently-asked-questions"] += Frequently asked questions + +The "Frequently Asked Questions" (FAQ) document for {productname} aims to address common inquiries and provide comprehensive answers regarding the functionality, features, and usage of the {productname} container registry platform. This document serves as a valuable resource for users, administrators, and developers seeking quick and accurate information about various aspects of {productname}. + +The FAQ covers a wide range of topics, including account management, repository operations, security features, integration with other systems, troubleshooting tips, and best practices for optimizing the usage of {productname}. 
\ No newline at end of file diff --git a/modules/garbage-collection.adoc b/modules/garbage-collection.adoc new file mode 100644 index 000000000..2a99d2f85 --- /dev/null +++ b/modules/garbage-collection.adoc @@ -0,0 +1,176 @@ +:_content-type: CONCEPT +[id="garbage-collection"] += {productname} garbage collection + +{productname} includes automatic and continuous image garbage collection. Garbage collection ensures efficient use of resources for active objects by removing objects that occupy sizeable amounts of disk space, such as dangling or untagged images, repositories, and blobs, including layers and manifests. Garbage collection performed by {productname} can reduce downtime in your organization's environment. + +[id="garbage-collection-practice"] +== {productname} garbage collection in practice + +Currently, all garbage collection happens discreetly, and there are no commands to manually run garbage collection. {productname} provides metrics that track the status of the different garbage collection workers. + +For namespace and repository garbage collection, the progress is tracked based on the size of their respective queues. Namespace and repository garbage collection workers require a global lock to work. As a result, and for performance reasons, only one worker runs at a time. + +[NOTE] +==== +{productname} shares blobs between namespaces and repositories in order to conserve disk space. For example, if the same image is pushed 10 times, only one copy of that image will be stored. + +It is possible that tags can share their layers with different images already stored somewhere in {productname}. In that case, blobs will stay in storage, because deleting shared blobs would make other images unusable. + +Blob expiration is independent of the time machine. 
If you push a tag to {productname} and the time machine is set to 0 seconds, and then you delete a tag immediately, garbage collection deletes the tag and everything related to that tag, but will not delete the blob storage until the blob expiration time is reached. +==== + +Garbage collecting tagged images works differently than garbage collection on namespaces or repositories. Rather than having a queue of items to work with, the garbage collection workers for tagged images actively search for a repository with inactive or expired tags to clean up. Each instance of garbage collection workers will grab a repository lock, which results in one worker per repository. + +[NOTE] +==== +* In {productname}, inactive or expired tags are manifests without tags because the last tag was deleted or it expired. The manifest stores information about how the image is composed and stored in the database for each individual tag. When a tag is deleted and the allotted time from *Time Machine* has been met, {productname} garbage collects the blobs that are not connected to any other manifests in the registry. If a particular blob is connected to a manifest, then it is preserved in storage and only its connection to the manifest that is being deleted is removed. +* Expired images will disappear after the allotted time, but are still stored in {productname}. The time in which an image is completely deleted, or collected, depends on the *Time Machine* setting of your organization. The default time for garbage collection is 14 days unless otherwise specified. Until that time, tags can be pointed to an expired or deleted images. +==== + +For each type of garbage collection, {productname} provides metrics for the number of rows per table deleted by each garbage collection worker. 
The following image shows an example of how {productname} monitors garbage collection with the same metrics: + +image:garbage-collection-metrics.png[Garbage collection metrics] + +[id="measuring-storage-reclamation"] +=== Measuring storage reclamation + +{productname} does not have a way to track how much space is freed up by garbage collection. Currently, the best indicator of this is by checking how many blobs have been deleted in the provided metrics. + +[NOTE] +==== +The `UploadedBlob` table in the {productname} metrics tracks the various blobs that are associated with a repository. When a blob is uploaded, it will not be garbage collected before the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter. This is to avoid prematurely deleting blobs that are part of an ongoing push. For example, if garbage collection is set to run often, and a tag is deleted in the span of less than one hour, then it is possible that the associated blobs will not get cleaned up immediately. Instead, and assuming that the time designated by the `PUSH_TEMP_TAG_EXPIRATION_SEC` parameter has passed, the associated blobs will be removed the next time garbage collection is triggered to run by another expired tag on the same repository. +==== + +[id="garbage-collection-configuration-fields"] +== Garbage collection configuration fields + +The following configuration fields are available to customize what is garbage collected, and the frequency at which garbage collection occurs: + +[cols="3a,1a,2a",options="header"] +|=== +|Name |Description |Schema +| **FEATURE_GARBAGE_COLLECTION** | Whether garbage collection is enabled for image tags. Defaults to `true`. |Boolean +| **FEATURE_NAMESPACE_GARBAGE_COLLECTION** | Whether garbage collection is enabled for namespaces. Defaults to `true`. |Boolean +| **FEATURE_REPOSITORY_GARBAGE_COLLECTION** | Whether garbage collection is enabled for repositories. Defaults to `true`. 
|Boolean +| **GARBAGE_COLLECTION_FREQUENCY** | The frequency, in seconds, at which the garbage collection worker runs. Affects only garbage collection workers. Defaults to 30 seconds. |String +| **PUSH_TEMP_TAG_EXPIRATION_SEC** | The number of seconds that blobs will not be garbage collected after being uploaded. This feature prevents garbage collection from cleaning up blobs that are not referenced yet, but still used as part of an ongoing push. |String +| **TAG_EXPIRATION_OPTIONS** | List of valid tag expiration values. |String +| **DEFAULT_TAG_EXPIRATION** | Tag expiration time for time machine. |String +| **CLEAN_BLOB_UPLOAD_FOLDER** | Automatically cleans stale blobs left over from an S3 multipart upload. By default, blob files older than two days are cleaned up every hour. | Boolean ++ +**Default:** `true` + +|=== + +[id="disabling-garbage-collection"] +== Disabling garbage collection + +The garbage collection features for image tags, namespaces, and repositories are stored in the `config.yaml` file. These features default to `true`. + +In rare cases, you might want to disable garbage collection, for example, to control when garbage collection is performed. You can disable garbage collection by setting the `GARBAGE_COLLECTION` features to `false`. When disabled, dangling or untagged images, repositories, namespaces, layers, and manifests are not removed. This might increase the downtime of your environment. + + +[NOTE] +==== +There is no command to manually run garbage collection. Instead, you would disable, and then re-enable, the garbage collection feature. +==== + +[id="garbage-collection-quota-management"] +== Garbage collection and quota management + +{productname} introduced quota management in 3.7. With quota management, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. 
+ +As of {productname} 3.7, garbage collection reclaims memory that was allocated to images, repositories, and blobs after deletion. Because the garbage collection feature reclaims memory after deletion, there is a discrepancy between what is stored in an environment's disk space and what quota management is reporting as the total consumption. There is currently no workaround for this issue. + +[id="garbage-collection-procedure"] +== Garbage collection in practice + +Use the following procedure to check your {productname} logs to ensure that garbage collection is working. + +.Procedure + +. Enter the following command to ensure that garbage collection is properly working: ++ +[source,terminal] +---- +$ sudo podman logs +---- ++ +Example output: ++ +[source,terminal] +---- +gcworker stdout | 2022-11-14 18:46:52,458 [63] [INFO] [apscheduler.executors.default] Job "GarbageCollectionWorker._garbage_collection_repos (trigger: interval[0:00:30], next run at: 2022-11-14 18:47:22 UTC)" executed successfully +---- + +. Delete an image tag. + +. Enter the following command to ensure that the tag was deleted: ++ +[source,terminal] +---- +$ podman logs quay-app +---- ++ +Example output: ++ +[source,terminal] +---- +gunicorn-web stdout | 2022-11-14 19:23:44,574 [233] [INFO] [gunicorn.access] 192.168.0.38 - - [14/Nov/2022:19:23:44 +0000] "DELETE /api/v1/repository/quayadmin/busybox/tag/test HTTP/1.0" 204 0 "http://quay-server.example.com/repository/quayadmin/busybox?tab=tags" "Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0" +---- + +[id="garbage-collection-metrics"] +== {productname} garbage collection metrics + +The following metrics show how many resources have been removed by garbage collection. These metrics show how many times the garbage collection workers have run and how many namespaces, repositories, and blobs were removed. 
+ +[options="header"] +|=== +| Metric name | Description +| quay_gc_iterations_total | Number of iterations by the GCWorker +| quay_gc_namespaces_purged_total | Number of namespaces purged by the NamespaceGCWorker +| quay_gc_repos_purged_total | Number of repositories purged by the RepositoryGCWorker or NamespaceGCWorker +| quay_gc_storage_blobs_deleted_total | Number of storage blobs deleted +|=== + + +.Sample metrics output +[source,terminal] +---- +# TYPE quay_gc_iterations_created gauge +quay_gc_iterations_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189714e+09 +... + +# HELP quay_gc_iterations_total number of iterations by the GCWorker +# TYPE quay_gc_iterations_total counter +quay_gc_iterations_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... + +# TYPE quay_gc_namespaces_purged_created gauge +quay_gc_namespaces_purged_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189433e+09 +... + +# HELP quay_gc_namespaces_purged_total number of namespaces purged by the NamespaceGCWorker +# TYPE quay_gc_namespaces_purged_total counter +quay_gc_namespaces_purged_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +.... + +# TYPE quay_gc_repos_purged_created gauge +quay_gc_repos_purged_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.631782319018925e+09 +... 
+ +# HELP quay_gc_repos_purged_total number of repositories purged by the RepositoryGCWorker or NamespaceGCWorker +# TYPE quay_gc_repos_purged_total counter +quay_gc_repos_purged_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... + +# TYPE quay_gc_storage_blobs_deleted_created gauge +quay_gc_storage_blobs_deleted_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189059e+09 +... + +# HELP quay_gc_storage_blobs_deleted_total number of storage blobs deleted +# TYPE quay_gc_storage_blobs_deleted_total counter +quay_gc_storage_blobs_deleted_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... +---- \ No newline at end of file diff --git a/modules/geo-repl-sslerror.adoc b/modules/geo-repl-sslerror.adoc new file mode 100644 index 000000000..375d33c4d --- /dev/null +++ b/modules/geo-repl-sslerror.adoc @@ -0,0 +1,30 @@ +:_content-type: PROCEDURE +[id="geo-repl-sslerror"] += Geo-replication errors out with SSLError + +In some cases, using the `podman push` command might return the following error: + +[source,terminal] +---- +storagereplication stdout | 2021-12-16 14:56:29,602 [144] [ERROR] [__main__] Failed to copy path `sha256/9f/9f9b90db7acda0f3f43e720ac9d54a7e623078fc7af6cf0c1d055410986d3f10` of image storage 0a014260-01a3-4a54-8dd6-784de7bf4feb to location dr +toragereplication stdout | Traceback (most recent call last): +storagereplication stdout | File "/usr/local/lib/python3.8/site-packages/urllib3/util/ssl_.py", line 336, in ssl_wrap_socket +storagereplication stdout | context.load_verify_locations(ca_certs, ca_cert_dir) +storagereplication stdout | ssl.SSLError: [X509] PEM lib (_ssl.c:4265) +storagereplication stdout | During handling of the above exception, another exception occurred: +. 
+storagereplication stdout | File "/usr/local/lib/python3.8/site-packages/botocore/httpsession.py", line 338, in send +storagereplication stdout | raise SSLError(endpoint_url=request.url, error=e) +storagereplication stdout | botocore.exceptions.SSLError: SSL validation failed for https://s3-openshift-storage.apps.ocp1.rosbank.rus.socgen/quay-bucket-dr [X509] PEM lib (_ssl.c:4265) +storagereplication stdout | 2021-12-16 14:56:29,603 [144] [WARNING] [workers.queueworker] An error occurred processing request: {"namespace_user_id": 1, "storage_id": "0a014260-01a3-4a54-8dd6-784de7bf4feb"} +storagereplication stdout | 2021-12-16 14:56:29,603 [144] [WARNING] [workers.queueworker] Job exception: +---- + +`SSLError` usually occurs after multiple certificates signing the same thing are added to your {productname} deployment. This error is most commonly seen on regular pushes and LDAP connectivity, even when outside sources are used, for example, AWS storage buckets. + +As a workaround for this issue, remove certificates from the `extra_ca_certs` one by one until you find the duplicate. After each removal, restart the `Quay` pod to test whether the issue persists. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6612551[Quay georeplication errors out with SSLError]. \ No newline at end of file diff --git a/modules/geo-repl-troubleshooting-issues.adoc b/modules/geo-repl-troubleshooting-issues.adoc new file mode 100644 index 000000000..840707892 --- /dev/null +++ b/modules/geo-repl-troubleshooting-issues.adoc @@ -0,0 +1,82 @@ +:_content-type: PROCEDURE +[id="geo-repl-troubleshooting-issues"] += Troubleshooting geo-replication for {productname} + +Use the following sections to troubleshoot geo-replication for {productname}. 
+ +//// +[id="check-geo-repl-config"] +== Checking the geo-replication configuration + +Use the following procedure to check your geo-replication configuration in your {productname} `config.yaml` file. + +[IMPORTANT] +==== +The same configuration must be used across all regions. +==== + +.Procedure + +. Check your geo-replication configuration. + +.. If you are using the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it quay-pod -- cat /conf/stack/config.yaml +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it quay-container cat /conf/stack/config.yaml +---- +//// + +[id="check-data-replication"] +== Checking data replication in backend buckets + +Use the following procedure to ensure that your data is properly replicated in all backend buckets. + +.Prerequisites + +* You have installed the `aws` CLI. + +.Procedure + +. Enter the following command to ensure that your data is replicated in all backend buckets: ++ +[source,terminal] +---- +$ aws --profile quay_prod_s3 --endpoint=http://10.0.x.x:port s3 ls ocp-quay --recursive --human-readable --summarize +---- ++ +.Example output ++ +[source,terminal] +---- +Total Objects: 17996 +Total Size: 514.4 GiB +---- + +[id="check-backend-storage-running"] +== Checking the status of your backend storage + +Use the following resources to check the status of your backend storage. + +* *Amazon Web Service Storage (AWS)*. Check the AWS S3 service health status on the link:https://health.aws.amazon.com/health/status[AWS Service Health Dashboard]. Validate your access to S3 by listing objects in a known bucket using the `aws` CLI or SDKs. + +* *Google Cloud Storage (GCS)*. Check the link:https://status.cloud.google.com/[Google Cloud Status Dashboard] for the status of the GCS service. 
Verify your access to GCS by listing objects in a known bucket using the Google Cloud SDK or GCS client libraries. + +* *NooBaa*. Check the NooBaa management console or administrative interface for any health or status indicators. Ensure that the NooBaa services and related components are running and accessible. Verify access to NooBaa by listing objects in a known bucket using the NooBaa CLI or SDK. + +* **{odf}**. Check the {ocp} Console or management interface for the status of the {odf} components. Verify the availability of {odf} S3 interface and services. Ensure that the {odf} services are running and accessible. Validate access to {odf} S3 by listing objects in a known bucket using the appropriate S3-compatible SDK or CLI. + +* **Ceph**. Check the status of Ceph services, including Ceph monitors, OSDs, and RGWs. Validate that the Ceph cluster is healthy and operational. Verify access to Ceph object storage by listing objects in a known bucket using the appropriate Ceph object storage API or CLI. + +* **Azure Blob Storage**. Check the link:https://azure.status.microsoft/en-us/status[Azure Status Dashboard] to see the health status of the Azure Blob Storage service. Validate your access to Azure Blob Storage by listing containers or objects using the Azure CLI or Azure SDKs. + +* **OpenStack Swift**. Check the link:https://www.ibm.com/docs/ro/cmwo/4.3.0.0?topic=services-checking-status[OpenStack Status] page to verify the status of the OpenStack Swift service. Ensure that the Swift services, like the proxy server, container servers, object servers, are running and accessible. Validate your access to Swift by listing containers or objects using the appropriate Swift CLI or SDK. + +After checking the status of your backend storage, ensure that all {productname} instances have access to all s3 storage backends. 
\ No newline at end of file diff --git a/modules/georepl-arch-operator.adoc b/modules/georepl-arch-operator.adoc new file mode 100644 index 000000000..de321a0c0 --- /dev/null +++ b/modules/georepl-arch-operator.adoc @@ -0,0 +1,8 @@ +[[georepl-arch-operator]] += Geo-replication using the {productname} Operator + +image:178_Quay_architecture_0821_georeplication_openshift-temp.png[Geo-replication architecture] + +In the example shown above, the {productname} Operator is deployed in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the Quay instance, and will then be replicated, in the background, to the other storage engines. + +Because the Operator now manages the Clair security scanner and its database separately, geo-replication setups can be leveraged so that they do not manage the Clair database. Instead, an external shared database would be used. {productname} and Clair support several providers and vendors of PostgreSQL, which can be found in the {productname} 3.x link:https://access.redhat.com/articles/4067991[test matrix]. Additionally, the Operator also supports custom Clair configurations that can be injected into the deployment, which allows users to configure Clair with the connection credentials for the external database. diff --git a/modules/georepl-arch-standalone.adoc b/modules/georepl-arch-standalone.adoc new file mode 100644 index 000000000..91697f9a3 --- /dev/null +++ b/modules/georepl-arch-standalone.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="georepl-arch-standalone"] += Geo-replication using standalone {productname} + +In the following image, {productname} is running standalone in two separate regions, with a common database and a common Redis instance. 
Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the {productname} instance, and will then be replicated, in the background, to the other storage engines. + +[NOTE] +==== +If Clair fails in one cluster, for example, the US cluster, US users would not see vulnerability reports in {productname} for the second cluster (EU). This is because all Clair instances have the same state. When Clair fails, it is usually because of a problem within the cluster. +==== + +.Geo-replication architecture +image:178_Quay_architecture_0821_georeplication.png[Geo-replication] \ No newline at end of file diff --git a/modules/georepl-arch.adoc b/modules/georepl-arch.adoc new file mode 100644 index 000000000..f8e24c53c --- /dev/null +++ b/modules/georepl-arch.adoc @@ -0,0 +1,6 @@ +[[georepl-arch]] += Geo-replication architecture for standalone {productname} + +image:178_Quay_architecture_0821_georeplication.png[Georeplication] + +In the example shown above, {productname} is running in two separate regions, with a common database and a common Redis instance. Localized image storage is provided in each region and image pulls are served from the closest available storage engine. Container image pushes are written to the preferred storage engine for the Quay instance, and will then be replicated, in the background, to the other storage engines. diff --git a/modules/georepl-deploy-operator.adoc b/modules/georepl-deploy-operator.adoc new file mode 100644 index 000000000..be3add226 --- /dev/null +++ b/modules/georepl-deploy-operator.adoc @@ -0,0 +1,201 @@ +:_content-type: PROCEDURE +[id="georepl-deploy-operator"] += Setting up geo-replication on {ocp} + +Use the following procedure to set up geo-replication on {ocp}. + +.Procedure + +. Deploy a postgres instance for {productname}. + +. 
Log in to the database by entering the following command: ++ +[source,terminal] +---- +psql -U <username> -h <host> -p <port> -d <database> +---- + +. Create a database for {productname} named `quay`. For example: ++ +[source,terminal] +---- +CREATE DATABASE quay; +---- +. Enable the pg_trgm extension inside the database: ++
This `config.yaml` file contains the details for the common PostgreSQL, Redis and storage backends: ++ +.Geo-replication `config.yaml` file +[source,yaml] +---- +SERVER_HOSTNAME: <1> +DB_CONNECTION_ARGS: + autorollback: true + threadlocals: true +DB_URI: postgresql://postgres:password@10.19.0.1:5432/quay <2> +BUILDLOGS_REDIS: + host: 10.19.0.2 + port: 6379 +USER_EVENTS_REDIS: + host: 10.19.0.2 + port: 6379 +DATABASE_SECRET_KEY: 0ce4f796-c295-415b-bf9d-b315114704b8 +DISTRIBUTED_STORAGE_CONFIG: + usstorage: + - GoogleCloudStorage + - access_key: GOOGQGPGVMASAAMQABCDEFG + bucket_name: georep-test-bucket-0 + secret_key: AYWfEaxX/u84XRA2vUX5C987654321 + storage_path: /quaygcp + eustorage: + - GoogleCloudStorage + - access_key: GOOGQGPGVMASAAMQWERTYUIOP + bucket_name: georep-test-bucket-1 + secret_key: AYWfEaxX/u84XRA2vUX5Cuj12345678 + storage_path: /quaygcp +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - usstorage + - eustorage +DISTRIBUTED_STORAGE_PREFERENCE: + - usstorage + - eustorage +FEATURE_STORAGE_REPLICATION: true +---- +<1> A proper `SERVER_HOSTNAME` must be used for the route and must match the hostname of the global load balancer. +<2> To retrieve the configuration file for a Clair instance deployed using the {ocp} Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/quay_operator_features#clair-openshift-config[Retrieving the Clair config]. + +. Create the `configBundleSecret` by entering the following command: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml georep-config-bundle +---- + +. In each of the clusters, set the `configBundleSecret` and use the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environmental variable override to configure the appropriate storage for that cluster. For example: ++ +[NOTE] +==== +The `config.yaml` file between both deployments must match. 
If making a change to one cluster, it must also be changed in the other. +==== ++ +[source,yaml] +.US cluster `QuayRegistry` example +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: georep-config-bundle + components: + - kind: objectstorage + managed: false + - kind: route + managed: true + - kind: tls + managed: false + - kind: postgres + managed: false + - kind: clairpostgres + managed: false + - kind: redis + managed: false + - kind: quay + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: usstorage + - kind: mirror + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: usstorage +---- ++ +[NOTE] +==== +Because SSL/TLS is unmanaged, and the route is managed, you must supply the certificates directly in the config bundle. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes]. +==== ++ +[source,yaml] +.European cluster +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: georep-config-bundle + components: + - kind: objectstorage + managed: false + - kind: route + managed: true + - kind: tls + managed: false + - kind: postgres + managed: false + - kind: clairpostgres + managed: false + - kind: redis + managed: false + - kind: quay + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: eustorage + - kind: mirror + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: eustorage +---- ++ +[NOTE] +==== +Because SSL/TLS is unmanaged, and the route is managed, you must supply the certificates directly in the config bundle. 
For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-preconfigure#operator-preconfig-tls-routes[Configuring TLS and routes]. +==== \ No newline at end of file diff --git a/modules/georepl-deploy-standalone.adoc b/modules/georepl-deploy-standalone.adoc new file mode 100644 index 000000000..39b569a24 --- /dev/null +++ b/modules/georepl-deploy-standalone.adoc @@ -0,0 +1,28 @@ +[[georepl-deploy-standalone]] += Run {productname} with storage preferences + +. Copy the config.yaml to all machines running {productname} + +. For each machine in each region, add a +`QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable with the +preferred storage engine for the region in which the machine is running. ++ +For example, for a machine running in Europe with the config +directory on the host available from `$QUAY/config`: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -e QUAY_DISTRIBUTED_STORAGE_PREFERENCE=europestorage \ + {productrepo}/{quayimage}:{productminv} +---- ++ +[NOTE] +==== +The value of the environment variable specified must match the +name of a Location ID as defined in the config panel. +==== + +. Restart all {productname} containers diff --git a/modules/georepl-intro.adoc b/modules/georepl-intro.adoc new file mode 100644 index 000000000..0dafd7a04 --- /dev/null +++ b/modules/georepl-intro.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="georepl-intro"] += Geo-replication + +Geo-replication allows multiple, geographically distributed {productname} deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed {productname} setup. Image data is asynchronously replicated in the background with transparent failover and redirect for clients. 
Deployments of {productname} with geo-replication are supported on standalone and Operator deployments.
+ +* In case of an object storage system failure of one geo-replicating site, that site's {productname} deployment must be shut down so that clients are redirected to the remaining site with intact storage systems by a global load balancer. Otherwise, clients will experience pull and push failures. + +* {productname} has no internal awareness of the health or availability of the connected object storage system. Users must configure a global load balancer (LB) to monitor the health of your distributed system and to route traffic to different sites based on their storage status. + +* To check the status of your geo-replication deployment, you must use the `/health/endtoend` checkpoint, which is used for global health monitoring. You must configure the redirect manually using the `/health/endtoend` endpoint. The `/health/instance` end point only checks local instance health. + +* If the object storage system of one site becomes unavailable, there will be no automatic redirect to the remaining storage system, or systems, of the remaining site, or sites. + +* Geo-replication is asynchronous. The permanent loss of a site incurs the loss of the data that has been saved in that sites' object storage system but has not yet been replicated to the remaining sites at the time of failure. + +* A single database, and therefore all metadata and {productname} configuration, is shared across all regions. ++ +Geo-replication does not replicate the database. In the event of an outage, {productname} with geo-replication enabled will not failover to another database. + +* A single Redis cache is shared across the entire {productname} setup and needs to be accessible by all {productname} pods. + +* The exact same configuration should be used across all regions, with exception of the storage backend, which can be configured explicitly using the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable. + +* Geo-replication requires object storage in each region. 
It does not work with local storage. + +* Each region must be able to access every storage engine in each region, which requires a network path. + +* Alternatively, the storage proxy option can be used. + +* The entire storage backend, for example, all blobs, is replicated. Repository mirroring, by contrast, can be limited to a repository, or an image. + +* All {productname} instances must share the same entrypoint, typically through a load balancer. + +* All {productname} instances must have the same set of superusers, as they are defined inside the common configuration file. + +* Geo-replication requires your Clair configuration to be set to `unmanaged`. An unmanaged Clair database allows the {productname} Operator to work in a geo-replicated environment, where multiple instances of the {productname} Operator must communicate with the same database. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#clair-unmanaged[Advanced Clair configuration]. + +* Geo-Replication requires SSL/TLS certificates and keys. For more information, see * Geo-Replication requires SSL/TLS certificates and keys. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/proof_of_concept_-_deploying_red_hat_quay/advanced-quay-poc-deployment[Proof of concept deployment using SSL/TLS certificates]. +. + +If the above requirements cannot be met, you should instead use two or more distinct {productname} deployments and take advantage of repository mirroring functions. 
\ No newline at end of file diff --git a/modules/getting-support.adoc b/modules/getting-support.adoc new file mode 100644 index 000000000..8963d5c9f --- /dev/null +++ b/modules/getting-support.adoc @@ -0,0 +1,74 @@ +:_content-type: CONCEPT +[id="getting-support"] += Getting support + +If you experience difficulty with a procedure described in this documentation, or with {productname} in general, visit the link:http://access.redhat.com[Red Hat Customer Portal]. From the Customer Portal, you can: + +* Search or browse through the Red Hat Knowledgebase of articles and solutions relating to Red Hat products. +* Submit a support case to Red Hat Support. +* Access other product documentation. + +To identify issues with your deployment, you can use the {productname} debugging tool, or check the health endpoint of your deployment to obtain information about your problem. After you have debugged or obtained health information about your deployment, you can search the Red Hat Knowledgebase for a solution or file a support ticket. + +If you have a suggestion for improving this documentation or have found an +error, submit a link:https://issues.redhat.com/secure/CreateIssue!default.jspa[Jira issue] to the `ProjectQuay` project. Provide specific details, such as the section name and {productname} version. + +[id="support-knowledgebase-about"] +== About the Red Hat Knowledgebase + +The link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] provides rich content aimed at helping you make the most of Red Hat's products and technologies. The Red Hat Knowledgebase consists of articles, product documentation, and videos outlining best practices on installing, configuring, and using Red Hat products. In addition, you can search for solutions to known issues, each providing concise root cause descriptions and remedial steps. 
The {productname} Support Team also maintains a link:https://access.redhat.com/articles/6975387[Consolidated troubleshooting article for {productname}] that details solutions to common problems. This is an evolving document that can help users navigate various issues effectively and efficiently.
Drag and drop, paste, or browse to upload a file. This could be debug logs gathered from your {productname} deployment. + +. Click *Get support* to file your ticket. \ No newline at end of file diff --git a/modules/health-check-quay.adoc b/modules/health-check-quay.adoc new file mode 100644 index 000000000..b93814bec --- /dev/null +++ b/modules/health-check-quay.adoc @@ -0,0 +1,47 @@ +:_content-type: CONCEPT +[id="health-check-quay"] += Performing health checks on {productname} deployments + +Health check mechanisms are designed to assess the health and functionality of a system, service, or component. Health checks help ensure that everything is working correctly, and can be used to identify potential issues before they become critical problems. By monitoring the health of a system, {productname} administrators can address abnormalities or potential failures for things like geo-replication deployments, Operator deployments, standalone {productname} deployments, object storage issues, and so on. Performing health checks can also help reduce the likelihood of encountering troubleshooting scenarios. + +Health check mechanisms can play a role in diagnosing issues by providing valuable information about the system's current state. By comparing health check results with expected benchmarks or predefined thresholds, deviations or anomalies can be identified quicker. + +[id="health-check-endpoints"] +== {productname} health check endpoints + +[IMPORTANT] +==== +Links contained herein to any external website(s) are provided for convenience only. Red Hat has not reviewed the links and is not responsible for the content or its availability. The inclusion of any link to an external website does not imply endorsement by Red Hat of the website or its entities, products, or services. You agree that Red Hat is not responsible or liable for any loss or expenses that may result due to your use of (or reliance on) the external site or content. 
+==== + +{productname} has several health check endpoints. The following table shows you the health check, a description, an endpoint, and an example output. + +.Health check endpoints +[cols="1a,3a,2a,2a",options="header"] +|=== +|Health check |Description |Endpoint |Example output +|`instance` | The `instance` endpoint acquires the entire status of the specific {productname} instance. Returns a `dict` with key-value pairs for the following: `auth`, `database`, `disk_space`, `registry_gunicorn`, `service_key`, and `web_gunicorn.` Returns a number indicating the health check response of either `200`, which indicates that the instance is healthy, or `503`, which indicates an issue with your deployment. |`https://{quay-ip-endpoint}/health/instance` _or_ `https://{quay-ip-endpoint}/health` | `{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200}` + +|`endtoend` |The `endtoend` endpoint conducts checks on all services of your {productname} instance. Returns a `dict` with key-value pairs for the following: `auth`, `database`, `redis`, `storage`. Returns a number indicating the health check response of either `200`, which indicates that the instance is healthy, or `503`, which indicates an issue with your deployment. |`https://{quay-ip-endpoint}/health/endtoend` | `{"data":{"services":{"auth":true,"database":true,"redis":true,"storage":true}},"status_code":200}` + +|`warning` |The `warning` endpoint conducts a check on the warnings. Returns a `dict` with key-value pairs for the following: `disk_space_warning`. Returns a number indicating the health check response of either `200`, which indicates that the instance is healthy, or `503`, which indicates an issue with your deployment. 
For {productname}, `"status_code": 200` means that the instance is healthy. Conversely, if you receive `"status_code": 503`, there is an issue with your deployment.
+ +//// +[id="installing-helm"] +== Installing Helm + +Use the following procedure to install the Helm client. + +.Procedure + +. Download the latest version of Helm from the link:https://github.com/helm/helm/releases[Helm releases] page. + +. Enter the following command to unpack the Helm binary: ++ +[source,terminal] +---- +$ tar -zxvf helm-v3.8.2-linux-amd64.tar.gz +---- + +. Move the Helm binary to the desired location: ++ +[source,terminal] +---- +$ mv linux-amd64/helm /usr/local/bin/helm +---- + +For more information about installing Helm, see the link:https://helm.sh/docs/intro/install/[Installing Helm] documentation. + +[id="upgrading-helm-38"] +== Upgrading to Helm 3.8 + +Support for OCI registry charts requires that Helm has been upgraded to at least 3.8. If you have already downloaded Helm and need to upgrade to Helm 3.8, see the link:https://helm.sh/docs/helm/helm_upgrade/[Helm Upgrade] documentation. +//// +[id="enabling-system-trust-ssl-tls-certs"] +== Enabling your system to trust SSL/TLS certificates used by {productname} + +Communication between the Helm client and {productname} is facilitated over HTTPS. As of Helm 3.5, support is only available for registries communicating over HTTPS with trusted certificates. In addition, the operating system must trust the certificates exposed by the registry. You must ensure that your operating system has been configured to trust the certificates used by {productname}. Use the following procedure to enable your system to trust the custom certificates. + +.Procedure + +. Enter the following command to copy the `rootCA.pem` file to the `/etc/pki/ca-trust/source/anchors/` folder: ++ +[source,terminal] +---- +$ sudo cp rootCA.pem /etc/pki/ca-trust/source/anchors/ +---- + +. 
Enter the following command to update the CA trust store: ++ +[source,terminal] +---- +$ sudo update-ca-trust extract +---- +endif::[] + +//// + +[id="creating-organization-helm"] +== Creating an organization for Helm + +It is recommended that you create a new organization for storing Helm charts in +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +after you have downloaded the Helm client. Use the following procedure to create a new organization using the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. + +.Procedure + +ifeval::["{context}" == "quay-io"] +. Log in to your {quayio} deployment. +endif::[] +ifeval::["{context}" == "use-quay"] +. Log in to your {productname} deployment. +endif::[] + +. Click *Create New Organization*. + +. Enter a name for the organization, for example, *helm*. Then, click *Create Organization*. +//// \ No newline at end of file diff --git a/modules/helm-oci-quay.adoc b/modules/helm-oci-quay.adoc new file mode 100644 index 000000000..05eb6edf3 --- /dev/null +++ b/modules/helm-oci-quay.adoc @@ -0,0 +1,130 @@ +:_content-type: PROCEDURE +[id="using-helm-charts"] += Using Helm charts + +Use the following example to download and push an etherpad chart from the Red Hat Community of Practice (CoP) repository. + +.Prerequisites + +* You have logged into +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +.Procedure +//// +ifeval::["{context}" == "use-quay"] +. As a {productname} administrator, enable support for Helm by setting `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +---- +endif::[] +//// +. 
Add a chart repository by entering the following command: ++ +[source,terminal] +---- +$ helm repo add redhat-cop https://redhat-cop.github.io/helm-charts +---- + +. Enter the following command to update the information of available charts locally from the chart repository: ++ +[source,terminal] +---- +$ helm repo update +---- + +. Enter the following command to pull a chart from a repository: ++ +[source,terminal] +---- +$ helm pull redhat-cop/etherpad --version=0.0.4 --untar +---- + +. Enter the following command to package the chart into a chart archive: ++ +[source,terminal] +---- +$ helm package ./etherpad +---- ++ +Example output ++ +[source,terminal] +---- +Successfully packaged chart and saved it to: /home/user/linux-amd64/etherpad-0.0.4.tgz +---- + +ifeval::["{context}" == "quay-io"] +. Log in to {quayio} using `helm registry login`: ++ +[source,terminal] +---- +$ helm registry login quay.io +---- +endif::[] +ifeval::["{context}" == "use-quay"] +. Log in to {productname} using `helm registry login`: ++ +[source,terminal] +---- +$ helm registry login quay370.apps.quayperf370.perfscale.devcluster.openshift.com +---- +endif::[] + +. Push the chart to your repository using the `helm push` command: +ifeval::["{context}" == "quay-io"] ++ +[source,terminal] +---- +helm push etherpad-0.0.4.tgz oci://quay.io//helm +---- +endif::[] +ifeval::["{context}" == "use-quay"] ++ +[source,terminal] +---- +$ helm push etherpad-0.0.4.tgz oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com +---- +endif::[] ++ +Example output: ++ +[source,terminal] +---- +Pushed: quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad:0.0.4 +Digest: sha256:a6667ff2a0e2bd7aa4813db9ac854b5124ff1c458d170b70c2d2375325f2451b +---- + +. 
Ensure that the push worked by deleting the local copy, and then pulling the chart from the repository: ++ +[source,terminal] +---- +$ rm -rf etherpad-0.0.4.tgz +---- ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ helm pull oci://quay.io//helm/etherpad --version 0.0.4 +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ helm pull oci://quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad --version 0.0.4 +---- +endif::[] ++ +Example output: ++ +[source,terminal] +---- +Pulled: quay370.apps.quayperf370.perfscale.devcluster.openshift.com/etherpad:0.0.4 +Digest: sha256:4f627399685880daf30cf77b6026dc129034d68c7676c7e07020b70cf7130902 +---- diff --git a/modules/how-to-list-quay-repos.adoc b/modules/how-to-list-quay-repos.adoc new file mode 100644 index 000000000..b271bd331 --- /dev/null +++ b/modules/how-to-list-quay-repos.adoc @@ -0,0 +1,91 @@ +:_content-type: CONCEPT +[id="how-to-list-quay-repos"] += Listing more than 100 {productname} repositories using next_page + +When using the `curl` command to list {productname} repositories, you might encounter a limitation where only the first 100 repositories are displayed. + +To overcome the limitation and retrieve more than 100 repositories, the `next_page` token needs to be utilized. The `next_page` token allows you to fetch the next set of repositories. + +Use the following procedure to list more than 100 repositories. + +.Procedure + +. Enter the following `curl` command to retrieve the first 100 records along with the `next_page` token: ++ +[source,terminal] +---- +# curl -X GET -H "Authorization: Bearer ${TOKEN}" "https://${URL}/api/v1/repository?namespace=${ORGANIZATION}" | jq '.' 
| head -20 +---- ++ +.Example output ++ +[source,terminal] +---- +100 15092 100 15092 0 0 49320 0 --:--:-- --:--:-- --:--:-- 49159 +{ + "repositories": [ + { + "namespace": "redhat", + "name": "repo1", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, + { + "namespace": "redhat", + "name": "repo2", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, +... +gAAAAABhC5BunFXqUYIni1MZ_eXO8NL_TQEVzPEwpcUlnTMChM0YaNBiZwApkIllW5hpg8ARSBsuFg== <---- next_page token +---- + +. Use the `next_page` token obtained from the previous command to list more than 100 repositories. For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer Dz7hPQ..." "http://quay.example.com/api/v1/repository?namespace=redhat&next_page=gAAAAABhC7A...SBsuFg== " | jq '.' | head -20 +---- ++ +.Example output ++ +[source,terminal] +---- +% Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 15200 100 15200 0 0 51351 0 --:--:-- --:--:-- --:--:-- 51351 + + "repositories": [ + { + "namespace": "redhat", + "name": "repo101", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, + { + "namespace": "redhat", + "name": "repo102", + "description": "description", + "is_public": true, + "kind": "image", + "state": "NORMAL", + "is_starred": false + }, ] +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6234121[How to list more than 100 Quay repositories using next_page token]. 
+ + + diff --git a/modules/image-tags-overview.adoc b/modules/image-tags-overview.adoc new file mode 100644 index 000000000..126cfa1a5 --- /dev/null +++ b/modules/image-tags-overview.adoc @@ -0,0 +1,25 @@ +:_content-type: CONCEPT +[id="image-tags-overview"] += Image tags overview + +An _image tag_ refers to a label or identifier assigned to a specific version or variant of a container image. Container images are typically composed of multiple layers that represent different parts of the image. Image tags are used to differentiate between different versions of an image or to provide additional information about the image. + +Image tags have the following benefits: + +* *Versioning and Releases*: Image tags allow you to denote different versions or releases of an application or software. For example, you might have an image tagged as _v1.0_ to represent the initial release and _v1.1_ for an updated version. This helps in maintaining a clear record of image versions. + +* *Rollbacks and Testing*: If you encounter issues with a new image version, you can easily revert to a previous version by specifying its tag. This is helpful during debugging and testing phases. + +* *Development Environments*: Image tags are beneficial when working with different environments. You might use a _dev_ tag for a development version, _qa_ for quality assurance testing, and _prod_ for production, each with their respective features and configurations. + +* *Continuous Integration/Continuous Deployment (CI/CD)*: CI/CD pipelines often utilize image tags to automate the deployment process. New code changes can trigger the creation of a new image with a specific tag, enabling seamless updates. + +* *Feature Branches*: When multiple developers are working on different features or bug fixes, they can create distinct image tags for their changes. This helps in isolating and testing individual features. 
+ +* *Customization*: You can use image tags to customize images with different configurations, dependencies, or optimizations, while keeping track of each variant. + +* *Security and Patching*: When security vulnerabilities are discovered, you can create patched versions of images with updated tags, ensuring that your systems are using the latest secure versions. + +* *Dockerfile Changes*: If you modify the Dockerfile or build process, you can use image tags to differentiate between images built from the previous and updated Dockerfiles. + +Overall, image tags provide a structured way to manage and organize container images, enabling efficient development, deployment, and maintenance workflows. \ No newline at end of file diff --git a/modules/integration-intro.adoc b/modules/integration-intro.adoc new file mode 100644 index 000000000..3a5f88138 --- /dev/null +++ b/modules/integration-intro.adoc @@ -0,0 +1,6 @@ +[[integration-intro]] += Integration + +* Extensible API +* Webhooks, OAuth +* Robot Accounts \ No newline at end of file diff --git a/modules/internal-api.adoc b/modules/internal-api.adoc new file mode 100644 index 000000000..1af8ee816 --- /dev/null +++ b/modules/internal-api.adoc @@ -0,0 +1,30 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="internal-api"] += Internal API endpoints for Clair + +Internal API endpoints are embedded in `/api/v1/internal` and are meant for communication between Clair microservices. + +[IMPORTANT] +==== +* If your Clair `config.yaml` file is set to `CLAIR_MODE=combo`, internal API endpoints might not exist. +* APIs are not formally exposed in Clair's OpenAPI Specification. Further information and usage is dependent on the reader. +==== + +[id="update-diffs"] +== Update diffs + +The `update_diff` endpoint exposes the API for diffing two update operations. 
= Java image scanning does not work with disconnected Clair
Check which SHA ID errors out by reaching out to the Maven indexer, for example: ++ +[source,terminal] +---- +{"level":"warn","file":"spring-web/lib/jcip-annotations-1.0.jar","layer":"sha256:7072d1ca8cd39f2ae4fd36d5a5272e4564a06c92441bdf29185c312ff87432ee","component":"java/Scanner.Scan","version":"3","scanner":"java","manifest":"sha256:d2eed634032c3827bd36f8aae86ef6113d9f4763fbeb6ad041b1f2a3962b6b24","state":"ScanLayers","kind":"package","error":"Get \"https://search.maven.org/solrsearch/select?q=1%3A%22afba4942caaeaf46aab0b976afd57cc7c181467e%22&wt=json\": dial tcp 52.1.120.204:443: i/o timeout","time":"2023-02-08T10:46:59Z","message":"error making request"} +---- + +. Run the following command to reveal which image this SHA ID belongs to: ++ +[source,terminal] +---- +quay=# SELECT t1.username AS namespace, t2.name AS repo_name, t4.content_checksum AS sha_digest FROM "user" AS t1 INNER JOIN repository AS t2 ON t1.id = t2.namespace_user_id INNER JOIN manifestblob AS t3 on t2.id = t3.repository_id INNER JOIN imagestorage AS t4 ON t3.blob_id = t4.id WHERE t4.content_checksum = 'sha256:0cea90e4778f9241c20421d8c97a8d182fd0fa51e6c84210dc4b57522fc901b8'; +---- ++ +.Example output ++ +[source,terminal] +---- +namespace | repo_name | sha_digest +-----------+-----------+------------------------------------------------------------------------- +redhat | quay | sha256:0cea90e4778f9241c20421d8c97a8d182fd0fa51e6c84210dc4b57522fc901b8 +---- + +. Run the following command to find the base operating system of the image, assuming it is Java-based: ++ +[source,terminal] +---- +$ podman run image:tag /bin/bash -c "cat /etc/*release" +---- + +. There are no documented steps to stop the Maven indexer. 
Tokens that do not expire have security implications for users who do not want to store long-term passwords or manage the deletion or regeneration of new authentication tokens. + +With {productname} {producty}, {productname} administrators are provided the ability to exchange external OIDC tokens for short-lived, or _ephemeral_ robot account tokens with either Red Hat Single Sign-On (based on the Keycloak project) or Microsoft Entra ID. This allows robot accounts to leverage tokens that last one hour, which are refreshed regularly and can be used to authenticate individual transactions.
== Generating an OAuth2 token with Red Hat Single Sign-On
Complete the login by providing the required information and setting a permanent password if necessary. + +. You are redirected to the URI address provided for your client. For example: ++ +[source,text] +---- +https://localhost:3000/cb?session_state=5c9bce22-6b85-4654-b716-e9bbb3e755bc&iss=http%3A%2F%2Flocalhost%3A8080%2Frealms%2Fmaster&code=ea5b76eb-47a5-4e5d-8f71-0892178250db.5c9bce22-6b85-4654-b716-e9bbb3e755bc.cdffafbc-20fb-42b9-b254-866017057f43 +---- ++ +Take note of the `code` provided in the address. For example: ++ +[source,text] +---- +code=ea5b76eb-47a5-4e5d-8f71-0892178250db.5c9bce22-6b85-4654-b716-e9bbb3e755bc.cdffafbc-20fb-42b9-b254-866017057f43 +---- ++ +[NOTE] +==== +This is a temporary code that can only be used one time. If necessary, you can refresh the page or revisit the URL to obtain another code. +==== + +. On your terminal, use the following `curl -X POST` command to generate a temporary OAuth2 access token: ++ +[source,terminal] +---- +$ curl -X POST "http://localhost:8080/realms/master/protocol/openid-connect/token" <1> +-H "Content-Type: application/x-www-form-urlencoded" \ +-d "client_id=quaydev" <2> +-d "client_secret=g8gPsBLxVrLo2PjmZkYBdKvcB9C7fmBz" <3> +-d "grant_type=authorization_code" +-d "code=ea5b76eb-47a5-4e5d-8f71-0892178250db.5c9bce22-6b85-4654-b716-e9bbb3e755bc.cdffafbc-20fb-42b9-b254-866017057f43" <4> +---- +<1> The `protocol/openid-connect/token` endpoint found on the *Realm settings* page of the Red Hat Single Sign-On UI. +<2> The Client ID used for this procedure. +<3> The Client Secret for the Client ID. +<4> The code returned from the redirect URI. 
* You have created a robot account. The following example uses `fed_test+robot1`.
* You have configured an OIDC for your {productname} deployment. The following example uses Red Hat Single Sign-On.
++ +[NOTE] +==== +The information collected in this step will vary depending on your OIDC provider. For example, with Red Hat Single Sign-On, the *ID* of a user is used as the *Subject* to set up the robot account federation in a subsequent step. For a different OIDC provider, like Microsoft Entra ID, this information is stored as the *Subject*. +==== + +. On your {productname} registry: + +.. Navigate to *Organizations* and click the name of your organization, for example, *fed_test*. + +.. Click *Robot Accounts*. + +.. Click the menu kebab -> *Set robot federation*. + +.. Click the *+* symbol. + +.. In the popup window, include the following information: ++ +* *Issuer URL*: `\https://keycloak-auth-realm.quayadmin.org/realms/quayrealm`. For Red Hat Single Sign-On, this is the URL of your Red Hat Single Sign-On realm. This might vary depending on your OIDC provider. +* *Subject*: `449e14f8-9eb5-4d59-a63e-b7a77c75f770`. For Red Hat Single Sign-On, the *Subject* is the *ID* of your Red Hat Single Sign-On user. This varies depending on your OIDC provider. For example, if you are using Microsoft Entra ID, the *Subject* will be the *Subject* of your Entra ID user. + +.. Click *Save*. + +[id="exchanging-oauth2-robot-account-token"] +== Exchanging an OAuth2 access token for a {productname} robot account token + +The following procedure leverages the `access_token` generated in the previous procedure to create a new {productname} robot account token. The new {productname} robot account token is used for authentication between your OIDC provider and {productname}. + +[NOTE] +==== +The following example uses a Python script to exchange the OAuth2 access token for a {productname} robot account token. +==== + +.Prerequisites + +* You have the `python3` CLI tool installed. + +.Procedure + +. 
Save the following Python script in a `.py` file, for example, `robot_fed_token_auth.py`. ++ +[source,python] +---- +import requests +import os + +TOKEN=os.environ.get('TOKEN') +robot_user = "fed_test+robot1" + +def get_quay_robot_token(fed_token): + URL = "https:///oauth2/federation/robot/token" + response = requests.get(URL, auth=(robot_user,fed_token)) <1> + print(response) + print(response.text) + +if __name__ == "__main__": + get_quay_robot_token(TOKEN) +---- +<1> If your {productname} deployment is using custom SSL/TLS certificates, the response must be `response = requests.get(URL,auth=(robot_user,fed_token),verify=False)`, which includes the `verify=False` flag. + +. Export the OAuth2 access token as `TOKEN`. For example: ++ +[source,terminal] +---- +$ export TOKEN=eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJTVmExVHZ6eDd2cHVmc1dkZmc1SHdua1ZDcVlOM01DN1N5T016R0QwVGhVIn0... +---- + +. Run the `robot_fed_token_auth.py` script by entering the following command: ++ +[source,terminal] +---- +$ python3 robot_fed_token_auth.py +---- ++ +.Example output ++ +[source,terminal] +---- + +{"token": "291cmNlX2FjY2VzcyI6eyJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6InByb2ZpbGUgZW1haWwiLCJlbWFpbF92ZXJpZ..."} +---- ++ +[IMPORTANT] +==== +This token expires after one hour. After one hour, a new token must be generated. +==== + +. Export the robot account access token as `QUAY_TOKEN`. For example: ++ +[source,terminal] +---- +$ export QUAY_TOKEN=291cmNlX2FjY2VzcyI6eyJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6InByb2ZpbGUgZW1haWwiLCJlbWFpbF92ZXJpZ +---- + +[id="pushing-pulling-images-robot-account"] +== Pushing and pulling images + +After you have generated a new robot account access token and exported it, you can log in with the robot account using the access token and push and pull images. 
+ +.Prerequisites + +* You have exported the OAuth2 access token into a new robot account access token. + +.Procedure + +. Log in to your {productname} registry using the `fed_test+robot1` robot account and the `QUAY_TOKEN` access token. For example: ++ +[source,terminal] +---- +$ podman login -u fed_test+robot1 -p $QUAY_TOKEN +---- + +. Pull an image from a {productname} repository for which the robot account has the proper permissions. For example: ++ +[source,terminal] +---- +$ podman pull /> +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 900e6061671b done +Copying config 8135583d97 done +Writing manifest to image destination +Storing signatures +8135583d97feb82398909c9c97607159e6db2c4ca2c885c0b8f590ee0f9fe90d +0.57user 0.11system 0:00.99elapsed 68%CPU (0avgtext+0avgdata 78716maxresident)k +800inputs+15424outputs (18major+6528minor)pagefaults 0swaps +---- + +. Attempt to pull an image from a {productname} repository for which the robot account does _not_ have the proper permissions. For example: ++ +[source,terminal] +---- +$ podman pull /> +---- ++ +.Example output ++ +[source,terminal] +---- +Error: initializing source docker://quay-server.example.com/example_repository/busybox:latest: reading manifest in quay-server.example.com/example_repository/busybox: unauthorized: access to the requested resource is not authorized +---- ++ +After one hour, the credentials for this robot account are set to expire. Afterwards, you must generate a new access token for this robot account. 
diff --git a/modules/ldap-binding-groups-intro.adoc b/modules/ldap-binding-groups-intro.adoc new file mode 100644 index 000000000..d5c2267b4 --- /dev/null +++ b/modules/ldap-binding-groups-intro.adoc @@ -0,0 +1,23 @@ +[[ldap-binding-groups]] += Bind team membership to specific LDAP groups + +With {productname}, team sync support can be enabled via the config application by clicking on the `Enable team synchronization support` button once Lightweight Directory Access Protocol (LDAP) is selected as the internal authentication method: + +image:ldap-internal-authentication.png[LDAP authentication] + +The default time to sync groups is 60 minutes, but can be modified to any other time frame if needed. The first sync happens immediately after linking a team to a specific group. The last option enables any team admin, and not just super users, to sync groups. + +In order to enable team sync, users must configure their teams by clicking on the `Enable Directory Synchronization` button in the *Teams* tab. A distinguished name is required relative to the base DN. For example: + +---- +cn=quayusers,ou=cloud +---- + +If binding to the group goes correctly, {productname} will display the group's distinguished name and the "last updated" status on the page. + +[IMPORTANT] +==== +Once sync is enabled, adding users to the group is no longer possible. It becomes read only. +==== + +Users that are added to the LDAP group will automatically be added to the team as well. Robot accounts can still be added to the group directly. diff --git a/modules/ldap-filtering-intro.adoc b/modules/ldap-filtering-intro.adoc new file mode 100644 index 000000000..32844568d --- /dev/null +++ b/modules/ldap-filtering-intro.adoc @@ -0,0 +1,5 @@ +[[ldap-filtering]] += LDAP filtering + +Lightweight Directory Access Protocol (LDAP) is an open, vendor neutral, industry standard application protocol for accessing and maintaining distributed directory information services over an IP network. 
{productname} supports using LDAP as an identity provider. {productname} users can now apply additional filters for lookup queries if LDAP / AD authentication is used. For information on setting up LDAP authentication for {productname}, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/ldap-authentication-setup-for-quay-enterprise[LDAP authentication setup for {productname}]. + diff --git a/modules/ldap-timeouts-quay.adoc b/modules/ldap-timeouts-quay.adoc new file mode 100644 index 000000000..086c1b884 --- /dev/null +++ b/modules/ldap-timeouts-quay.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="ldap-timeouts-quay"] += Can I increase LDAP timeouts when accessing {productname}? + +When using LDAP as your authentication provider, some users might experience timeouts when accessing {productname}. You can increase the timeout value by adding the following properties to your `config.yaml` file: + +[source,yaml] +---- +LDAP_TIMEOUT: 60 +LDAP_NETWORK_TIMEOUT: 60 +---- + +This increases the timeout to 60 seconds. The default time for this field is 10 seconds. + +If you are using a standalone version of {productname}, redeploy {productname} after updating your `config.yaml` file. + +If you are using the {productname} Operator, update the `config-bundle-secret` with the latest configuration. \ No newline at end of file diff --git a/modules/limit-organization-creation.adoc b/modules/limit-organization-creation.adoc new file mode 100644 index 000000000..eafccc4ac --- /dev/null +++ b/modules/limit-organization-creation.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="limit-organization-creation"] += Can I limit normal users from creating organizations in {productname}? + +Currently, there is no way to limit normal users from creating organizations in {productname}. 
\ No newline at end of file diff --git a/modules/listing-repos-superuser-api.adoc b/modules/listing-repos-superuser-api.adoc new file mode 100644 index 000000000..e9be21e0e --- /dev/null +++ b/modules/listing-repos-superuser-api.adoc @@ -0,0 +1,54 @@ +[id="listing-logs-superuser-api"] += Listing logs as a superuser with the {productname} API + +{productname} superusers can list usage logs for the current system. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listalllogs[`GET /api/v1/superuser/logs`] endpoint to list the usage logs for the current system: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/logs?starttime=&endtime=&page=&next_page=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"start_time": "Mon, 17 Feb 2025 19:29:14 -0000", "end_time": "Wed, 19 Feb 2025 19:29:14 -0000", "logs": [{"kind": "login_success", "metadata": {"type": "quayauth", "useragent": "Mozilla/5.0 (X11; Linux x86_64; rv:134.0) Gecko/20100101 Firefox/134.0"}, "ip": "192.168.1.131", "datetime": "Tue, 18 Feb 2025 19:28:15 -0000", "namespace": {"kind": "user", "name": "quayadmin", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}}}], "next_page": "gAAAAABntN-KbPJDI0PpcHmWjRCmQTLiCprE_KXiOSidbGZ7Ireu8pVTgGUIstijNhmiLzlAv_S3HOsCrKWnuBmoQYZ3F53Uxg=="} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getregistrysize[`GET /api/v1/superuser/registrysize/`] end point to obtain information about the size of the registry: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/registrysize/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"size_bytes": 0, "last_ran": null, "running": false, "queued": false} 
+---- +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getregistrysize[`POST /api/v1/superuser/registrysize/`] end point to define registry size information: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/registrysize/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "last_ran": 1700000000, + "queued": true, + "running": false + }' +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/logging-into-quayio.adoc b/modules/logging-into-quayio.adoc new file mode 100644 index 000000000..725df6bbc --- /dev/null +++ b/modules/logging-into-quayio.adoc @@ -0,0 +1,62 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="logging-into-quay"] += Logging into Quay + +A user account for {quayio} represents an individual with authenticated access to the platform's features and functionalities. Through this account, you gain the capability to create and manage repositories, upload and retrieve container images, and control access permissions for these resources. This account is pivotal for organizing and overseeing your container image management within {quayio}. + +[NOTE] +==== +Not all features on {quayio} require that users be logged in. For example, you can anonymously pull an image from {quayio} without being logged in, so long as the image you are pulling comes from a public repository. +==== + +Users have two options for logging into {quayio}: + +* By logging in through {quayio}. ++ +This option provides users with the legacy UI, as well as an option to use the beta UI environment, which adheres to link:https://www.patternfly.org/[PatternFly] UI principles. ++ +* By logging in through the link:console.redhat.com/quay[Red Hat Hybrid Cloud Console]. 
++ +This option uses Red Hat SSO for authentication, and is a public managed service offering by Red Hat. This option _always_ requires users to login. Like other managed services, Quay on the Red Hat Hybrid Cloud Console enhances the user experience by adhering to link:https://www.patternfly.org/[PatternFly] UI principles. + +Differences between using {quayio} directly and Quay on the link:console.redhat.com/quay[Red Hat Hybrid Cloud Console] are negligible, including for users on the free tier. Whether you are using {quayio} directly, on the Hybrid Cloud Console, features that require login, such as pushing to a repository, use your {quayio} username specifications. + +[id="logging-into-quayio"] +== Logging into {quayio} + +Use the following procedure to log into {quayio}. + +.Prerequisites + +* You have created a Red Hat account and a {quayio} account. For more information, see "Creating a {quayio} account". + +.Procedure + +. Navigate to link:quay.io[{quayio}]. + +. In the navigation pane, select *Sign In* and log in using your Red Hat credentials. + +. If it is your first time logging in, you must confirm the automatically-generated username. Click *Confirm Username* to log in. ++ +You are redirected to the {quayio} repository landing page. ++ +image:quayio-repo-landing-page.png[{quayio} repository landing page] + +[id="logging-into-quay-hybrid-cloud-console"] +== Logging into Quay through the Hybrid Cloud Console + +.Prerequisites + +* You have created a Red Hat account and a {quayio} account. For more information, see "Creating a {quayio} account". + +.Procedure + +. Navigate to the link:console.redhat.com/quay[Quay on the Red Hat Hybrid Cloud Console] and log in using your Red Hat account. 
You are redirected to the Quay repository landing page: ++ +image:quay-hybrid-cloud-landing-page.png[Quay on the Red Hat Hybrid Cloud Console] diff --git a/modules/managed-clair-database.adoc b/modules/managed-clair-database.adoc new file mode 100644 index 000000000..c75ec006c --- /dev/null +++ b/modules/managed-clair-database.adoc @@ -0,0 +1,28 @@ +:_content-type: PROCEDURE +[id="managed-clair-database"] += Setting a Clair database to managed + +Use the following procedure to set your Clair database to managed. + +.Procedure + +* In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: true`: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: quay370 +spec: + configBundleSecret: config-bundle-secret + components: + - kind: objectstorage + managed: false + - kind: route + managed: true + - kind: tls + managed: false + - kind: clairpostgres + managed: true +---- \ No newline at end of file diff --git a/modules/managing-a-team-api.adoc b/modules/managing-a-team-api.adoc new file mode 100644 index 000000000..a1c5634e0 --- /dev/null +++ b/modules/managing-a-team-api.adoc @@ -0,0 +1,12 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-a-team-api"] += Managing a team by using the {productname} API + +After you have created a team, you can use the API to obtain information about team permissions or team members, add, update, or delete team members (including by email), or delete an organization team. + +The following procedures show you how to manage a team using the {productname} API. 
\ No newline at end of file diff --git a/modules/managing-builds-api.adoc b/modules/managing-builds-api.adoc new file mode 100644 index 000000000..0a7b1d6e5 --- /dev/null +++ b/modules/managing-builds-api.adoc @@ -0,0 +1,84 @@ +[id="manage-builds-api"] += Managing builds by using the {productname} API + +Builds can be managed by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listbuildtriggers[`GET /api/v1/repository/{repository}/trigger/`] endpoint to list the triggers for the specified repository: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"triggers": [{"id": "32ca5eae-a29f-46c7-8f44-3221ca417c92", "service": "custom-git", "is_active": false, "build_source": null, "repository_url": null, "config": {}, "can_invoke": true, "enabled": true, "disabled_reason": null}]} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#activatebuildtrigger[`POST /api/v1/repository/{repository}/trigger/{trigger_uuid}/activate`] endpoint to activate the specified build trigger. ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/activate" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "config": { + "branch": "main" + }, + "pull_robot": "example+robot" + }' +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#manuallystartbuildtrigger[`POST /api/v1/repository/{repository}/trigger/{trigger_uuid}/start`] endpoint to manually start the build from the specified trigger: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/start" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "branch_name": "main", + "commit_sha": "abcdef1234567890", + "refs": "refs/heads/main" + }' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listtriggerrecentbuilds[`GET /api/v1/repository/{repository}/trigger/{trigger_uuid}/builds`] endpoint to list the builds started by the specified trigger: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid/builds?limit=10" \ + -H "Authorization: Bearer " +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getbuildtrigger[`GET /api/v1/repository/{repository}/trigger/{trigger_uuid}`] endpoint to get information for the specified build trigger: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updatebuildtrigger[`PUT /api/v1/repository/{repository}/trigger/{trigger_uuid}`] endpoint to update the specified build trigger: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"enabled": true}' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletebuildtrigger[`DELETE /api/v1/repository/{repository}/trigger/{trigger_uuid}`] endpoint to delete the specified build trigger: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository/example_namespace/example_repo/trigger/example-trigger-uuid" \ + -H "Authorization: Bearer " +---- \ No newline at end of file diff --git a/modules/managing-namespace-auto-pruning-policies.adoc b/modules/managing-namespace-auto-pruning-policies.adoc new file mode 100644 index 000000000..37454fa99 --- /dev/null +++ b/modules/managing-namespace-auto-pruning-policies.adoc @@ -0,0 +1,583 @@ +:_content-type: PROCEDURE +[id="managing-namespace-auto-pruning-policies"] += Managing auto-pruning policies using the {productname} UI + +//All API content in this module needs removed and replaced with the modules that exist in the /api/ guide. + +All auto-pruning policies, with the exception of a registry-wide auto pruning policy, are created using the {productname} v2 UI or by using the API. This can be done after you have configured your {productname} `config.yaml` file to enable the auto-pruning feature and the v2 UI. + +[NOTE] +==== +This feature is not available when using the {productname} legacy UI. 
+==== + +[id="configuring-namespace-auto-prune-feature"] +== Configuring the {productname} auto-pruning feature + +Use the following procedure to configure your {productname} `config.yaml` file to enable the auto-pruning feature. + +.Prerequisites + +* You have set `FEATURE_UI_V2` to `true` in your `config.yaml` file. + +.Procedure + +* In your {productname} `config.yaml` file, add, and set, the `FEATURE_AUTO_PRUNE` environment variable to `True`. For example: ++ +[source,yaml] +---- +# ... +FEATURE_AUTO_PRUNE: true +# ... +---- + +[id="creating-registry-wide-auto-pruning-policy"] +== Creating a registry-wide auto-pruning policy + +Registry-wide auto-pruning policies can be configured on new and existing organizations. This feature saves {productname} administrators time, effort, and storage by enforcing registry-wide rules. + +{productname} administrators must enable this feature by updating their `config.yaml` file through the inclusion of `DEFAULT_NAMESPACE_AUTOPRUNE_POLICY` configuration field, and one of `number_of_tags` or `creation_date` methods. Currently, this feature cannot be enabled by using the v2 UI or the API. + +Use the following procedure to create an auto-prune policy for your {productname} registry. + +.Prerequisites + +* You have enabled the `FEATURE_AUTO_PRUNE` feature. + +.Procedure + +. Update your `config.yaml` file to add the `DEFAULT_NAMESPACE_AUTOPRUNE_POLICY` configuration field: + +.. To set the policy method to remove the oldest tags by their creation date until the number of tags provided is left, use the `number_of_tags` method: ++ +[source,yaml] +---- +# ... +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: number_of_tags + value: 2 <1> +# ... +---- +<1> In this scenario, two tags remain. + +.. 
To set the policy method to remove tags with a creation date older than the provided time span, for example, `5d`, use the `creation_date` method: ++ +[source,yaml] +---- +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: creation_date + value: 5d +---- + +. Restart your {productname} deployment. + +. Optional. If you need to tag and push images to test this feature: + +.. Tag four sample images that will be pushed to a {productname} registry. For example: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test2 +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test3 +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox //busybox:test4 +---- + +.. Push the four sample images to the registry with auto-pruning enabled by entering the following commands: ++ +[source,terminal] +---- +$ podman push /quayadmin/busybox:test +---- ++ +[source,terminal] +---- +$ podman push //busybox:test2 +---- ++ +[source,terminal] +---- +$ podman push //busybox:test3 +---- ++ +[source,terminal] +---- +$ podman push //busybox:test4 +---- + +. Check that there are four tags in the registry that you pushed the images to. + +. By default, the auto-pruner worker at the registry level runs every 24 hours. After 24 hours, the two oldest image tags are removed, leaving the `test3` and `test4` tags if you followed these instructions. Check your {productname} organization to ensure that the two oldest tags were removed. + +[id="creating-policy-v2-ui"] +== Creating an auto-prune policy for an organization by using the {productname} v2 UI + +Use the following procedure to create an auto-prune policy for an organization using the {productname} v2 UI. + +.Prerequisites + +* You have enabled the `FEATURE_AUTO_PRUNE` feature. +* Your organization has image tags that have been pushed to it. + +.Procedure + +. 
On the {productname} v2 UI, click *Organizations* in the navigation pane. + +. Select the name of an organization that you will apply the auto-pruning feature to, for example, `test_organization`. + +. Click *Settings*. + +. Click *Auto-Prune Policies*. For example: ++ +image:auto-prune-policies-page.png[Auto-Prune Policies page] + +. Click the drop down menu and select the desired policy, for example, *By number of tags*. + +. Select the desired number of tags to keep. By default, this is set at *20* tags. For this example, the number of tags to keep is set at *3*. + +. Optional. With the introduction of _regular expressions_, you are provided the following options to fine-grain your auto-pruning policy: ++ +* *Match*: When selecting this option, the auto-pruner prunes all tags that match the given _regex_ pattern. +* *Does not match*: When selecting this option, the auto-pruner prunes all tags that _do not_ match the _regex_ pattern. ++ +If you do not select an option, the auto-pruner defaults to pruning all image tags. ++ +For this example, click the *Tag pattern* box and select *match*. In the regex box, enter a pattern to match tags against. For example, to automatically prune all `test` tags, enter `^test.*`. + +. Optional. You can create a second auto-prune policy by clicking *Add Policy* and entering the required information. + +. Click *Save*. A notification that your auto-prune policy has been updated appears. ++ +With this example, the organization is configured to keep the three latest tags that are named `^test.*`. + +.Verification + +* Navigate to the *Tags* page of your Organization's repository. After a few minutes, the auto-pruner worker removes tags that no longer fit within the established criteria. In this example, it removes the `busybox:test` tag, and keeps the `busybox:test2`, `busybox:test3`, and `busybox:test4` tag. 
++ +After tags are automatically pruned, they go into the {productname} time machine, or the amount of time after a tag is deleted that the tag is accessible before being garbage collected. The expiration time of an image tag is dependent on your organization's settings. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#garbage-collection[{productname} garbage collection]. + +[id="creating-policy-api"] +== Creating an auto-prune policy for a namespace by using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for a namespace. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationautoprunepolicy[`POST /api/v1/organization/{orgname}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in an organization: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ +"method": "creation_date", "value": "7d"}' http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output +[source,terminal] +---- +{"uuid": "73d64f05-d587-42d9-af6d-e726a4a80d6e"} +---- + +. Optional. You can add an additional policy to an organization and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. 
For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/organization//autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `true` makes it so that tags that match the given regex pattern will be pruned. In this example, tags that match `^v*` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0"} +---- + +. You can update your organization's auto-prune policy by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationautoprunepolicy[`PUT /api/v1/organization/{orgname}/autoprunepolicy/{policy_uuid}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' "/api/v1/organization//autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ebf7448b-93c3-4f14-bf2f-25aa6857c7b0", "method": "creation_date", "value": "4d", "tagPattern": "^v*", "tagPatternMatches": true}, {"uuid": "da4d0ad7-3c2d-4be8-af63-9c51f9a501bc", "method": "number_of_tags", "value": 10, "tagPattern": null, "tagPatternMatches": true}, {"uuid": "17b9fd96-1537-4462-a830-7f53b43f94c2", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true}]} +---- + +. You can delete the auto-prune policy for your organization by entering the following command. 
Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/organization//autoprunepolicy/73d64f05-d587-42d9-af6d-e726a4a80d6e +---- + +[id="creating-policy-api-current-user"] +== Creating an auto-prune policy for a namespace for the current user by using the API + +You can use {productname} API endpoints to manage auto-pruning policies for your account. + +[NOTE] +==== +The use of `/user/` in the following commands represents the user that is currently logged into {productname}. +==== + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. Enter the following `POST` command create a new policy that limits the number of tags for the current user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags", "value": 10}' http:///api/v1/user/autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. 
++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/user/autoprunepolicy/8c03f995-ca6f-4928-b98d-d75ed8c14859 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "8c03f995-ca6f-4928-b98d-d75ed8c14859"} +---- + +[id="creating-policy-repository-v2-ui"] +== Creating an auto-prune policy for a repository using the {productname} v2 UI + +Use the following procedure to create an auto-prune policy for a repository using the {productname} v2 UI. + +.Prerequisites + +* You have enabled the `FEATURE_AUTO_PRUNE` feature. +* You have pushed image tags to your repository. + +.Procedure + +. On the {productname} v2 UI, click *Repository* in the navigation pane. + +. Select the name of an organization that you will apply the auto-pruning feature to, for example, `/`. + +. Click *Settings*. + +. Click *Repository Auto-Prune Policies*. + +. Click the drop down menu and select the desired policy, for example, *By age of tags*. + +. Set a time, for example, `5` and an interval, for example `minutes` to delete tags older than the specified time frame. For this example, tags older than 5 minutes are marked for deletion. + +. Optional. With the introduction of _regular expressions_, you are provided the following options to fine-grain your auto-pruning policy: ++ +* *Match*: When selecting this option, the auto-pruner prunes all tags that match the given _regex_ pattern. +* *Does not match*: When selecting this option, the auto-pruner prunes all tags that _do not_ match the _regex_ pattern. ++ +If you do not select an option, the auto-pruner defaults to pruning all image tags. ++ +For this example, click the *Tag pattern* box and select *Does not match*. In the _regex_ box, enter a pattern to match tags against. For example, to automatically prune all tags that _do not_ match the `test` tag, enter `^test.*`. + +. Optional. You can create a second auto-prune policy by clicking *Add Policy* and entering the required information. + +. 
Click *Save*. A notification that your auto-prune policy has been updated appears. + +.Verification + +* Navigate to the *Tags* page of your Organization's repository. With this example, tags that are older than 5 minutes that _do not_ match the `^test.*` _regex_ tag are automatically pruned when the pruner runs. ++ +After tags are automatically pruned, they go into the {productname} time machine, or the amount of time after a tag is deleted that the tag is accessible before being garbage collected. The expiration time of an image tag is dependent on your organization's settings. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#garbage-collection[{productname} garbage collection]. + +[id="creating-repository-policy-api"] +== Creating an auto-prune policy for a repository using the {productname} API + +You can use {productname} API endpoints to manage auto-pruning policies for a repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. + +.Procedure + +. 
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationautoprunepolicy[`POST /api/v1/repository/{repository}/autoprunepolicy/`] command to create a new policy that limits the number of tags allowed in an organization: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can set tags to expire for a specified time after their creation date: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "creation_date", "value": "7d"}' http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- + +. Optional. You can add an additional policy and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "", + "value": "<7d>", + "tagPattern": "<^test.>*", + "tagPatternMatches": <1> + }' \ + "https:///api/v1/repository///autoprunepolicy/" +---- +<1> Setting `tagPatternMatches` to `false` makes it so that all tags that _do not_ match the given regex pattern are pruned. In this example, all tags _but_ `^test.` are pruned. ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b53d8d3f-2e73-40e7-96ff-736d372cd5ef"} +---- + +. You can update your policy for the repository by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepositoryautoprunepolicy[`PUT /api/v1/repository/{repository}/autoprunepolicy/{policy_uuid}`] command and passing in the UUID. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "number_of_tags", + "value": "5", + "tagPattern": "^test.*", + "tagPatternMatches": true + }' \ + "https://quay-server.example.com/api/v1/repository///autoprunepolicy/" +---- ++ +This command does not return output. Continue to the next step to check your auto-prune policy. + +. Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"policies": [{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7", "method": "number_of_tags", "value": 10}]} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "ce2bdcc0-ced2-4a1a-ac36-78a9c1bed8c7"} +---- + +[id="creating-policy-api-other-user"] +== Creating an auto-prune policy on a repository for a user with the API + +You can use {productname} API endpoints to manage auto-pruning policies on a repository for user accounts that are not your own, so long as you have `admin` privileges on the repository. + +.Prerequisites + +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. +* You have created an OAuth access token. +* You have logged into {productname}. +* You have `admin` privileges on the repository that you are creating the policy for. + +.Procedure + +. 
Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserautoprunepolicy[`POST /api/v1/repository///autoprunepolicy/`] command create a new policy that limits the number of tags for the user: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{"method": "number_of_tags","value": 2}' https:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- + +. Optional. You can add an additional policy for the current user and pass in the `tagPattern` and `tagPatternMatches` fields to prune only tags that match the given regex pattern. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "method": "creation_date", + "value": "7d", + "tagPattern": "^v*", + "tagPatternMatches": true + }' \ + "http:///api/v1/repository///autoprunepolicy/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "b3797bcd-de72-4b71-9b1e-726dabc971be"} +---- + +. You can update your policy for the current user by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateuserautoprunepolicy[`PUT /api/v1/repository///autoprunepolicy/`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "method": "creation_date", + "value": "4d", + "tagPattern": "^test.", + "tagPatternMatches": true + }' "https:///api/v1/repository///autoprunepolicy/" +---- ++ +Updating a policy does not return output in the CLI. + +. 
Check your auto-prune policy by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +Alternatively, you can include the UUID: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/7726f79c-cbc7-490e-98dd-becdc6fefce7 +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "81ee77ec-496a-4a0a-9241-eca49437d15b", "method": "creation_date", "value": "7d", "tagPattern": "^v*", "tagPatternMatches": true} +---- + +. You can delete the auto-prune policy by entering the following command. Note that deleting the policy requires the UUID. ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " http:///api/v1/repository///autoprunepolicy/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"uuid": "7726f79c-cbc7-490e-98dd-becdc6fefce7"} +---- diff --git a/modules/managing-organization-quota-superuser-api.adoc b/modules/managing-organization-quota-superuser-api.adoc new file mode 100644 index 000000000..e799e28d0 --- /dev/null +++ b/modules/managing-organization-quota-superuser-api.adoc @@ -0,0 +1,69 @@ +[id="managing-organization-quota-superuser-api"] += Managing organization quota with the {productname} API + +Quota can be managed with the {productname} API with superuser admin privileges. These endpoints allow superusers to manage quota policies for all organizations within the registry. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createuserquotasuperuser[`POST /api/v1/superuser/organization/{namespace}/quota`] API endpoint to create a quota policy for an organization: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240 + }' +---- ++ +.Example output ++ +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserquotasuperuser[`GET /api/v1/superuser/organization/{namespace}/quota`] API endpoint to obtain information about the policy, including the quota ID: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/organization//quota" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 2, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}], "default_config_exists": false}] +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeuserquotasuperuser[`PUT /api/v1/superuser/organization/{namespace}/quota/{quota_id}`] API endpoint to change the quota policy: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 2, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}], "default_config_exists": false} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserquotasuperuser[`DELETE /api/v1/superuser/organization/{namespace}/quota/{quota_id}`] API endpoint to delete the quota policy for an organization: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/organization//quota/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/managing-organization-superuser-api.adoc b/modules/managing-organization-superuser-api.adoc new file mode 100644 index 000000000..b10a36cd3 --- /dev/null +++ b/modules/managing-organization-superuser-api.adoc @@ -0,0 +1,56 @@ +[id="organization-manage-api"] += Managing organizations as a superuser with the {productname} API + +Superusers have the ability to list, change, and delete organizations by using the {productname} API. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listallorganizations[`GET /api/v1/superuser/organizations`] endpoint to list all organizations: ++ +[source,terminal] +---- +$ curl -L -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/organizations?name=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"organizations": [{"name": "fed_test", "email": "fe11fc59-bd09-459a-a21c-b57692d151c9", "avatar": {"name": "fed_test", "hash": "e2ce1fb42ec2e0602362beb64b5ebd1e6ad291b710a0355f9296c16157bef3cb", "color": "#ff7f0e", "kind": "org"}, "quotas": [{"id": 3, "limit_bytes": 10737418240, "limits": []}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}}, {"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "org"}, "quotas": [{"id": 2, 
"limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizatio[`PUT /api/v1/superuser/organizations/{name}`] endpoint to change or update information for an organization: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "email": "", + "invoice_email": , + "invoice_email_address": "", + "tag_expiration_s": + }' \ + "https:///api/v1/superuser/organizations/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "org"}, "quotas": [{"id": 2, "limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganization[`DELETE /api/v1/superuser/organizations/{name}`] endpoint to delete an organization: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/organizations/" +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/managing-restricted-users.adoc b/modules/managing-restricted-users.adoc new file mode 100644 index 000000000..bba1a0d04 --- /dev/null +++ b/modules/managing-restricted-users.adoc @@ -0,0 +1,41 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: REFERENCE +[id="managing-restricted-users"] += Managing restricted users + +By default, all {productname} members part of a registry can create repositories and upload content to their own user account. For example, when `user1` pushes an artifact tag such as `//:`, a repository of the name `user1/image` is created. Inside of that repository is information about the artifact tag. + +With the `FEATURE_RESTRICTED_USERS` configuration field, {productname} administrators can restrict all users that are part of their registry from pushing images or artifacts to the registry. This configuration field effectively renders all users from creating new organizations or pushing content altogether _unless they are already part of that organization and defined as a team member of that organization_; that is, restricted users still have normal permissions in organizations based on the teams that they are members of. + +For example, a {productname} administrator sets the `FEATURE_RESTRICTED_USERS` configuration field in their `config.yaml` file as follows: + +[source,yaml] +---- +FEATURE_RESTRICTED_USERS: true +---- + +When set as shown, `user1` is unable to create a new organization by using the {productname} UI. Upon attempt, the following error message is returned: `Unauthorized`. Additionally, if `user1` attempts to push an image to their own namespace by using the CLI (that is, `//:`), the following error message is returned: `Error: writing blob: initiating layer upload to /v2/user1//blobs/uploads/ in : unauthorized: access to the requested resource is not authorized`. 
However, if `user1` is part of an organization's team as defined by an administrator, they maintain the permissions granted to that team. For example, if `user1` is added to an organization's team and given the *Admin* role, they have administrative privileges for that organization. + +When `FEATURE_RESTRICTED_USERS` is leveraged with the `RESTRICTED_USERS_WHITELIST` configuration field, however, {productname} administrators can allow specified members the ability to continue to push to the registry or make organizations. In general, when `FEATURE_RESTRICTED_USERS` is set, {productname} administrators might also set `RESTRICTED_USERS_WHITELIST`, otherwise all members of the registry (with the exception of those defined by a team) are rendered incapable of doing basic tasks. + +For example, a {productname} administrator sets the `FEATURE_RESTRICTED_USERS` and `RESTRICTED_USERS_WHITELIST` configuration fields in their `config.yaml` file as follows: + +[source,yaml] +---- +# ... +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USERS_WHITELIST: + - user2 +# ... +---- + +With this configuration, all users _except_ `user2` are restricted from pushing images or creating organizations. Other users part of a team will also have these privileges. Users part of the registry that are neither defined by the `RESTRICTED_USERS_WHITELIST` field nor part of an organization's team have no permissions within the registry, and will therefore be unable to perform basic tasks. + +[NOTE] +==== +This feature works differently for LDAP deployment types. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3/html-single/manage_red_hat_quay/index#ldap-authentication-setup-for-quay-enterprise[LDAP authentication setup for {productname}]. 
+==== \ No newline at end of file diff --git a/modules/managing-robot-account-permissions-v2-ui.adoc b/modules/managing-robot-account-permissions-v2-ui.adoc new file mode 100644 index 000000000..24a2d8a8d --- /dev/null +++ b/modules/managing-robot-account-permissions-v2-ui.adoc @@ -0,0 +1,36 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-robot-account-permissions-v2-ui"] += Bulk managing robot account repository access + +Use the following procedure to manage, in bulk, robot account repository access by using the {productname} v2 UI. + +.Prerequisites + +* You have created a robot account. +* You have created multiple repositories under a single organization. + +.Procedure + +. On the {productname} v2 UI landing page, click *Organizations* in the navigation pane. + +. On the *Organizations* page, select the name of the organization that has multiple repositories. The number of repositories under a single organization can be found under the *Repo Count* column. + +. On your organization's page, click *Robot accounts*. + +. For the robot account that will be added to multiple repositories, click the kebab icon -> *Set repository permissions*. + +. On the *Set repository permissions* page, check the boxes of the repositories that the robot account will be added to. For example: ++ +image:set-repository-permissions-robot-account.png[Set repository permissions] + +. Set the permissions for the robot account, for example, *None*, *Read*, *Write*, *Admin*. + +. Click *save*. An alert that says *Success alert: Successfully updated repository permission* appears on the *Set repository permissions* page, confirming the changes. + +. Return to the *Organizations* -> *Robot accounts* page. Now, the *Repositories* column of your robot account shows the number of repositories that the robot account has been added to. 
\ No newline at end of file diff --git a/modules/managing-service-keys-api.adoc b/modules/managing-service-keys-api.adoc new file mode 100644 index 000000000..b246f8ccf --- /dev/null +++ b/modules/managing-service-keys-api.adoc @@ -0,0 +1,107 @@ +[id="service-key-manage-api"] += Managing service keys as a superuser with the {productname} API + +Superusers have the ability to create, list, change, and delete service keys by using the {productname} API. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createservicekey[`POST /api/v1/superuser/keys`] endpoint to create a service key: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "service": "", + "expiration": + }' \ + "/api/v1/superuser/keys" +---- ++ +.Example output ++ +[source,terminal] +---- +{"message":""} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#approveservicekey[`POST /api/v1/superuser/approvedkeys/{kid}`] endpoint to approve a service key: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "notes": "" + }' \ + "https:///api/v1/superuser/approvedkeys/" +---- ++ +This command does not return output in the CLI. 
+ +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listservicekeys[`GET /api/v1/superuser/keys`] endpoint to list service keys: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys" +---- ++ +.Example output ++ +[source,terminal] +---- +{"keys":[{"approval":{"approval_type":"ServiceKeyApprovalType.AUTOMATIC","approved_date":"Mon, 20 Jan 2025 14:46:01 GMT","approver":null,"notes":""},"created_date":"Mon, 20 Jan 2025 14:46:01 GMT","expiration_date":"Wed, 05 Feb 2025 22:03:37 GMT","jwk":{"e":"AQAB","kid":"","kty":"RSA","n":""},"kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","metadata":{"created_by":"CLI tool"},"name":"http://quay-server.example.com:80","rotation_duration":null,"service":"quay"}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getservicekey[`GET /api/v1/superuser/keys/{kid}`] endpoint to retrieve information about a service account by its kid: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"approval":{"approval_type":"ServiceKeyApprovalType.AUTOMATIC","approved_date":"Mon, 20 Jan 2025 14:46:01 GMT","approver":null,"notes":""},"created_date":"Mon, 20 Jan 2025 14:46:01 GMT","expiration_date":"Wed, 05 Feb 2025 22:03:37 
GMT","jwk":{"e":"AQAB","kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","kty":"RSA","n":"5iMX7RQ_4F_zdb1qonMsuWUDauCOqEyRpD8L_EhgnwDxrgMHuOlJ4_7sEOrOa3Jkx3QhwIW6LJCP69PR5X0wvz6vmC1DoWEaWv41bAq23Knzj7gUU9-N_fkZPZN9NQwZ-D-Zqg9L1c_cJF93Dy93py8_JswWFDj1FxMaThJmrX68wBwjhF-JLYqgCAGFyezzJ3oTpO-esV9v6R7skfkaqtx_cjLZk_0cKB4VKTtxiy2A8D_5nANTOSSbZLXNh2Vatgh3yrOmnTTNLIs0YO3vFIuylEkczHlln-40UMAzRB3HNspUySyzImO_2yGdrA762LATQrOzJN8E1YKCADx5CQ"},"kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","metadata":{"created_by":"CLI tool"},"name":"http://quay-server.example.com:80","rotation_duration":null,"service":"quay"} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateservicekey[`PUT /api/v1/superuser/keys/{kid}`] endpoint to update your service key, such as the metadata: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "metadata": {"": ""}, + "expiration": + }' \ + "https:///api/v1/superuser/keys/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"approval":{"approval_type":"ServiceKeyApprovalType.AUTOMATIC","approved_date":"Mon, 20 Jan 2025 14:46:01 GMT","approver":null,"notes":""},"created_date":"Mon, 20 Jan 2025 14:46:01 GMT","expiration_date":"Mon, 03 Mar 2025 10:40:00 GMT","jwk":{"e":"AQAB","kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","kty":"RSA","n":"5iMX7RQ_4F_zdb1qonMsuWUDauCOqEyRpD8L_EhgnwDxrgMHuOlJ4_7sEOrOa3Jkx3QhwIW6LJCP69PR5X0wvz6vmC1DoWEaWv41bAq23Knzj7gUU9-N_fkZPZN9NQwZ-D-Zqg9L1c_cJF93Dy93py8_JswWFDj1FxMaThJmrX68wBwjhF-JLYqgCAGFyezzJ3oTpO-esV9v6R7skfkaqtx_cjLZk_0cKB4VKTtxiy2A8D_5nANTOSSbZLXNh2Vatgh3yrOmnTTNLIs0YO3vFIuylEkczHlln-40UMAzRB3HNspUySyzImO_2yGdrA762LATQrOzJN8E1YKCADx5CQ"},"kid":"7fr8soqXGgea8JqjwgItjjJT9GKlt-bMyMCDmvzy6WQ","metadata":{"created_by":"CLI tool","environment":"production"},"name":"quay-service-key-updated","rotation_duration":null,"service":"quay"} 
+---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteservicekey[`DELETE /api/v1/superuser/keys/{kid}`] endpoint to delete a service key: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/superuser/keys/" +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/managing-superuser-full-access.adoc b/modules/managing-superuser-full-access.adoc new file mode 100644 index 000000000..f8cfe9707 --- /dev/null +++ b/modules/managing-superuser-full-access.adoc @@ -0,0 +1,28 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: REFERENCE +[id="managing-superuser-access"] += Managing superuser access to organizations + +When a user, for example, `user1` creates an organization within a registry, they own the access and permissions to that organization. As such, they can create repositories, define teams and memberships, create robot accounts, set default permissions, view logs, and adjust other settings as warranted. It is, for all intents and purposes, the user's organization. + +By default, superusers do not have access to a user's organization. However, {productname} administrators can use the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field to grant superusers the ability to read, write, and delete content from other repositories in namespaces or organizations that they do not own or have explicit permissions for. + +[NOTE] +==== +* This feature is only available on the beta of the new UI. When enabled, it shows all organizations that the superuser has access to. +* When this field is enabled, the superuser cannot view the image repository of every organization at once. This is a known limitation and will be fixed in a future version of {productname}. 
As a temporary workaround, the superuser can view image repositories by navigating to them from the *Organizations* page. +==== + +To grant superusers full access to all organizations within the registry, you can use the following YAML configuration: + +[source,yaml] +---- +# ... +FEATURE_SUPERUSERS_FULL_ACCESS: true +# ... +---- + +After setting `FEATURE_SUPERUSERS_FULL_ACCESS: true`, all organizations will be visible on the superuser's *Organization* page. \ No newline at end of file diff --git a/modules/managing-tags-api.adoc b/modules/managing-tags-api.adoc new file mode 100644 index 000000000..1d2ce57f8 --- /dev/null +++ b/modules/managing-tags-api.adoc @@ -0,0 +1,67 @@ +[id="tag-api"] += Managing tags with the {productname} API + +Tags can be changed, restored, deleted, or listed by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changetag[`PUT /api/v1/repository/{repository}/tag/{tag}`] endpoint to change which image a tag points to or create a new tag: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"manifest_digest": ""}' +---- ++ +.Example output ++ +[source,terminal] +---- +"Updated" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#restoretag[`POST /api/v1/repository/{repository}/tag/{tag}/restore`] endpoint to restore a repository tag back to a previous image in the repository: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/repository///tag//restore" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"manifest_digest": "sha256:"}' +---- ++ +.Example output ++ +[source,terminal] +---- +{} + +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] endpoint to obtain a list of repository tags: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": true, "start_ts": 1740496373, "manifest_digest": "sha256:d08334991a3dba62307016833083d6433f489ab0f7d36d0a4771a20b4569b2f6", "is_manifest_list": false, "size": 2280303, "last_modified": "Tue, 25 Feb 2025 15:12:53 -0000"}, {"name": "test", "reversion": false, "start_ts": 1740495442, "end_ts": 1740496373, "manifest_digest": "sha256:d08334991a3dba62307016833083d6433f489ab0f7d36d0a4771a20b4569b2f6", "is_manifest_list": false, "size": 2280303, "last_modified": "Tue, 25 Feb 2025 14:57:22 -0000", "expiration": "Tue, 25 Feb 2025 15:12:53 -0000"}, {"name": "test", "reversion": false, "start_ts": 1740495408, "end_ts": 1740495442, "manifest_digest": "sha256:d08334991a3dba62307016833083d6433f489ab0f7d36d0a4771a20b4569b2f6", "is_manifest_list": false, "size": 2280303, "last_modified": "Tue, 25 Feb 2025 14:56:48 -0000", "expiration": "Tue, 25 Feb 2025 14:57:22 -0000"}], "page": 1, "has_additional": false} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deletefulltag[`DELETE /api/v1/repository/{repository}/tag/{tag}`] endpoint to delete a tag from a repository: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/repository///tag/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/managing-team-members-api.adoc b/modules/managing-team-members-api.adoc new file mode 100644 index 000000000..b4efb638d --- /dev/null +++ b/modules/managing-team-members-api.adoc @@ -0,0 +1,76 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-team-members-api"] += Managing team members and repository permissions by using the API + +Use the following procedures to add a member to a team (by direct invite or by email), or to remove a member from a team. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* Enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationteammember[`PUT /api/v1/organization/{orgname}/team/{teamname}/members/{membername}`] command to add or invite a member to an existing team: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "testuser", "kind": "user", "is_robot": false, "avatar": {"name": "testuser", "hash": "d51d17303dc3271ac3266fb332d7df919bab882bbfc7199d2017a4daac8979f0", "color": "#5254a3", "kind": "user"}, "invited": false} +---- + +* Enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationteammember[`DELETE /api/v1/organization/{orgname}/team/{teamname}/members/{membername}`] command to remove a member of a team: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members/" +---- 
++ +This command does not return output in the CLI. To ensure that a member has been deleted, you can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationteammembers[`GET /api/v1/organization/{orgname}/team/{teamname}/members`] command and ensure that the member is not returned in the output. ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//members" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "owners", "members": [{"name": "quayadmin", "kind": "user", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}, "invited": false}, {"name": "test-org+test", "kind": "user", "is_robot": true, "avatar": {"name": "test-org+test", "hash": "aa85264436fe9839e7160bf349100a9b71403a5e9ec684d5b5e9571f6c821370", "color": "#8c564b", "kind": "robot"}, "invited": false}], "can_edit": true} +---- + +* You can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#inviteteammemberemail[`PUT /api/v1/organization/{orgname}/team/{teamname}/invite/{email}`] command to invite a user, by email address, to an existing team: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- + +* You can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteteammemberemailinvite[`DELETE /api/v1/organization/{orgname}/team/{teamname}/invite/{email}`] command to delete the invite of an email address to join a team. 
For example: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//invite/" +---- \ No newline at end of file diff --git a/modules/managing-team-members-repo-permissions-ui.adoc b/modules/managing-team-members-repo-permissions-ui.adoc new file mode 100644 index 000000000..81ac5b070 --- /dev/null +++ b/modules/managing-team-members-repo-permissions-ui.adoc @@ -0,0 +1,25 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-team-members-repo-permissions-ui"] += Managing team members and repository permissions + +Use the following procedure to manage team members and set repository permissions. + +* On the *Teams and membership* page of your organization, you can also manage team members and set repository permissions. + +** Click the kebab menu, and select one of the following options: ++ +** **Manage Team Members**. On this page, you can view all members, team members, robot accounts, or users who have been invited. You can also add a new team member by clicking *Add new member*. ++ +** **Set repository permissions**. On this page, you can set the repository permissions to one of the following: ++ +*** *None*. Team members have no permission to the repository. +*** *Read*. Team members can view and pull from the repository. +*** *Write*. Team members can read (pull) from and write (push) to the repository. +*** *Admin*. Full access to pull from, and push to, the repository, plus the ability to do administrative tasks associated with the repository. ++ +** **Delete**. This popup window allows you to delete the team by clicking *Delete*. 
\ No newline at end of file diff --git a/modules/managing-team-ui.adoc b/modules/managing-team-ui.adoc new file mode 100644 index 000000000..f35348c67 --- /dev/null +++ b/modules/managing-team-ui.adoc @@ -0,0 +1,10 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-a-team-ui"] += Managing a team by using the UI + +After you have created a team, you can use the UI to manage team members, set repository permissions, delete the team, or view more general information about the team. \ No newline at end of file diff --git a/modules/managing-teams-api.adoc b/modules/managing-teams-api.adoc new file mode 100644 index 000000000..51616463b --- /dev/null +++ b/modules/managing-teams-api.adoc @@ -0,0 +1,10 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="managing-teams-api"] += Managing teams by using the API + +Teams can be managed by using the {productname} API. diff --git a/modules/managing-user-options-api.adoc b/modules/managing-user-options-api.adoc new file mode 100644 index 000000000..6e813d3e2 --- /dev/null +++ b/modules/managing-user-options-api.adoc @@ -0,0 +1,81 @@ +[id="manage-user-options-api"] += Managing current user options by using the {productname} API + +Some user options, like starring a repository, or getting information about your account, are available with the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getloggedinuser[`GET /api/v1/user/`] endpoint to get user information for the authenticated user. 
++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"anonymous": false, "username": "quayadmin", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "can_create_repo": true, "is_me": true, "verified": true, "email": "test@gmil.com", "logins": [], "invoice_email": false, "invoice_email_address": null, "preferred_namespace": false, "tag_expiration_s": 1209600, "prompts": [], "company": null, "family_name": null, "given_name": null, "location": null, "is_free_account": true, "has_password_set": true, "quotas": [{"id": 4, "limit_bytes": 2199023255552, "limits": [{"id": 3, "type": "Reject", "limit_percent": 100}]}], "quota_report": {"quota_bytes": 2280675, "configured_quota": 2199023255552, "running_backfill": "complete", "backfill_status": "complete"}, "organizations": [{"name": "test", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "org"}, "can_create_repo": true, "public": false, "is_org_admin": true, "preferred_namespace": false}, {"name": "sample", "avatar": {"name": "sample", "hash": "ba560c68f1d26e8c6b911ac9b5d10d513e7e43e576cc2baece1b8a46f36a29a5", "color": "#b5cf6b", "kind": "org"}, "can_create_repo": true, "public": false, "is_org_admin": true, "preferred_namespace": false}], "super_user": true} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserinformation[`GET /api/v1/users/{username}`] endpoint to get user information for the specified user. 
++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/users/example_user" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"anonymous": false, "username": "testuser", "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "super_user": false} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createstar[`POST /api/v1/user/starred`] endpoint to star a repository: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/user/starred" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "namespace": "", + "repository": "" + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "test", "repository": "testrepo"} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html-single/red_hat_quay_api_reference/index#liststarredrepos[`GET /api/v1/user/starred`] endpoint to list all starred repositories: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/user/starred?next_page=" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"repositories": [{"namespace": "test", "name": "testrepo", "description": "This repository is now under maintenance.", "is_public": true}]} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html-single/red_hat_quay_api_reference/index#deletestar[`DELETE /api/v1/user/starred/{repository}`] endpoint to delete a star from a repository: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/user/starred/namespace/repository-name" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/managing-user-quota-superuser-api.adoc b/modules/managing-user-quota-superuser-api.adoc new file mode 100644 index 000000000..508df051f --- /dev/null +++ b/modules/managing-user-quota-superuser-api.adoc @@ -0,0 +1,69 @@ +[id="managing-user-quota-superuser-api"] += Managing user quota with the {productname} API + +As a superuser, you can manage user quota for specified organizations. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquotasuperuser[`POST /api/v1/superuser/users/{namespace}/quota`] endpoint to create a quota policy for specific users within an organization: ++ +[source,terminal] +---- +$ curl -X POST "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquotasuperuser[`GET /api/v1/superuser/users/{namespace}/quota`] endpoint to return a list of a user's allotted quota: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser/users//quota" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 6, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquotasuperuser[`PUT /api/v1/superuser/users/{namespace}/quota/{quota_id}`] endpoint to adjust the user's policy: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 6, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationquotasuperuser[`DELETE /api/v1/superuser/users/{namespace}/quota/{quota_id}`] endpoint to delete a user's policy: ++ +[source,terminal] +---- +$ curl -X DELETE "https://quay-server.example.com/api/v1/superuser/users//quota/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. \ No newline at end of file diff --git a/modules/manually-triggering-a-build-trigger.adoc b/modules/manually-triggering-a-build-trigger.adoc new file mode 100644 index 000000000..bbff1bd1a --- /dev/null +++ b/modules/manually-triggering-a-build-trigger.adoc @@ -0,0 +1,43 @@ +:_content-type: CONCEPT +[id="manually-triggering-a-build-trigger"] += Manually triggering a build + +_Builds_ can be triggered manually by using the following procedure. + +.Procedure + +. On the *Builds* page, *Start new build*. + +. When prompted, select *Invoke Build Trigger*. + +. Click *Run Trigger Now* to manually start the process. + +. Enter a commit ID from which to initiate the build, for example, `1c002dd`. ++ +After the build starts, you can see the _build ID_ on the *Repository Builds* page. + +ifeval::["{context}" == "quay-builders-image-automation"] +. 
You can check the status of your _build_ by clicking the commit in the *Build History* page, or by running the following command: ++ +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- + +. After the _build_ has completed, the `oc get pods -n virtual-builders` command returns no resources: ++ +[source,terminal] +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +No resources found in virtual-builders namespace. +---- +endif::[] \ No newline at end of file diff --git a/modules/mapping-repositories-to-cpe-information.adoc b/modules/mapping-repositories-to-cpe-information.adoc new file mode 100644 index 000000000..fee95ee39 --- /dev/null +++ b/modules/mapping-repositories-to-cpe-information.adoc @@ -0,0 +1,45 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: CONCEPT +[id="mapping-repositories-to-cpe-information"] += Mapping repositories to Common Product Enumeration information + +Clair's {rhel} scanner relies on a Common Product Enumeration (CPE) file to map RPM packages to the corresponding security data to produce matching results. Red{nbsp}Hat Product Security maintains and regularly updates these files. + +The CPE file must be present, or access to the file must be allowed, for the scanner to properly process RPM packages. If the file is not present, RPM packages installed in the container image will not be scanned. + +.Clair CPE mapping files +[options="header"] +|=== +|CPE | Link to JSON mapping file +| `repos2cpe` | link:https://www.redhat.com/security/data/metrics/repository-to-cpe.json[Red Hat Repository-to-CPE JSON] +| `names2repos` | link:https://access.redhat.com/security/data/metrics/container-name-repos-map.json[Red Hat Name-to-Repos JSON]. +|=== + +By default, Clair's indexer includes the `repos2cpe` and `names2repos` data files within the Clair container. 
This means that you can reference `/data/repository-to-cpe.json` and `/data/container-name-repos-map.json` in your `clair-config.yaml` file without the need for additional configuration. + +[IMPORTANT] +==== +Although Red{nbsp}Hat Product Security updates the `repos2cpe` and `names2repos` files regularly, the versions included in the `Clair` container are only updated with {productname} releases (for example, version 3.14.1 -> 3.14.2). This can lead to discrepancies between the latest CPE files and those bundled with Clair. +==== + +[id="mapping-repositories-to-cpe-configuration"] +== Mapping repositories to Common Product Enumeration example configuration + +Use the `repo2cpe_mapping_file` and `name2repos_mapping_file` fields in your Clair configuration to include the CPE JSON mapping files. For example: + +[source,yaml] +---- +indexer: + scanner: + repo: + rhel-repository-scanner: + repo2cpe_mapping_file: /data/repository-to-cpe.json + package: + rhel_containerscanner: + name2repos_mapping_file: /data/container-name-repos-map.json +---- + +For more information, see link:https://www.redhat.com/en/blog/how-accurately-match-oval-security-data-installed-rpms[How to accurately match OVAL security data to installed RPMs]. \ No newline at end of file diff --git a/modules/marathon-mesos-fail.adoc b/modules/marathon-mesos-fail.adoc new file mode 100644 index 000000000..29ec9fd37 --- /dev/null +++ b/modules/marathon-mesos-fail.adoc @@ -0,0 +1,117 @@ +:_content-type: CONCEPT +[id="marathon-mesos-fail"] += Pulling private images with Marathon or Mesos fails + +When using Marathon or Mesos, attempting to pull an image from a private repository fails with the following error: `msg="Error: Status 403 trying to pull repository repo/project: \"{\\\"error\\\": \\\"Permission Denied\\\"}\""`. + +As a workaround, you must copy the Docker configuration file's credentials on to the worker machines. 
For more information about configuring Mesos registry authentication, see link:https://mesosphere.github.io/marathon/docs/native-docker-private-registry.html[Using a Private Docker Registry]. + +When using Mesos app definitions, credentials must be provided as a URI that must be accessible by all nodes that might start your application. Approaches include distributing the file to the local filesystem of all nodes, for example through RSYNC/SCP, or storing it on a shared network drive, for example Amazon S3. It is worth considering the security implications of each approach. + +[id="deployment-docker-1-6-earlier"] +== For deployments using Docker 1.6 or earlier + +Use the following steps to configure Marathon or Mesos for use on private registries with Docker 1.6 or earlier. + +.Procedure + +. Download a configuration from a `Quay.io` credentials dialog, or log in to the private repository manually: ++ +[source,terminal] +---- +$ docker login quay.io +---- ++ +This creates a configuration file in `$HOME/.dockercfg`. + +. Add the `.dockercfg` to the `uris` field of your Mesos app definition. The `$HOME` environment variable must then be set to the same value as `$MESOS_SANDBOX` so that Docker can automatically pick up the configuration file. The following is an example app definition: ++ +[source,yaml] +---- +{ + "id": "/some/name/or/id", + "cpus": 1, + "mem": 1024, + "instances": 1, + "container": { + "type": "DOCKER", + "docker": { + "image": "some.docker.host.com/namespace/repo", + "network": "HOST" + } + }, + "uris": [ + "file:///etc/.dockercfg" + ] +} +---- + +[id="deployment-docker-1-6-later"] +== For deployments using Docker 1.6 or later + +Use the following steps to configure Marathon or Mesos for use on private registries with Docker 1.6 or later. + +. 
Download a configuration from a `Quay.io` credentials dialog, or log in to the private repository manually: ++ +[source,terminal] +---- +$ docker login quay.io +---- ++ +This creates a configuration file in `$HOME/.dockercfg/config.json`. + +. `Tar` and `GZIP` the `$HOME/.docker` directory and its contents: ++ +[source,terminal] +---- +$ cd $HOME +---- ++ +[source,terminal] +---- +$ tar czf docker.tar.gz .docker +---- + +. Enter the following command to ensure that both the directory and the configuration are inside of the `tar`: ++ +[source,terminal] +---- +$ tar -tvf $HOME/docker.tar.gz +---- ++ +.Example output ++ +[source,terminal] +---- +drwx------ root/root 0 2015-07-28 02:54 .docker/ +-rw------- root/root 114 2015-07-28 01:31 .docker/config.json +---- + +. Optional. Put the `.tar` file into a directory readable by Mesos: ++ +[source,terminal] +---- +$ cp docker.tar.gz /etc/ +---- + +. Add the file to the `uris` field of your Mesos app definition: ++ +[source,yaml] +---- +{ + "id": "/some/name/or/id", + "cpus": 1, + "mem": 1024, + "instances": 1, + "container": { + "type": "DOCKER", + "docker": { + "image": "some.docker.host.com/namespace/repo", + "network": "HOST" + } + }, + "uris": [ + "file:///etc/docker.tar.gz" + ] +} +---- \ No newline at end of file diff --git a/modules/metrics-authentication.adoc b/modules/metrics-authentication.adoc new file mode 100644 index 000000000..813667565 --- /dev/null +++ b/modules/metrics-authentication.adoc @@ -0,0 +1,36 @@ +[[metrics-authentication]] += Authentication metrics + +The authentication metrics provide the number of authentication requests, labeled by type and whether it succeeded or not. For example, this metric could be used to monitor failed basic authentication requests. 
+ +[options="header"] +|=== +| Metric name | Description +| quay_authentication_attempts_total | Number of authentication attempts across the registry and API +|=== + + +.Metric labels +* **auth_kind:** The type of auth used, including: +** basic +** oauth +** credentials +* **success:** true or false + + + +.Sample metrics output +[source,terminal] +---- +# TYPE quay_authentication_attempts_created gauge +quay_authentication_attempts_created{auth_kind="basic",host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="221",process_name="registry:application",success="True"} 1.6317843039374158e+09 +... + +# HELP quay_authentication_attempts_total number of authentication attempts across the registry and API +# TYPE quay_authentication_attempts_total counter +quay_authentication_attempts_total{auth_kind="basic",host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="221",process_name="registry:application",success="True"} 2 +... +---- + + + diff --git a/modules/metrics-garbage-collection.adoc b/modules/metrics-garbage-collection.adoc new file mode 100644 index 000000000..4c8c11d23 --- /dev/null +++ b/modules/metrics-garbage-collection.adoc @@ -0,0 +1,55 @@ +[[metrics-garbage-collection]] += Garbage collection metrics + +These metrics show you how many resources have been removed from garbage collection (gc). They show how many times the gc workers have run and how many namespaces, repositories, and blobs were removed. 
+ + +[options="header"] +|=== +| Metric name | Description +| quay_gc_iterations_total | Number of iterations by the GCWorker +| quay_gc_namespaces_purged_total | Number of namespaces purged by the NamespaceGCWorker +| quay_gc_repos_purged_total | Number of repositories purged by the RepositoryGCWorker or NamespaceGCWorker +| quay_gc_storage_blobs_deleted_total | Number of storage blobs deleted +|=== + + +.Sample metrics output +[source,terminal] +---- +# TYPE quay_gc_iterations_created gauge +quay_gc_iterations_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189714e+09 +... + +# HELP quay_gc_iterations_total number of iterations by the GCWorker +# TYPE quay_gc_iterations_total counter +quay_gc_iterations_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... + +# TYPE quay_gc_namespaces_purged_created gauge +quay_gc_namespaces_purged_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189433e+09 +... + +# HELP quay_gc_namespaces_purged_total number of namespaces purged by the NamespaceGCWorker +# TYPE quay_gc_namespaces_purged_total counter +quay_gc_namespaces_purged_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +.... + +# TYPE quay_gc_repos_purged_created gauge +quay_gc_repos_purged_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.631782319018925e+09 +... 
+ +# HELP quay_gc_repos_purged_total number of repositories purged by the RepositoryGCWorker or NamespaceGCWorker +# TYPE quay_gc_repos_purged_total counter +quay_gc_repos_purged_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... + +# TYPE quay_gc_storage_blobs_deleted_created gauge +quay_gc_storage_blobs_deleted_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823190189059e+09 +... + +# HELP quay_gc_storage_blobs_deleted_total number of storage blobs deleted +# TYPE quay_gc_storage_blobs_deleted_total counter +quay_gc_storage_blobs_deleted_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... +---- diff --git a/modules/metrics-general-registry-stats.adoc b/modules/metrics-general-registry-stats.adoc new file mode 100644 index 000000000..97643abf9 --- /dev/null +++ b/modules/metrics-general-registry-stats.adoc @@ -0,0 +1,43 @@ +[[metrics-general-registry-stats]] += General registry statistics + + +General registry statistics can indicate how large the registry has grown. 
+ + +[options="header"] +|=== +| Metric name | Description +| quay_user_rows | Number of users in the database +| quay_robot_rows | Number of robot accounts in the database +| quay_org_rows | Number of organizations in the database +| quay_repository_rows | Number of repositories in the database +| quay_security_scanning_unscanned_images_remaining_total | Number of images that are not scanned by the latest security scanner +|=== + + + +.Sample metrics output +[source,terminal] +---- +# HELP quay_user_rows number of users in the database +# TYPE quay_user_rows gauge +quay_user_rows{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="65",process_name="globalpromstats.py"} 3 + +# HELP quay_robot_rows number of robot accounts in the database +# TYPE quay_robot_rows gauge +quay_robot_rows{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="65",process_name="globalpromstats.py"} 2 + +# HELP quay_org_rows number of organizations in the database +# TYPE quay_org_rows gauge +quay_org_rows{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="65",process_name="globalpromstats.py"} 2 + +# HELP quay_repository_rows number of repositories in the database +# TYPE quay_repository_rows gauge +quay_repository_rows{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="65",process_name="globalpromstats.py"} 4 + +# HELP quay_security_scanning_unscanned_images_remaining number of images that are not scanned by the latest security scanner +# TYPE quay_security_scanning_unscanned_images_remaining gauge +quay_security_scanning_unscanned_images_remaining{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 5 +---- + diff --git a/modules/metrics-image-push-pull.adoc b/modules/metrics-image-push-pull.adoc new file mode 100644 index 000000000..032968c84 --- /dev/null +++ b/modules/metrics-image-push-pull.adoc @@ -0,0 +1,66 @@ 
+[[metrics-image-push-pull]] += Image push / pull metrics + +A number of metrics are available related to pushing and pulling images. + +== Image pulls total + +[options="header"] +|=== +| Metric name | Description +| quay_registry_image_pulls_total | The number of images downloaded from the registry. +|=== + +.Metric labels +* **protocol:** the registry protocol used (should always be v2) +* **ref:** ref used to pull - tag, manifest +* **status:** http return code of the request + + + +== Image bytes pulled + +[options="header"] +|=== +| Metric name | Description +| quay_registry_image_pulled_estimated_bytes_total | The number of bytes downloaded from the registry +|=== + +.Metric labels +* ** protocol:** the registry protocol used (should always be v2) + + + +== Image pushes total + +[options="header"] +|=== +| Metric name | Description +| quay_registry_image_pushes_total | The number of images uploaded from the registry. +|=== + + +.Metric labels +* **protocol:** the registry protocol used (should always be v2) +* **pstatus:** http return code of the request +* **pmedia_type:** the uploaded manifest type + + + +== Image bytes pushed + +[options="header"] +|=== +| Metric name | Description +| quay_registry_image_pushed_bytes_total | The number of bytes uploaded to the registry +|=== + +.Sample metrics output +[source,terminal] +---- +# HELP quay_registry_image_pushed_bytes_total number of bytes pushed to the registry +# TYPE quay_registry_image_pushed_bytes_total counter +quay_registry_image_pushed_bytes_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="221",process_name="registry:application"} 0 +... 
+---- + diff --git a/modules/metrics-intro.adoc b/modules/metrics-intro.adoc new file mode 100644 index 000000000..45bc6fd41 --- /dev/null +++ b/modules/metrics-intro.adoc @@ -0,0 +1,5 @@ +[[metrics-intro]] += Introduction to metrics + +{productname} provides metrics to help monitor the registry, including metrics for general registry usage, uploads, downloads, garbage collection, and authentication. + diff --git a/modules/metrics-multipart-uploads.adoc b/modules/metrics-multipart-uploads.adoc new file mode 100644 index 000000000..d8933f1f1 --- /dev/null +++ b/modules/metrics-multipart-uploads.adoc @@ -0,0 +1,36 @@ +[[metrics-multipart-uploads]] += Multipart uploads metrics + + +The multipart uploads metrics show the number of blob uploads to storage (S3, Rados, GoogleCloudStorage, RHOCS). These can help identify issues when Quay is unable to correctly upload blobs to storage. + + +[options="header"] +|=== +| Metric name | Description +| quay_multipart_uploads_started_total | Number of multipart uploads to Quay storage that started +| quay_multipart_uploads_completed_total | Number of multipart uploads to Quay storage that completed +|=== + + + +.Sample metrics output +[source,terminal] +---- +# TYPE quay_multipart_uploads_completed_created gauge +quay_multipart_uploads_completed_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823308284895e+09 +... 
+ +# HELP quay_multipart_uploads_completed_total number of multipart uploads to Quay storage that completed +# TYPE quay_multipart_uploads_completed_total counter +quay_multipart_uploads_completed_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 + +# TYPE quay_multipart_uploads_started_created gauge +quay_multipart_uploads_started_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 1.6317823308284352e+09 +... + +# HELP quay_multipart_uploads_started_total number of multipart uploads to Quay storage that started +# TYPE quay_multipart_uploads_started_total counter +quay_multipart_uploads_started_total{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="208",process_name="secscan:application"} 0 +... +---- diff --git a/modules/metrics-queue-items.adoc b/modules/metrics-queue-items.adoc new file mode 100644 index 000000000..3edd20e57 --- /dev/null +++ b/modules/metrics-queue-items.adoc @@ -0,0 +1,46 @@ +[[metrics-queue-items]] += Queue items + +The _queue items_ metrics provide information on the multiple queues used by Quay for managing work. + +[options="header"] +|=== +| Metric name | Description +| quay_queue_items_available | Number of items in a specific queue +| quay_queue_items_locked | Number of items that are running +| quay_queue_items_available_unlocked | Number of items that are waiting to be processed +|=== + +.Metric labels +* **queue_name:** The name of the queue. One of: +** **exportactionlogs:** Queued requests to export action logs. These logs are then processed and put in storage. A link is then sent to the requester via email. 
+** **namespacegc:** Queued namespaces to be garbage collected +** **notification:** Queue for repository notifications to be sent out +** **repositorygc:** Queued repositories to be garbage collected +** **secscanv4:** Notification queue specific for Clair V4 +** **dockerfilebuild:** Queue for Quay docker builds +** **imagestoragereplication:** Queued blob to be replicated across multiple storages +** **chunk_cleanup:** Queued blob segments that needs to be deleted. This is only used by some storage implementations, for example, Swift. + +For example, the queue labelled **repositorygc** contains the repositories marked for deletion by the repository garbage collection worker. For metrics with a **queue_name** label of **repositorygc**: + +* **quay_queue_items_locked** is the number of repositories currently being deleted. +* **quay_queue_items_available_unlocked** is the number of repositories waiting to get processed by the worker. + +.Sample metrics output +[source,terminal] +---- +# HELP quay_queue_items_available number of queue items that have not expired +# TYPE quay_queue_items_available gauge +quay_queue_items_available{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="63",process_name="exportactionlogsworker.py",queue_name="exportactionlogs"} 0 +... + +# HELP quay_queue_items_available_unlocked number of queue items that have not expired and are not locked +# TYPE quay_queue_items_available_unlocked gauge +quay_queue_items_available_unlocked{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="63",process_name="exportactionlogsworker.py",queue_name="exportactionlogs"} 0 +... 
+ +# HELP quay_queue_items_locked number of queue items that have been acquired +# TYPE quay_queue_items_locked gauge +quay_queue_items_locked{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",pid="63",process_name="exportactionlogsworker.py",queue_name="exportactionlogs"} 0 +---- diff --git a/modules/metrics-request-duration.adoc b/modules/metrics-request-duration.adoc new file mode 100644 index 000000000..1f41c1a49 --- /dev/null +++ b/modules/metrics-request-duration.adoc @@ -0,0 +1,44 @@ +[[metrics-request-duration]] += Request duration metrics + +The duration of api requests, grouped in different duration buckets of a histogram. + +[options="header"] +|=== +| Metric name | Description +| quay_request_duration_seconds_count | Seconds taken to process a request +| quay_request_duration_seconds_bucket | Duration grouped in different duration buckets +|=== + +.Metric labels +* **method:** HTTP method +* **route:** API request endpoint +* **status:** HTTP return code of the request + + +== Sample usage + +To get the request rates for `/v2/*` routes, you could use a query in Grafana of the form: + +---- +sum by (route)(aggregation:quay_request_duration_seconds_count:rate1m:sum{route=~"v2.*"}) +---- + +For the request latency for `/v2/*` routes (95 percentile of requests), you could use a query of the form: + +---- +histogram_quantile(0.95, sum by (le, route) (aggregation:quay_request_duration_seconds_bucket:rate5m:sum{route=~"v2.*"})) +---- + + +.Sample metrics output +[source,terminal] +---- +# HELP quay_request_duration_seconds seconds taken to process a request +# TYPE quay_request_duration_seconds histogram +quay_request_duration_seconds_bucket{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",method="GET",pid="221",process_name="registry:application",route="v1.internal_ping",status="200",le="0.005"} 470 +... 
+ +# TYPE quay_request_duration_seconds_created gauge +quay_request_duration_seconds_created{host="example-registry-quay-app-6df87f7b66-9tfn6",instance="",job="quay",method="GET",pid="221",process_name="registry:application",route="v1.internal_ping",status="200"} 1.631782365248095e+09 +---- diff --git a/modules/mirror-quay-api.adoc b/modules/mirror-quay-api.adoc new file mode 100644 index 000000000..e862092ea --- /dev/null +++ b/modules/mirror-quay-api.adoc @@ -0,0 +1,90 @@ +:_content-type: CONCEPT +[id="quay-mirror-api"] += Using the API to mirror a repository + +{productname} administrators can mirror external repositories by using the API. + +.Prerequisites + +* You have set `FEATURE_REPO_MIRROR: true` in your `config.yaml` file. + +.Procedure + +* Create a new repository mirror configuration by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepomirrorconfig[`POST /api/v1/repository/{repository}/mirror`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- + +* You can return information about the mirror configuration by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepomirrorconfig[`GET /api/v1/repository/{repository}/mirror`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"is_enabled": true, "mirror_type": "PULL", "external_reference": "https://quay.io/repository/argoproj/argocd", 
"external_registry_username": null, "external_registry_config": {}, "sync_interval": 86400, "sync_start_date": "2025-01-15T12:00:00Z", "sync_expiration_date": null, "sync_retries_remaining": 3, "sync_status": "NEVER_RUN", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": ["*.latest*"]}, "robot_username": "quayadmin+mirror_robot"} +---- + +* You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#syncnow[`POST /api/v1/repository/{repository}/mirror/sync-now`] endpoint to sync the repositories. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-now" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output in the CLI. + +* Alternatively, you can cancel the sync with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#synccancel[`POST /api/v1/repository/{repository}/mirror/sync-cancel`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/repository///mirror/sync-cancel" +---- ++ +This command does not return output in the CLI. + +* After creating a mirror configuration, you can make changes with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepomirrorconfig[`PUT /api/v1/repository/{repository}/mirror`] command. For example, you might choose to disable automatic synchronizations: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/repository///mirror" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "is_enabled": , <1> + "external_reference": "", + "external_registry_username": "", + "external_registry_password": "", + "sync_start_date": "", + "sync_interval": , + "robot_username": "", + "root_rule": { + "rule": "", + "rule_type": "" + } + }' +---- +<1> Disables automatic synchronization.
\ No newline at end of file diff --git a/modules/mirrored-images-unable-pull-rhocp.adoc b/modules/mirrored-images-unable-pull-rhocp.adoc new file mode 100644 index 000000000..a6bccacb8 --- /dev/null +++ b/modules/mirrored-images-unable-pull-rhocp.adoc @@ -0,0 +1,20 @@ +:_content-type: CONCEPT +[id="mirrored-images-unable-pull-rhocp"] += Unable to pull mirrored images to {productname} on {ocp} + +After mirroring images into the {productname} registry on {ocp} using the `oc adm catalog` mirror command, you might receive the following error when attempting to use that mirrored image: `Failed to pull image "//:": rpc error: code = Unknown desc = reading manifest 1-191a in //:: unauthorized: access to the requested resource is not authorized`. This occurs when images are pushed to the {productname} registry without an existing repository. When this happens, a new, private, registry is created automatically. This restrains the kubelet on {ocp} nodes from pulling images and deploying the pod successfully. + +As a workaround to this issue, you can set the `CREATE_PRIVATE_REPO_ON_PUSH` to `false` in your `config.yaml` file. For example: + +[source,yaml] +---- +CREATE_PRIVATE_REPO_ON_PUSH: false +---- + +This helps create a public repository when you first push the image to the {productname} registry when using the `oc adm catalog mirror` command. + + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6966410[Images mirrored to Quay can't be pulled in RHOCP]. 
\ No newline at end of file diff --git a/modules/mirroring-api-intro.adoc b/modules/mirroring-api-intro.adoc new file mode 100644 index 000000000..8fb67cf34 --- /dev/null +++ b/modules/mirroring-api-intro.adoc @@ -0,0 +1,10 @@ +:_content-type: CONCEPT +[id="arch-mirroring-api-intro"] += Mirroring API + +You can use the {productname} API to configure repository mirroring: + +.Mirroring API +image:swagger-mirroring.png[Mirroring API] + +More information is available in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API Guide] diff --git a/modules/mirroring-creating-repo.adoc b/modules/mirroring-creating-repo.adoc new file mode 100644 index 000000000..734e73113 --- /dev/null +++ b/modules/mirroring-creating-repo.adoc @@ -0,0 +1,70 @@ +:_content-type: PROCEDURE +[id="mirroring-creating-repo"] += Creating a mirrored repository + +When mirroring a repository from an external container registry, you must create a new private repository. Typically, the same name is used as the target repository, for example, `quay-rhel8`. + +image:repo_quay_rhel8.png[Create new {productname} repo] + +[id="mirroring-repository-mirroring-settings"] +== Repository mirroring settings + +Use the following procedure to adjust the settings of your mirrored repository. + +.Prerequisites + +* You have enabled repository mirroring in your {productname} configuration file. +* You have deployed a mirroring worker. + +.Procedure + +. In the Settings tab, set the Repository State to `Mirror`: ++ +image:repo_mirror_create.png[Create a new {productname} repo mirror] + +. In the Mirror tab, enter the details for connecting to the external registry, along with the tags, scheduling and access information: ++ +image:repo-mirror-details-start.png[Repository mirroring] + +. 
Enter the details as required in the following fields: ++ +* **Registry Location:** The external repository you want to mirror, for example, `registry.redhat.io/quay/quay-rhel8` +* **Tags:** This field is required. You may enter a comma-separated list of individual tags or tag patterns. (See _Tag Patterns_ section for details.) + +* **Start Date:** The date on which mirroring begins. The current date and time is used by default. +* **Sync Interval:** Defaults to syncing every 24 hours. You can change that based on hours or days. +* **Robot User:** Create a new robot account or choose an existing robot account to do the mirroring. +* **Username:** The username for accessing the external registry holding the repository you are mirroring. +* **Password:** The password associated with the Username. Note that the password +cannot include characters that require an escape character (\). + +[id="mirroring-advanced-settings"] +== Advanced settings + +In the *Advanced Settings* section, you can configure SSL/TLS and proxy with the following options: + +* **Verify TLS:** Select this option if you want to require HTTPS and to verify certificates when communicating with the target remote registry. +* **Accept Unsigned Images:** Selecting this option allows unsigned images to be mirrored. +* **HTTP Proxy:** Identify the HTTP proxy server needed to access the remote site, if a proxy server is needed. +* **HTTPS PROXY:** Identify the HTTPS proxy server needed to access the remote site, if a proxy server is needed. +* **No Proxy:** List of locations that do not require proxy. + + +[id="mirroring-synchronize-now"] +== Synchronize now + +Use the following procedure to initiate the mirroring operation. + +.Procedure + +* To perform an immediate mirroring operation, press the Sync Now button on the repository's Mirroring tab.
The logs are available on the Usage Logs tab: ++ +image:repo-mirror-usage-logs.png[Usage logs] ++ +When the mirroring is complete, the images will appear in the Tags tab: ++ +image:repo-mirror-tags.png[Repository mirroring tags] ++ +Below is an example of a completed Repository Mirroring screen: ++ +image:repo-mirror-details.png[Repository mirroring details] diff --git a/modules/mirroring-events.adoc b/modules/mirroring-events.adoc new file mode 100644 index 000000000..895aee025 --- /dev/null +++ b/modules/mirroring-events.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="arch-mirroring-events"] += Event notifications for mirroring + +There are three notification events for repository mirroring: + +* Repository Mirror Started +* Repository Mirror Success +* Repository Mirror Unsuccessful + +The events can be configured inside of the *Settings* tab for each repository, and all existing notification methods such as email, Slack, Quay UI, and webhooks are supported. \ No newline at end of file diff --git a/modules/mirroring-intro.adoc b/modules/mirroring-intro.adoc new file mode 100644 index 000000000..8ba7b3466 --- /dev/null +++ b/modules/mirroring-intro.adoc @@ -0,0 +1,21 @@ +:_content-type: CONCEPT +[id="arch-mirroring-intro"] += Repository mirroring + +{productname} repository mirroring lets you mirror images from external container registries, or another local registry, into your {productname} cluster. Using repository mirroring, you can synchronize images to {productname} based on repository names and tags. 
+ +From your {productname} cluster with repository mirroring enabled, you can perform the following: + +* Choose a repository from an external registry to mirror +* Add credentials to access the external registry +* Identify specific container image repository names and tags to sync +* Set intervals at which a repository is synced +* Check the current state of synchronization + +To use the mirroring functionality, you need to perform the following actions: + +* Enable repository mirroring in the {productname} configuration file +* Run a repository mirroring worker +* Create mirrored repositories + +All repository mirroring configurations can be performed using the configuration tool UI or by the {productname} API. \ No newline at end of file diff --git a/modules/mirroring-invalid-credentials.adoc b/modules/mirroring-invalid-credentials.adoc new file mode 100644 index 000000000..8717b6a16 --- /dev/null +++ b/modules/mirroring-invalid-credentials.adoc @@ -0,0 +1,12 @@ +:_content-type: PROCEDURE +[id="mirroring-invalid-credentials"] += Invalid credentials when mirroring + +In some cases, {productname} mirroring might fail and return the following error: `repomirrorworker stdout | time="2022-11-03T16:46:11Z" level=debug msg="Accessing \"registry.redhat.io/rhel8/nginx-118:1\" failed: unable to retrieve auth token: invalid username/password: unauthorized:` when the {productname} cluster is missing a connection to the LoadBalancer. Consequently, {productname} is unable to connect to the network. + +To resolve this issue, ensure that your {productname} cluster has a stable connection to the LoadBalancer. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6989386[Quay Mirroring fail with invalid credentials].
\ No newline at end of file diff --git a/modules/mirroring-prereqs.adoc b/modules/mirroring-prereqs.adoc new file mode 100644 index 000000000..2c3a4a7c1 --- /dev/null +++ b/modules/mirroring-prereqs.adoc @@ -0,0 +1,10 @@ +[[mirroring-prereqs]] += Mirroring prerequisites + +Before you can use repository mirroring, you must enable repository mirroring from the {productname} +configuration screen and start the repository mirroring worker. + + + + + diff --git a/modules/mirroring-recommend.adoc b/modules/mirroring-recommend.adoc new file mode 100644 index 000000000..164f4c4fb --- /dev/null +++ b/modules/mirroring-recommend.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="arch-mirroring-recommend"] += Repository mirroring recommendations + +Best practices for repository mirroring include the following: + +* Repository mirroring pods can run on any node. This means that you can run mirroring on nodes where {productname} is already running. + +* Repository mirroring is scheduled in the database and runs in batches. As a result, repository workers check each repository mirror configuration file and read when the next synchronization needs to occur. More mirror workers means more repositories can be mirrored at the same time. For example, running 10 mirror workers means that a user can run 10 mirroring operations in parallel. If a user only has 2 workers with 10 mirror configurations, only 2 operations can be performed. + +* The optimal number of mirroring pods depends on the following conditions: + +** The total number of repositories to be mirrored +** The number of images and tags in the repositories and the frequency of changes +** Parallel batching ++ +For example, if a user is mirroring a repository that has 100 tags, the mirror will be completed by one worker. Users must consider how many repositories one wants to mirror in parallel, and base the number of workers around that. ++ +Multiple tags in the same repository cannot be mirrored in parallel.
\ No newline at end of file diff --git a/modules/mirroring-tag-patterns.adoc b/modules/mirroring-tag-patterns.adoc new file mode 100644 index 000000000..6c980c939 --- /dev/null +++ b/modules/mirroring-tag-patterns.adoc @@ -0,0 +1,28 @@ +[[mirroring-tag-patterns]] += Mirroring tag patterns + +At least one tag must be entered. The following table references possible image tag patterns. + +== Pattern syntax + +[width="100%",options="header"] +|================================================================== +| Pattern | Description +| * | Matches all characters +| ? | Matches any single character +| [seq] | Matches any character in _seq_ +| [!seq] | Matches any character not in _seq_ +|================================================================== + +== Example tag patterns + +[width="100%",options="header"] +|================================================================== +| Example Pattern | Example Matches +| v3* | v32, v3.1, v3.2, v3.2-4beta, v3.3 +| v3.* | v3.1, v3.2, v3.2-4beta +| v3.? | v3.1, v3.2, v3.3 +| v3.[12] | v3.1, v3.2 +| v3.[12]* | v3.1, v3.2, v3.2-4beta +| v3.[!1]* | v3.2, v3.2-4beta, v3.3 +|================================================================== diff --git a/modules/mirroring-using.adoc b/modules/mirroring-using.adoc new file mode 100644 index 000000000..048a3f010 --- /dev/null +++ b/modules/mirroring-using.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="arch-mirroring-using"] += Using repository mirroring + +The following list shows features and limitations of {productname} repository mirroring: + +* With repository mirroring, you can mirror an entire repository or selectively limit which images are synced. Filters can be based on a comma-separated list of tags, a range of tags, or other means of identifying tags through Unix shell-style wildcards. For more information, see the documentation for link:https://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm[wildcards].
+ +* When a repository is set as mirrored, you cannot manually add other images to that repository. + +* Because the mirrored repository is based on the repository and tags you set, it will hold only the content represented by the repository and tag pair. For example if you change +the tag so that some images in the repository no longer match, those images will be deleted. + +* Only the designated robot can push images to a mirrored repository, superseding any role-based access control permissions set on the repository. + +* Mirroring can be configured to rollback on failure, _or_ to run on a best-effort basis. + +* With a mirrored repository, a user with _read_ permissions can pull images from the repository but cannot push images to the repository. + +* Changing settings on your mirrored repository can be performed in the {productname} user interface, using the *Repositories* -> *Mirrors* tab for the mirrored repository you create. + +* Images are synced at set intervals, but can also be synced on demand. \ No newline at end of file diff --git a/modules/mirroring-versus-georepl.adoc b/modules/mirroring-versus-georepl.adoc new file mode 100644 index 000000000..3a8883003 --- /dev/null +++ b/modules/mirroring-versus-georepl.adoc @@ -0,0 +1,36 @@ +:_content-type: CONCEPT +[id="mirroring-versus-georepl"] += Repository mirroring compared to geo-replication + +{productname} geo-replication mirrors the entire image storage backend data between 2 or more different storage backends while the database is shared, for example, one {productname} registry with two different blob storage endpoints. The primary use cases for geo-replication include the following: + +* Speeding up access to the binary blobs for geographically dispersed setups + +* Guaranteeing that the image content is the same across regions + +Repository mirroring synchronizes selected repositories, or subsets of repositories, from one registry to another. 
The registries are distinct, with each registry having a separate database and separate image storage. + +The primary use cases for mirroring include the following: + +* Independent registry deployments in different data centers or regions, where a certain subset of the overall content is supposed to be shared across the data centers and regions +* Automatic synchronization or mirroring of selected (allowlisted) upstream repositories from external registries into a local {productname} deployment + +[NOTE] +==== +Repository mirroring and geo-replication can be used simultaneously. +==== + +.{productname} Repository mirroring and geo-replication comparison +[width="100%",options="header"] + +|=== +| Feature / Capability | Geo-replication | Repository mirroring +| What is the feature designed to do? | A shared, global registry | Distinct, different registries +| What happens if replication or mirroring has not been completed yet? | The remote copy is used (slower) | No image is served +| Is access to all storage backends in both regions required? | Yes (all {productname} nodes) | No (distinct storage) +| Can users push images from both sites to the same repository? | Yes | No +| Is all registry content and configuration identical across all regions (shared database)? | Yes | No +| Can users select individual namespaces or repositories to be mirrored? | No | Yes +| Can users apply filters to synchronization rules? | No | Yes +| Are individual / different role-based access control configurations allowed in each region? | No | Yes +|=== diff --git a/modules/mirroring-worker.adoc b/modules/mirroring-worker.adoc new file mode 100644 index 000000000..0ddf80aa1 --- /dev/null +++ b/modules/mirroring-worker.adoc @@ -0,0 +1,25 @@ +[id="mirroring-worker"] += Mirroring worker + +Use the following procedure to start the repository mirroring worker.
+ +.Procedure + +* If you have not configured TLS communications using a `/root/ca.crt` certificate, enter the following command to start a `Quay` pod with the `repomirror` option: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --name mirroring-worker \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} repomirror +---- + +* If you have configured TLS communications using a `/root/ca.crt` certificate, enter the following command to start the repository mirroring worker: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --name mirroring-worker \ + -v $QUAY/config:/conf/stack:Z \ + -v /root/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt:Z \ + {productrepo}/{quayimage}:{productminv} repomirror +---- diff --git a/modules/mirroring-working-with.adoc b/modules/mirroring-working-with.adoc new file mode 100644 index 000000000..ad7a4fe4b --- /dev/null +++ b/modules/mirroring-working-with.adoc @@ -0,0 +1,52 @@ +[[mirroring-working-with]] += Working with mirrored repositories + +Once you have created a mirrored repository, there are several +ways you can work with that repository. +Select your mirrored repository from the Repositories page +and do any of the following: + +* **Enable/disable the repository**: Select the Mirroring button in the left column, +then toggle the Enabled check box to enable or disable the repository temporarily. + +* **Check mirror logs**: To make sure the mirrored repository is working properly, +you can check the mirror logs. To do that, select the Usage Logs +button in the left column. Here's an example: ++ +image:repo_mirror_logs.png[View logs for your {productname} repo mirror] + +* **Sync mirror now**: To immediately sync the images in your repository, +select the Sync Now button. + +* **Change credentials**: To change the username and password, select DELETE from the Credentials line. +Then select None and add the username and password needed to log into the external registry when prompted. 
+ +* **Cancel mirroring**: To stop mirroring, which keeps the current images available but stops +new ones from being synced, select the CANCEL button. + +* **Set robot permissions**: {productname} robot accounts are named tokens that hold credentials +for accessing external repositories. By assigning credentials to a robot, that robot can be used +across multiple mirrored repositories that need to access the same external registry. ++ +You can assign an existing robot to a repository by going to Account Settings, then selecting +the Robot Accounts icon in the left column. For the robot account, choose the +link under the REPOSITORIES column. From the pop-up window, you can: + +** Check which repositories are assigned to that robot. + +** Assign read, write or Admin privileges to that robot from the PERMISSION field shown in this figure: +image:repo_mirror_robot_assign.png[Assign a robot to mirrored repo] + +* **Change robot credentials**: Robots can hold credentials such as +Kubernetes secrets, Docker login information, and Mesos bundles. +To change robot credentials, select +the Options gear on the robot's account line on the Robot Accounts window and choose View Credentials. +Add the appropriate credentials for the external repository the robot needs to access. ++ +image:repo_mirror_robot_perm.png[Assign permission to a robot] + +* **Check and change general setting**: Select the Settings button (gear icon) from the left +column on the mirrored repository page. +On the resulting page, you can change settings associated with +the mirrored repository. In particular, you can change User and Robot Permissions, +to specify exactly which users and robots can read from or write to the repo. 
\ No newline at end of file diff --git a/modules/missing-runc-files.adoc b/modules/missing-runc-files.adoc new file mode 100644 index 000000000..b0a378503 --- /dev/null +++ b/modules/missing-runc-files.adoc @@ -0,0 +1,22 @@ +:_content-type: PROCEDURE +[id="missing-runc-files"] += Missing runc files prevent containers from running + +When attempting to start containers using the Podman client tool, users encounter an error due to missing runc files. The error message indicates a permission issue related to adding a seccomp filter rule for the `bdflush` syscall, leading to the container startup failure. + +The cause of this issue is the absence of required `runc` files in the older version of Podman. These missing files prevent the proper execution of containers, resulting in the encountered error. Updating Podman ensures that the necessary runc files are present, enabling the successful deployment of containers. + +To resolve this issue, it is recommended to update the Podman version to obtain the updated `runc` files. By updating Podman, the missing runc files will be installed, allowing containers to be deployed successfully. + +Use the following command to update Podman: +[source,terminal] +---- +# yum update podman -y +---- + +After updating Podman, restart the containers, and the error caused by missing runc files should no longer occur. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/6981027[Quay containers can't run due to missing runc files]. 
\ No newline at end of file diff --git a/modules/monitoring-single-namespace.adoc b/modules/monitoring-single-namespace.adoc new file mode 100644 index 000000000..79945a1b7 --- /dev/null +++ b/modules/monitoring-single-namespace.adoc @@ -0,0 +1,284 @@ +:_content-type: PROCEDURE +[id="monitoring-single-namespace"] += Enabling monitoring when the {productname} Operator is installed in a single namespace + +[NOTE] +==== +Currently, enabling monitoring when the {productname} Operator is installed in a single namespace is not supported on IBM Power and IBM Z. +==== + +When the {productname} Operator is installed in a single namespace, the monitoring component is set to `unmanaged`. To configure monitoring, you must enable it for user-defined namespaces in {ocp}. + +For more information, see the {ocp} documentation for link:https://docs.openshift.com/container-platform/{ocp-y}/monitoring/configuring-the-monitoring-stack.html[Configuring the monitoring stack] and link:https://docs.openshift.com/container-platform/{ocp-y}/monitoring/enabling-monitoring-for-user-defined-projects.html[Enabling monitoring for user-defined projects]. + +The following sections show you how to enable monitoring for {productname} based on the {ocp} documentation. + +[id="creating-cluster-monitoring-config-map"] +== Creating a cluster monitoring config map + +Use the following procedure to check if the `cluster-monitoring-config` `ConfigMap` object exists. + +.Procedure + +. Enter the following command to check whether the `cluster-monitoring-config` ConfigMap object exists: ++ +[source,terminal] +---- +$ oc -n openshift-monitoring get configmap cluster-monitoring-config +---- ++ +.Example output ++ +[source,terminal] +---- +Error from server (NotFound): configmaps "cluster-monitoring-config" not found +---- + +. Optional: If the `ConfigMap` object does not exist, create a YAML manifest. In the following example, the file is called `cluster-monitoring-config.yaml`.
++ +[source,terminal] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-monitoring-config + namespace: openshift-monitoring +data: + config.yaml: | +---- + +. Optional: If the `ConfigMap` object does not exist, create the `ConfigMap` object: ++ +[source,terminal] +---- +$ oc apply -f cluster-monitoring-config.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +configmap/cluster-monitoring-config created +---- + +. Ensure that the `ConfigMap` object exists by running the following command: ++ +[source,terminal] +---- +$ oc -n openshift-monitoring get configmap cluster-monitoring-config +---- ++ +.Example output ++ +[source,terminal] +---- +NAME DATA AGE +cluster-monitoring-config 1 12s +---- + +[id="creating-user-defined-workload-monitoring-config-map"] +== Creating a user-defined workload monitoring ConfigMap object + +Use the following procedure to check if the `user-workload-monitoring-config` `ConfigMap` object exists. + +.Procedure + +. Enter the following command to check whether the `user-workload-monitoring-config` `ConfigMap` object exists: ++ +---- +$ oc -n openshift-user-workload-monitoring get configmap user-workload-monitoring-config +---- ++ +.Example output ++ +[source,terminal] +---- +Error from server (NotFound): configmaps "user-workload-monitoring-config" not found +---- + +. If the `ConfigMap` object does not exist, create a YAML manifest. In the following example, the file is called `user-workload-monitoring-config.yaml`. ++ +[source,terminal] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-workload-monitoring-config + namespace: openshift-user-workload-monitoring +data: + config.yaml: | +---- + +.
Optional: Create the `ConfigMap` object by entering the following command: ++ +[source,terminal] +---- +$ oc apply -f user-workload-monitoring-config.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +configmap/user-workload-monitoring-config created +---- + +[id="enabling-monitoring-user-defined-projects"] +== Enable monitoring for user-defined projects + +Use the following procedure to enable monitoring for user-defined projects. + +.Procedure + +. Enter the following command to check if monitoring for user-defined projects is running: ++ +[source,terminal] +---- +$ oc get pods -n openshift-user-workload-monitoring +---- ++ +.Example output ++ +[source,terminal] +---- +No resources found in openshift-user-workload-monitoring namespace. +---- + +. Edit the `cluster-monitoring-config` `ConfigMap` by entering the following command: ++ +---- +$ oc -n openshift-monitoring edit configmap cluster-monitoring-config +---- + +. Set `enableUserWorkload: true` in your `config.yaml` file to enable monitoring for user-defined projects on the cluster: ++ +[source,yaml] +---- +apiVersion: v1 +data: + config.yaml: | + enableUserWorkload: true +kind: ConfigMap +metadata: + annotations: +---- + +. Enter the following command to save the file, apply the changes, and ensure that the appropriate pods are running: ++ +---- +$ oc get pods -n openshift-user-workload-monitoring +---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +prometheus-operator-6f96b4b8f8-gq6rl 2/2 Running 0 15s +prometheus-user-workload-0 5/5 Running 1 12s +prometheus-user-workload-1 5/5 Running 1 12s +thanos-ruler-user-workload-0 3/3 Running 0 8s +thanos-ruler-user-workload-1 3/3 Running 0 8s +---- + +[id="creating-service-object-expose-quay-metrics"] +== Creating a Service object to expose {productname} metrics + +Use the following procedure to create a `Service` object to expose {productname} metrics. + +.Procedure + +. 
Create a YAML file for the Service object: ++ +---- +$ cat <<EOF > quay-service.yaml + +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + quay-component: monitoring + quay-operator/quayregistry: example-registry + name: example-registry-quay-metrics + namespace: quay-enterprise +spec: + ports: + - name: quay-metrics + port: 9091 + protocol: TCP + targetPort: 9091 + selector: + quay-component: quay-app + quay-operator/quayregistry: example-registry + type: ClusterIP +EOF +---- + +. Create the `Service` object by entering the following command: ++ +[source,terminal] +---- +$ oc apply -f quay-service.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +service/example-registry-quay-metrics created +---- + +[id="creating-servicemonitor-object"] +== Creating a ServiceMonitor object + +Use the following procedure to configure OpenShift Monitoring to scrape the metrics by creating a `ServiceMonitor` resource. + +.Procedure + +. Create a YAML file for the `ServiceMonitor` resource: ++ +---- +$ cat <<EOF > quay-service-monitor.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + quay-operator/quayregistry: example-registry + name: example-registry-quay-metrics-monitor + namespace: quay-enterprise +spec: + endpoints: + - port: quay-metrics + namespaceSelector: + any: true + selector: + matchLabels: + quay-component: monitoring +EOF +---- + +. Create the `ServiceMonitor` resource by entering the following command: ++ +---- +$ oc apply -f quay-service-monitor.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +servicemonitor.monitoring.coreos.com/example-registry-quay-metrics-monitor created +---- + +[id="view-metrics-in-ocp"] +== Viewing metrics in {ocp} + +You can access the metrics in the {ocp} console under *Monitoring* -> *Metrics*.
In the Expression field, enter *quay_* to see the list of metrics available: + +image:metrics-single-namespace.png[Quay metrics] + +For example, if you have added users to your registry, select the *quay-users_rows* metric: + +image:metrics-single-namespace-users.png[Quay metrics] diff --git a/modules/moving-a-tag.adoc b/modules/moving-a-tag.adoc new file mode 100644 index 000000000..d557b0fd8 --- /dev/null +++ b/modules/moving-a-tag.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="moving-a-tag"] += Moving an image tag + +You can move a tag to a different image if desired. + +.Procedure + +* Click the *Settings*, or _gear_, icon next to the tag and click *Add New Tag* and enter an existing tag name. +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +confirms that you want the tag moved instead of added. \ No newline at end of file diff --git a/modules/namespace-auto-pruning-arch.adoc b/modules/namespace-auto-pruning-arch.adoc new file mode 100644 index 000000000..989bf96ef --- /dev/null +++ b/modules/namespace-auto-pruning-arch.adoc @@ -0,0 +1,76 @@ +:_content-type: CONCEPT +[id="namespace-auto-pruning-arch"] += Namespace auto-pruning architecture + +For the namespace auto-pruning feature, two distinct database tables within a database schema were created: one for `namespaceautoprunepolicy` and another for `autoprunetaskstatus`. An auto-prune worker carries out the configured policies. + +[discrete] +[id="namespaceautoprunepolicy-database-table"] +== Namespace auto prune policy database table + +The `namespaceautoprunepolicy` database table holds the policy configuration for a single namespace. There is only one entry per namespace, but there is support for multiple rows per `namespace_id`. The `policy` field holds the policy details, such as `{method: "creation_date", olderThan: "2w"}` or `{method: "number_of_tags", numTags: 100}`. 
+ +.`namespaceautoprunepolicy` database table +[cols="1a,1a,1a,1a",options="header"] +|=== +| Field | Type |Attributes | Description + +| `uuid` | character varying (225) | Unique, indexed | Unique identifier for this policy + +| `namespace_id` | Integer | Foreign Key |Namespace that the policy falls under + +| `policy` | text | JSON | Policy configuration +|=== + +[discrete] +[id="autoprunetaskstatus-database-table"] +== Auto-prune task status database table + +The `autoprunetaskstatus` table registers tasks to be executed by the auto-prune worker. Tasks are executed within the context of a single namespace. Only one task per namespace exists. + +.`autoprunetaskstatus` database table +[cols="1a,1a,1a,1a",options="header"] + +|=== +| Field | Type |Attributes | Description +| `namespace_id` | Integer | Foreign Key | Namespace that this task belongs to + +| `last_ran_ms` | Big Integer (bigint) | Nullable, indexed | Last time that the worker executed the policies for this namespace + +| `status` | text | Nullable | Details from the last execution task +|=== + +[id="auto-prune-worker"] +== Auto-prune worker + +The following sections detail information about the auto-prune worker. + +[id="auto-prune-task-creation"] +=== Auto-prune-task-creation + +When a new policy is created in the `namespaceautoprunepolicy` database table, a row is also created in the `autoprunetask` table. This is done in the same transaction. The auto-prune worker uses the entry in the `autoprunetask` table to identify which namespace it should execute policies for. + +[id="auto-prune-worker-execution"] +=== Auto-prune worker execution + +The auto-pruning worker is an asynchronous job that executes configured policies. Its workflow is based on values in the `autoprunetask` table. When a task begins, the following occurs: + +* The auto-prune worker starts on a set interval, which defaults at 30 seconds. 
+* The auto-prune worker selects a row from `autoprunetask` with the least, or null, `last_ran_ms` and `FOR UPDATE SKIP LOCKED`. +** A null `last_ran_ms` indicates that the task was never run. +** A task that hasn't been run in the longest amount of time, or has never been run at all, is prioritized. + +* The auto-prune worker obtains the policy configuration from the `namespaceautoprunepolicy` table. +** If no policy configuration exists, the entry from `autoprunetask` is deleted for this namespace and the procedure stops immediately. + +* The auto-prune worker begins a paginated loop of all repositories under the organization. +** The auto-prune worker determines which pruning method to use based on `policy.method`. +* The auto-prune worker executes the pruning method with the policy configuration retrieved earlier. +** For pruning by the number of tags: the auto-pruner worker gets the number of currently active tags sorted by creation date, and deletes the older tags down to the configured number. +** For pruning by date: the auto-pruner worker gets the active tags older than the specified time span and any tags returned are deleted. + +* The auto-prune worker adds audit logs of the tags deleted. + +* The `last_ran_ms` gets updated after a row from `autoprunetask` is selected. + +* The auto-prune worker ends. diff --git a/modules/nested-ldap-team-sync.adoc b/modules/nested-ldap-team-sync.adoc new file mode 100644 index 000000000..53a94ed4a --- /dev/null +++ b/modules/nested-ldap-team-sync.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="nested-ldap-team-sync"] += Does {productname} support nested LDAP groups for team synchronization? + +{productname} does not currently support nested LDAP team synchronization. As a temporary workaround, you can manually maintain {productname} team membership without using the `directory synchronization` feature, and perform regular syncs of your {productname} team with LDAP group members using cronjobs.
\ No newline at end of file diff --git a/modules/notification-actions.adoc b/modules/notification-actions.adoc new file mode 100644 index 000000000..3f97adbda --- /dev/null +++ b/modules/notification-actions.adoc @@ -0,0 +1,72 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="notification-actions"] += Notification actions + +Notifications are added to the *Events and Notifications* section of the *Repository Settings* page. They are also added to the *Notifications* window, which can be found by clicking the _bell_ icon in the navigation pane of +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +notifications can be set up to be sent to a _User_, _Team_, or the entire _organization_. + +Notifications can be delivered by one of the following methods. + +[discrete] +[id="e-mail"] +== *E-mail notifications* + +E-mails are sent to specified addresses that describe the specified event. E-mail addresses must be verified on a _per-repository_ basis. + +[discrete] +[id="webhook-post"] +== *Webhook POST notifications* + +An HTTP `POST` call is made to the specified URL with the event's data. For more information about event data, see "Repository events description". + +When the URL is HTTPS, the call has an SSL client certificate set from +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +Verification of this certificate proves that the call originated from +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +Responses with the status code in the `2xx` range are considered successful.
Responses with any other status code are considered failures and result in a retry of the webhook notification. + +[discrete] +[id="flowdock-notification"] +== *Flowdock notifications* + +Posts a message to Flowdock. + +[discrete] +[id="hipchat-notification"] +== *Hipchat notifications* + +Posts a message to HipChat. + +[discrete] +[id="slack-notification"] +== *Slack notifications* + +Posts a message to Slack. \ No newline at end of file diff --git a/modules/oauth2-access-tokens.adoc b/modules/oauth2-access-tokens.adoc new file mode 100644 index 000000000..625e4dfa8 --- /dev/null +++ b/modules/oauth2-access-tokens.adoc @@ -0,0 +1,43 @@ +:_content-type: REFERENCE +[id="oauth2-access-tokens"] += OAuth 2 access tokens + +link:https://oauth.net/2/[OAuth 2] access tokens (considered "API tokens" for {productname}) enable user-authenticated access to the {productname} API, suitable for applications that require user identity verification. These tokens are obtained through an OAuth 2 authorization process, where a {productname} administrator generates a token on behalf of themselves or another user to access {productname} API endpoints. OAuth 2 tokens authorize actions on API endpoints based on the scopes defined for the token. + +[NOTE] +==== +Although OAuth 2 tokens authorize actions on API endpoints based on the scopes defined for the token, access to the resources themselves is governed by {productname}'s role-based access control (RBAC) mechanisms. Actions can be created on a resource, for example, a repository, provided that you have the proper role (*Admin* or *Creator*) to do so for that namespace. This is true even if the API token was granted the `repo:admin` scope. +==== + +OAuth 2 access tokens can only be created by using the {productname} UI; there is no way to create an OAuth 2 access token by using the CLI. When creating an OAuth 2 token, the following options can be selected for a token holder: + +* *Administer Organization*. 
When selected, allows the user to be able to administer organizations, including creating robots, creating teams, adjusting team membership, and changing billing settings. + +* *Administer Repositories*. When selected, provides the user administrator access to all repositories to which the granting user has access. + +* *Create Repositories*. When selected, provides the user the ability to create repositories in any namespaces that the granting user is allowed to create repositories. + +* *View all visible repositories*. When selected, provides the user the ability to view and pull all repositories visible to the granting user. + +* *Read/Write to any accessible repositories*. When selected, provides the user the ability to view, push and pull to all repositories to which the granting user has write access. + +* *Super User Access*. When selected, provides the user the ability to administer your installation including managing users, managing organizations and other features found in the superuser panel. + +* *Administer User*. When selected, provides the user the ability to administer your account including creating robots and granting them permissions to your repositories. + +* *Read User Information*. When selected, provides the user the ability to read user information such as username and email address. + +Token distributors should be mindful of the permissions that they are granting when generating a token on behalf of a user, and should have absolute trust in a user before granting such permissions as *Administer organization*, *Super User Access*, and *Administer User*. Additionally, access tokens are only revealed at the time of creation; they cannot be listed from the CLI, nor can they be found on the {productname} UI. If an access token is lost or forgotten, a new token must be created; a token cannot be recovered.
+ +OAuth 2 access tokens are passed as a `Bearer` token in the `Authorization` header of an API call and, as a result, are used to provide authentication and authorization to the defined API endpoint, such as an image tag, a repository, an organization, and so on. + +The API is available from the `/api/v1` endpoint of your {productname} host. For example, `\https:///api/v1`. It allows users to connect to endpoints through their browser to `GET`, `POST`, `DELETE`, and `PUT` {productname} settings by enabling the Swagger UI. The API can be accessed by applications that make API calls and use OAuth tokens, and it sends and receives data as JSON. + +With {productname}, there is currently no way to rotate or to set an expiration time on an OAuth 2 access token, and the token lifespan is 10 years. Tokens can be deleted by deleting the applications in which they were created in the event that they are compromised, however, this deletes all tokens that were made within that specific application. + +[NOTE] +==== +In practice, {productname} administrators _could_ create a new OAuth application on the *Applications* page of their organization each time they wanted to create a new OAuth token for a user. This would ensure that a single application is not responsible for all OAuth tokens. As a result, in the event that a user's token is compromised, the administrator would delete the application of the compromised token. This would help avoid disruption for other users whose tokens might be part of the same application. +==== + +The following sections shows you how to generate and reassign an OAuth 2 access token. 
\ No newline at end of file diff --git a/modules/obtaining-quay-config-information.adoc b/modules/obtaining-quay-config-information.adoc new file mode 100644 index 000000000..844baddea --- /dev/null +++ b/modules/obtaining-quay-config-information.adoc @@ -0,0 +1,159 @@ +:_content-type: PROCEDURE +[id="obtaining-quay-config-information"] += Configuration information for {productname} + +Checking a configuration YAML can help identify and resolve various issues related to the configuration of {productname}. Checking the configuration YAML can help you address the following issues: + +* *Incorrect Configuration Parameters*: If the database is not functioning as expected or is experiencing performance issues, your configuration parameters could be at fault. By checking the configuration YAML, administrators can ensure that all the required parameters are set correctly and match the intended settings for the database. + +* *Resource Limitations*: The configuration YAML might specify resource limits for the database, such as memory and CPU limits. If the database is running into resource constraints or experiencing contention with other services, adjusting these limits can help optimize resource allocation and improve overall performance. + +* *Connectivity Issues*: Incorrect network settings in the configuration YAML can lead to connectivity problems between the application and the database. Ensuring that the correct network configurations are in place can resolve issues related to connectivity and communication. + +* *Data Storage and Paths*: The configuration YAML may include paths for storing data and logs. If the paths are misconfigured or inaccessible, the database may encounter errors while reading or writing data, leading to operational issues. + +* *Authentication and Security*: The configuration YAML may contain authentication settings, including usernames, passwords, and access controls. 
Verifying these settings is crucial for maintaining the security of the database and ensuring only authorized users have access. + +* *Plugin and Extension Settings*: Some databases support extensions or plugins that enhance functionality. Issues may arise if these plugins are misconfigured or not loaded correctly. Checking the configuration YAML can help identify any problems with plugin settings. + +* *Replication and High Availability Settings*: In clustered or replicated database setups, the configuration YAML may define replication settings and high availability configurations. Incorrect settings can lead to data inconsistency and system instability. + +* *Backup and Recovery Options*: The configuration YAML might include backup and recovery options, specifying how data backups are performed and how data can be recovered in case of failures. Validating these settings can ensure data safety and successful recovery processes. + +By checking your configuration YAML, {productname} administrators can detect and resolve these issues before they cause significant disruptions to the application or service relying on the database. + +[id="obtaining-configuration-information-quay"] +== Obtaining configuration information for {productname} + +Configuration information can be obtained for all types of {productname} deployments, include standalone, Operator, and geo-replication deployments. Obtaining configuration information can help you resolve issues with authentication and authorization, your database, object storage, and repository mirroring. After you have obtained the necessary configuration information, you can update your `config.yaml` file, search the link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] for a solution, or file a support ticket with the Red Hat Support team. + +.Procedure + +. To obtain configuration information on {productname} Operator deployments, you can use `oc exec`, `oc cp`, or `oc rsync`. + +.. 
To use the `oc exec` command, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it -- cat /conf/stack/config.yaml +---- ++ +This command returns your `config.yaml` file directly to your terminal. + +.. To use the `oc copy` command, enter the following commands: ++ +[source,terminal] +---- +$ oc cp :/conf/stack/config.yaml /tmp/config.yaml +---- ++ +To display this information in your terminal, enter the following command: ++ +[source,terminal] +---- +$ cat /tmp/config.yaml +---- + +.. To use the `oc rsync` command, enter the following commands: ++ +[source,terminal] +---- +oc rsync :/conf/stack/ /tmp/local_directory/ +---- ++ +To display this information in your terminal, enter the following command: ++ +[source,terminal] +---- +$ cat /tmp/local_directory/config.yaml +---- ++ +.Example output ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: +local_us: +- RHOCSStorage +- access_key: redacted + bucket_name: lht-quay-datastore-68fff7b8-1b5e-46aa-8110-c4b7ead781f5 + hostname: s3.openshift-storage.svc.cluster.local + is_secure: true + port: 443 + secret_key: redacted + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: +- local_us +DISTRIBUTED_STORAGE_PREFERENCE: +- local_us +---- + +. To obtain configuration information on standalone {productname} deployments, you can use `podman cp` or `podman exec`. + +.. To use the `podman copy` command, enter the following commands: ++ +[source,terminal] +---- +$ podman cp :/conf/stack/config.yaml /tmp/local_directory/ +---- ++ +To display this information in your terminal, enter the following command: ++ +[source,terminal] +---- +$ cat /tmp/local_directory/config.yaml +---- + +.. 
To use `podman exec`, enter the following commands: ++ +[source,terminal] +---- +$ podman exec -it cat /conf/stack/config.yaml +---- ++ +.Example output ++ +[source,yaml] +---- +BROWSER_API_CALLS_XHR_ONLY: false +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.oci.image.layer.v1.tar+zstd + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +AUTHENTICATION_TYPE: Database +AVATAR_KIND: local +BUILDLOGS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 +DATABASE_SECRET_KEY: 05ee6382-24a6-43c0-b30f-849c8a0f7260 +DB_CONNECTION_ARGS: {} +--- +---- + +[id="obtaining-db-config-info"] +== Obtaining database configuration information + +You can obtain configuration information about your database by using the following procedure. + +[WARNING] +==== +Interacting with the PostgreSQL database is potentially destructive. It is highly recommended that you perform the following procedure with the help of a {productname} Support Specialist. +==== + +.Procedure + +* If you are using the {productname} Operator on {ocp}, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it -- cat /var/lib/pgsql/data/userdata/postgresql.conf +---- + +* If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it cat /var/lib/pgsql/data/userdata/postgresql.conf +---- diff --git a/modules/obtaining-quay-logs.adoc b/modules/obtaining-quay-logs.adoc new file mode 100644 index 000000000..3d9edddee --- /dev/null +++ b/modules/obtaining-quay-logs.adoc @@ -0,0 +1,106 @@ +:_content-type: PROCEDURE +[id="obtaining-quay-logs"] += Logging information for {productname} + +Obtaining log information can be beneficial in various ways for managing, monitoring, and troubleshooting applications running in containers or pods.
Some of the reasons why obtaining log information is valuable include the following: + +* *Debugging and Troubleshooting*: Logs provide insights into what's happening inside the application, allowing developers and system administrators to identify and resolve issues. By analyzing log messages, one can identify errors, exceptions, warnings, or unexpected behavior that might occur during the application's execution. + +* *Performance Monitoring*: Monitoring logs helps to track the performance of the application and its components. Monitoring metrics like response times, request rates, and resource utilization can help in optimizing and scaling the application to meet the demand. + +* *Security Analysis*: Logs can be essential in auditing and detecting potential security breaches. By analyzing logs, suspicious activities, unauthorized access attempts, or any abnormal behavior can be identified, helping in detecting and responding to security threats. + +* *Tracking User Behavior*: In some cases, logs can be used to track user activities and behavior. This is particularly important for applications that handle sensitive data, where tracking user actions can be useful for auditing and compliance purposes. + +* *Capacity Planning*: Log data can be used to understand resource utilization patterns, which can aid in capacity planning. By analyzing logs, one can identify peak usage periods, anticipate resource needs, and optimize infrastructure accordingly. + +* *Error Analysis*: When errors occur, logs can provide valuable context about what happened leading up to the error. This can help in understanding the root cause of the issue and facilitating the debugging process. + +* *Verification of Deployment*: Logging during the deployment process can help verify if the application is starting correctly and if all components are functioning as expected. 
+ +* *Continuous Integration/Continuous Deployment (CI/CD)*: In CI/CD pipelines, logging is essential to capture build and deployment statuses, allowing teams to monitor the success or failure of each stage. + +[id="obtaining-log-information-quay"] +== Obtaining log information for {productname} + +Log information can be obtained for all types of {productname} deployments, including geo-replication deployments, standalone deployments, and Operator deployments. Log information can also be obtained for mirrored repositories. It can help you troubleshoot authentication and authorization issues, and object storage issues. After you have obtained the necessary log information, you can search the link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] for a solution, or file a support ticket with the Red Hat Support team. + +Use the following procedure to obtain logs for your {productname} deployment. + +.Procedure + +* If you are using the {productname} Operator on {ocp}, enter the following command to view the logs: ++ +[source,terminal] +---- +$ oc logs +---- + +* If you are on a standalone {productname} deployment, enter the following command: ++ +[source,terminal] +---- +$ podman logs +---- ++ +.Example output ++ +[source,terminal] +---- +... +gunicorn-web stdout | 2023-01-20 15:41:52,071 [205] [DEBUG] [app] Starting request: urn:request:0d88de25-03b0-4cf9-b8bc-87f1ac099429 (/oauth2/azure/callback) {'X-Forwarded-For': '174.91.79.124'} +... +---- + +[id="obtaining-verbose-container-pod-logs"] +== Examining verbose logs + +{productname} does not have verbose logs, however, with the following procedures, you can obtain a detailed status check of your database pod or container. + +[NOTE] +==== +Additional debugging information can be returned if you have deployed {productname} in one of the following ways: + +* You have deployed {productname} by passing in the `DEBUGLOG=true` variable. 
+* You have deployed {productname} with LDAP authentication enabled by passing in the `DEBUGLOG=true` and `USERS_DEBUG=1` variables. +* You have configured {productname-ocp} by updating the `QuayRegistry` resource to include `DEBUGLOG=true`. + +For more information, see "Running {productname} in debug mode". +==== +.Procedure + +. Enter the following commands to examine verbose database logs. + +.. If you are using the {productname} Operator on {ocp}, enter the following commands: ++ +[source,terminal] +---- +$ oc logs --previous +---- ++ +[source,terminal] +---- +$ oc logs --previous -c +---- ++ +[source,terminal] +---- +$ oc cp :/var/lib/pgsql/data/userdata/log/* /path/to/desired_directory_on_host +---- + +.. If you are using a standalone deployment of {productname}, enter the following commands: ++ +[source,terminal] +---- +$ podman logs --previous +---- ++ +[source,terminal] +---- +$ podman logs --previous -c +---- ++ +[source,terminal] +---- +$ podman cp :/var/lib/pgsql/data/userdata/log/* /path/to/desired_directory_on_host +---- \ No newline at end of file diff --git a/modules/oci-intro.adoc b/modules/oci-intro.adoc new file mode 100644 index 000000000..e87ab057d --- /dev/null +++ b/modules/oci-intro.adoc @@ -0,0 +1,53 @@ +:_content-type: CONCEPT +[id="oci-intro"] += Open Container Initiative support + +Container registries were originally designed to support container images in the Docker image format. To promote the use of additional runtimes apart from Docker, the Open Container Initiative (OCI) was created to provide a standardization surrounding container runtimes and image formats. Most container registries support the OCI standardization as it is based on the link:https://docs.docker.com/registry/spec/manifest-v2-2/[Docker image manifest V2, Schema 2] format. + +In addition to container images, a variety of artifacts have emerged that support not just individual applications, but also the Kubernetes platform as a whole. 
These range from Open Policy Agent (OPA) policies for security and governance to Helm charts and Operators that aid in application deployment. + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +is a private container registry that not only stores container images, but also supports an entire ecosystem of tooling to aid in the management of containers. +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +strives to be as compatible as possible with the link:https://opencontainers.org/posts/blog/2024-03-13-image-and-distribution-1-1/[OCI 1.1 _Image and Distribution specifications_], and supports common media types like _Helm charts_ (as long as they pushed with a version of Helm that supports OCI) and a variety of arbitrary media types within the manifest or layer components of container images. +ifeval::["{context}" == "quay-io"] +Support for OCI media types differs from previous iterations of {quayio}, when the registry was more strict about accepted media types. Because {quayio} now works with a wider array of media types, including those that were previously outside the scope of its support, it is now more versatile accommodating not only standard container image formats but also emerging or unconventional types. +endif::[] +ifeval::["{context}" == "use-quay"] +Support for OCI media types differs from previous iterations of {productname}, when the registry was more strict about accepted media types. Because {productname} now works with a wider array of media types, including those that were previously outside the scope of its support, it is now more versatile accommodating not only standard container image formats but also emerging or unconventional types. 
+endif::[] + +In addition to its expanded support for novel media types, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +ensures compatibility with Docker images, including V2_2 and V2_1 formats. This compatibility with Docker V2_2 and V2_1 images demonstrates +ifeval::["{context}" == "quay-io"] +{quayio}'s +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}'s +endif::[] +commitment to providing a seamless experience for Docker users. Moreover, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +continues to extend its support for Docker V1 pulls, catering to users who might still rely on this earlier version of Docker images. + +Support for OCI artifacts is enabled by default. The following examples show you how to use some media types, which can be used as examples for using other OCI media types. \ No newline at end of file diff --git a/modules/oci-referrers-oauth-access-token.adoc b/modules/oci-referrers-oauth-access-token.adoc new file mode 100644 index 000000000..6cc66d76e --- /dev/null +++ b/modules/oci-referrers-oauth-access-token.adoc @@ -0,0 +1,14 @@ +:_content-type: REFERENCE +[id="oci-referrers-oauth-access-token"] += OCI referrers OAuth access token + +In some cases, depending on the features that your {productname} deployment is configured to use, you might need to leverage an _OCI referrers OAuth access token_. OCI referrers OAuth access tokens are used to list OCI referrers of a manifest under a repository, and use a `curl` command to make a `GET` request to the {productname} `v2/auth` endpoint. + +These tokens are obtained via basic HTTP authentication, wherein the user provides a username and password encoded in Base64 to authenticate directly with the `v2/auth` API endpoint.
As such, they are based directly on the user's credentials and do not follow the same detailed authorization flow as OAuth 2, but still allow a user to authorize API requests. + +_OCI referrers OAuth access tokens_ do not offer scope-based permissions and do not expire. They are solely used to list OCI referrers of a manifest under a repository. + +[discrete] +== Additional resources + +* link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#attaching-referrers-image-tag[Attaching referrers to an image tag] \ No newline at end of file diff --git a/modules/oidc-config-fields.adoc b/modules/oidc-config-fields.adoc new file mode 100644 index 000000000..30e1752f7 --- /dev/null +++ b/modules/oidc-config-fields.adoc @@ -0,0 +1,65 @@ +[id="oidc-config-fields"] += OIDC configuration fields + +.OIDC fields +|=== +| Field | Type | Description +| **_LOGIN_CONFIG** + +(Required) | String | The parent key that holds the OIDC configuration settings. Typically the name of the OIDC provider, for example, `AZURE_LOGIN_CONFIG`, however any arbitrary string is accepted. +| **.CLIENT_ID** + +(Required) | String | The registered client ID for this {productname} instance. + + + +**Example:** `0e8dbe15c4c7630b6780` +| **.CLIENT_SECRET** + +(Required) | String | The registered client secret for this {productname} instance. + + + +**Example:** `e4a58ddd3d7408b7aec109e85564a0d153d3e846` +| **.DEBUGLOG** |Boolean | Whether to enable debugging. +| **.LOGIN_BINDING_FIELD** |String | Used when the internal authorization is set to LDAP. {productname} reads this parameter and tries to search through the LDAP tree for the user with this username. If it exists, it automatically creates a link to that LDAP account. +| **.LOGIN_SCOPES** | Object | Adds additional scopes that {productname} uses to communicate with the OIDC provider. +| **.OIDC_ENDPOINT_CUSTOM_PARAMS** | String | Support for custom query parameters on OIDC endpoints.
The following endpoints are supported: +`authorization_endpoint`, `token_endpoint`, and `user_endpoint`. +| **.OIDC_ISSUER** | String | Allows the user to define the issuer to verify. For example, JWT tokens contain a parameter known as `iss` which defines who issued the token. By default, this is read from the `.well-known/openid-configuration` endpoint, which is exposed by every OIDC provider. If this verification fails, there is no login. +| **.OIDC_SERVER** + +(Required) | String | The address of the OIDC server that is being used for authentication. + + + +**Example:** `\https://sts.windows.net/6c878.../` +| **.PREFERRED_USERNAME_CLAIM_NAME** |String |Sets the preferred username to a parameter from the token. +| **.SERVICE_ICON** | String | Changes the icon on the login screen. + +| **.SERVICE_NAME** + +(Required) | String | The name of the service that is being authenticated. + + + +**Example:** `Microsoft Entra ID` +| **.VERIFIED_EMAIL_CLAIM_NAME** | String | The name of the claim that is used to verify the email address of the user. + +| **.PREFERRED_GROUP_CLAIM_NAME** | String | The key name within the OIDC token payload that holds information about the user's group memberships. + +| **.OIDC_DISABLE_USER_ENDPOINT** | Boolean | Whether to allow or disable the `/userinfo` endpoint. If using Azure Entra ID, this field must be set to `true` because Azure obtains the user's information from the token instead of calling the `/userinfo` endpoint. + + + + **Default:** `false` +|=== + +[id="oidc-config"] +== OIDC configuration + +The following example shows a sample OIDC configuration. + +.Example OIDC configuration +[source,yaml] +---- +AUTHENTICATION_TYPE: OIDC +# ... +AZURE_LOGIN_CONFIG: + CLIENT_ID: + CLIENT_SECRET: + OIDC_SERVER: + DEBUGLOG: true + SERVICE_NAME: Microsoft Entra ID + VERIFIED_EMAIL_CLAIM_NAME: + OIDC_DISABLE_USER_ENDPOINT: true + OIDC_ENDPOINT_CUSTOM_PARAMS: + "authorization_endpoint": + "some": "param" +# ...
+---- \ No newline at end of file diff --git a/modules/openshift-routes-limitations.adoc b/modules/openshift-routes-limitations.adoc new file mode 100644 index 000000000..caeb35d6b --- /dev/null +++ b/modules/openshift-routes-limitations.adoc @@ -0,0 +1,39 @@ +:_content-type: PROCEDURE +[id="openshift-routes-limitations"] += {productname-ocp} _builds_ limitations with self-managed _routes_ + +The following limitations apply when you are using the {productname} Operator on {ocp} with a managed `route` component: + +* Currently, {ocp} _routes_ are only able to serve traffic to a single port. Additional steps are required to set up {productname} Builds. + +* Ensure that your `kubectl` or `oc` CLI tool is configured to work with the cluster where the {productname} Operator is installed and that your `QuayRegistry` exists; the `QuayRegistry` does not have to be on the same bare metal cluster where _builders_ run. + +* Ensure that HTTP/2 ingress is enabled on the OpenShift cluster by following link:https://docs.openshift.com/container-platform/{ocp-y}/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress[these steps]. + +* The {productname} Operator creates a `Route` resource that directs gRPC traffic to the Build manager server running inside of the existing `Quay` pod, or pods. If you want to use a custom hostname, or a subdomain like ``, ensure that you create a CNAME record with your DNS provider that points to the `status.ingress[0].host` of the created `Route` resource. For example: ++ +---- +$ kubectl get -n route -quay-builder -o jsonpath={.status.ingress[0].host} +---- + +* Using the {ocp} UI or CLI, update the `Secret` referenced by `spec.configBundleSecret` of the `QuayRegistry` with the _build_ cluster CA certificate. Name the key `extra_ca_cert_build_cluster.cert`.
Update the `config.yaml` file entry with the correct values referenced in the _build_ configuration that you created when you configured {productname} _builds_, and add the `BUILDMAN_HOSTNAME` configuration field: ++ +[source,yaml] +---- +BUILDMAN_HOSTNAME: <1> +BUILD_MANAGER: +- ephemeral +- ALLOWED_WORKER_COUNT: 1 + ORCHESTRATOR_PREFIX: buildman/production/ + JOB_REGISTRATION_TIMEOUT: 600 + ORCHESTRATOR: + REDIS_HOST: + REDIS_SSL: true + REDIS_SKIP_KEYSPACE_EVENT_SETUP: false + EXECUTORS: + - EXECUTOR: kubernetes + BUILDER_NAMESPACE: builder + ... +---- +<1> The externally accessible server hostname which the _build jobs_ use to communicate back to the _build manager_. Default is the same as `SERVER_HOSTNAME`. For an OpenShift `route` resource, it is either `status.ingress[0].host` or the CNAME entry if using a custom hostname. `BUILDMAN_HOSTNAME` must include the port number, for example, `somehost:443` for an {ocp} `route` resource, as the gRPC client used to communicate with the _build manager_ does not infer any port if omitted. diff --git a/modules/operator-advanced.adoc b/modules/operator-advanced.adoc new file mode 100644 index 000000000..b50aac10a --- /dev/null +++ b/modules/operator-advanced.adoc @@ -0,0 +1,3 @@ += Advanced Concepts + + diff --git a/modules/operator-cloudfront.adoc b/modules/operator-cloudfront.adoc new file mode 100644 index 000000000..e6fe35ef4 --- /dev/null +++ b/modules/operator-cloudfront.adoc @@ -0,0 +1,19 @@ +:_content-type: PROCEDURE +[id="operator-cloudfront"] += AWS S3 CloudFront + +[NOTE] +==== +Currently, using AWS S3 CloudFront is not supported on IBM Power and IBM Z. +==== + +Use the following procedure if you are using AWS S3 CloudFront for your backend registry storage. + +.Procedure + +.
Enter the following command to specify the registry key: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config_awss3cloudfront.yaml --from-file default-cloudfront-signing-key.pem=./default-cloudfront-signing-key.pem test-config-bundle +---- \ No newline at end of file diff --git a/modules/operator-components-intro.adoc b/modules/operator-components-intro.adoc new file mode 100644 index 000000000..c82b70d17 --- /dev/null +++ b/modules/operator-components-intro.adoc @@ -0,0 +1,45 @@ +:_content-type: CONCEPT +[id="operator-components-intro"] += {productname} Operator components + +{productname} has many dependencies. These dependencies include a database, object storage, Redis, and others. The {productname} Operator manages an opinionated deployment of {productname} and its dependencies on Kubernetes. These dependencies are treated as _components_ and are configured through the `QuayRegistry` API. + +In the `QuayRegistry` custom resource, the `spec.components` field configures components. Each component contains two fields: `kind` (the name of the component), and `managed` (a boolean that addresses whether the component lifecycle is handled by the {productname} Operator). 
+ +By default, all components are managed and auto-filled upon reconciliation for visibility: + +.Example `QuayRegistry` resource +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: config-bundle-secret + components: + - kind: quay + managed: true + - kind: postgres + managed: true + - kind: clair + managed: true + - kind: redis + managed: true + - kind: horizontalpodautoscaler + managed: true + - kind: objectstorage + managed: true + - kind: route + managed: true + - kind: mirror + managed: true + - kind: monitoring + managed: true + - kind: tls + managed: true + - kind: clairpostgres + managed: true +---- + diff --git a/modules/operator-components-managed.adoc b/modules/operator-components-managed.adoc new file mode 100644 index 000000000..fa9bc4ac0 --- /dev/null +++ b/modules/operator-components-managed.adoc @@ -0,0 +1,39 @@ +:_content-type: REFERENCE +[id="operator-components-managed"] += Using managed components + +Unless your `QuayRegistry` custom resource specifies otherwise, the {productname} Operator uses defaults for the following managed components: + +* **quay:** Holds overrides for deployment of {productname-ocp}, for example, environment variables and number of replicas. This component is new as of {productname} 3.7 and cannot be set to unmanaged. + +* **postgres:** For storing the registry metadata, +ifeval::["{productname}" == "Red Hat Quay"] +As of {productname} 3.9, uses a version of PostgreSQL 13 from link:https://www.softwarecollections.org/en/[Software Collections]. ++ +[NOTE] +==== +When upgrading from {productname} 3.8 -> 3.9, the Operator automatically handles upgrading PostgreSQL 10 to PostgreSQL 13. This upgrade is required. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported.
+==== +endif::[] +ifeval::["{productname}" == "Project Quay"] +As of {productname} 3.9, uses an upstream (CentOS) version of PostgreSQL 13. +endif::[] +* **clair:** Provides image vulnerability scanning. + +* **redis:** Stores live builder logs and the {productname} tutorial. Also includes the locking mechanism that is required for garbage collection. + +* **horizontalpodautoscaler:** Adjusts the number of `Quay` pods depending on memory/cpu consumption. + +* **objectstorage:** For storing image layer blobs, utilizes the `ObjectBucketClaim` Kubernetes API which is provided by Noobaa or {odf}. + +* **route:** Provides an external entrypoint to the {productname} registry from outside of {ocp}. + +* **mirror:** Configures repository mirror workers to support optional repository mirroring. + +* **monitoring:** Features include a Grafana dashboard, access to individual metrics, and notifications for frequently restarting `Quay` pods. + +* **tls:** Configures whether {productname} or {ocp} handles SSL/TLS. + +* **clairpostgres:** Configures a managed Clair database. This is a separate database than the PostgreSQL database used to deploy {productname}. + +The {productname} Operator handles any required configuration and installation work needed for {productname} to use the managed components. If the opinionated deployment performed by the {productname} Operator is unsuitable for your environment, you can provide the {productname} Operator with `unmanaged` resources, or overrides, as described in the following sections. \ No newline at end of file diff --git a/modules/operator-components-unmanaged-other.adoc b/modules/operator-components-unmanaged-other.adoc new file mode 100644 index 000000000..7c9d46834 --- /dev/null +++ b/modules/operator-components-unmanaged-other.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="operator-components-unmanaged-other"] += Configuring external Redis + +Use the content in this section to set up an external Redis deployment. 
\ No newline at end of file diff --git a/modules/operator-components-unmanaged.adoc b/modules/operator-components-unmanaged.adoc new file mode 100644 index 000000000..45570f8b6 --- /dev/null +++ b/modules/operator-components-unmanaged.adoc @@ -0,0 +1,26 @@ +:_content-type: REFERENCE +[id="operator-components-unmanaged"] += Using unmanaged components for dependencies + +If you have existing components such as PostgreSQL, Redis, or object storage that you want to use with {productname}, you first configure them within the {productname} configuration bundle, or the `config.yaml` file. Then, they must be referenced in your `QuayRegistry` bundle as a Kubernetes `Secret` while indicating which components are unmanaged. + +//Might be used in a note, however I have removed due to the removal of the config editor on OCP deployments. + +//The {productname} config editor can also be used to create or modify an existing config bundle and simplifies the process of updating the Kubernetes `Secret`, especially for multiple changes. When {productname}'s configuration is changed by the config editor and sent to the {productname} Operator, the deployment is updated to reflect the new configuration. + + +[NOTE] +==== +If you are using an unmanaged PostgreSQL database, and the version is PostgreSQL 10, it is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. 
+==== + +See the following sections for configuring unmanaged components: + +* xref:operator-unmanaged-postgres[Using an existing PostgreSQL database] +* xref:operator-unmanaged-hpa[Using unmanaged Horizontal Pod Autoscalers] +* xref:operator-unmanaged-storage[Using unmanaged storage] +* xref:operator-unmanaged-storage-noobaa[Using an unmanaged NooBaa instance] +* xref:operator-unmanaged-redis[Using an unmanaged Redis database] +* xref:operator-unmanaged-route[Disabling the route component] +* xref:operator-unmanaged-monitoring[Disabling the monitoring component] +* xref:operator-unmanaged-mirroring[Disabling the mirroring component] \ No newline at end of file diff --git a/modules/operator-concepts.adoc b/modules/operator-concepts.adoc new file mode 100644 index 000000000..05079f84b --- /dev/null +++ b/modules/operator-concepts.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="operator-concepts"] += Introduction to the {productname} Operator + +Use the content in this chapter to execute the following: + +* Install {productname-ocp} using the {productname} Operator + +* Configure managed, or unmanaged, object storage + +* Configure unmanaged components, such as the database, Redis, routes, TLS, and so on + +* Deploy the {productname} registry on {ocp} using the {productname} Operator + +* Use advanced features supported by {productname} + +* Upgrade the {productname} registry by using the {productname} Operator \ No newline at end of file diff --git a/modules/operator-config-bundle-secret.adoc b/modules/operator-config-bundle-secret.adoc new file mode 100644 index 000000000..40f624a09 --- /dev/null +++ b/modules/operator-config-bundle-secret.adoc @@ -0,0 +1,7 @@ +:_content-type: REFERENCE +[id="operator-config-bundle-secret"] += Config bundle secret + +The `spec.configBundleSecret` field is a reference to the `metadata.name` of a `Secret` in the same namespace as the `QuayRegistry` resource. This `Secret` must contain a `config.yaml` key/value pair. 
+ +The `config.yaml` file is a {productname} `config.yaml` file. This field is optional, and is auto-filled by the {productname} Operator if not provided. If provided, it serves as the base set of config fields which are later merged with other fields from any managed components to form a final output `Secret`, which is then mounted into the {productname} application pods. \ No newline at end of file diff --git a/modules/operator-config-cli-access.adoc b/modules/operator-config-cli-access.adoc new file mode 100644 index 000000000..ed06521cb --- /dev/null +++ b/modules/operator-config-cli-access.adoc @@ -0,0 +1,96 @@ +:_content-type: PROCEDURE +[id="operator-config-cli-access"] += Determining QuayRegistry endpoints and secrets + +Use the following procedure to find `QuayRegistry` endpoints and secrets. + +.Procedure + +. You can examine the `QuayRegistry` resource, using `oc describe quayregistry` or `oc get quayregistry -o yaml`, to find the current endpoints and secrets by entering the following command: ++ +[source,terminal] +---- +$ oc get quayregistry example-registry -n quay-enterprise -o yaml +---- ++ +.Example output ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + ... + name: example-registry + namespace: quay-enterprise + ... +spec: + components: + - kind: quay + managed: true + ... + - kind: clairpostgres + managed: true + configBundleSecret: init-config-bundle-secret <1> +status: + currentVersion: 3.7.0 + lastUpdated: 2022-05-11 13:28:38.199476938 +0000 UTC + registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org <2> +---- +<1> The config bundle secret, containing the `config.yaml` file and any SSL/TLS certificates. +<2> The URL for your registry, for browser access to the registry UI, and for the registry API endpoint. 
+ +//// +[id="determining-username-password-config-editor-tool"] +== Locating the username and password for the config editor tool + +Use the following procedure to locate the username and password for the config editor tool. + +.Procedure + +. Enter the following command to retrieve the secret: ++ +[source,terminal] +---- +$ oc get secret -n quay-enterprise example-registry-quay-config-editor-credentials-fg2gdgtm24 -o yaml +---- ++ +.Example output ++ +[source,yaml] +---- +apiVersion: v1 +data: + password: SkZwQkVKTUN0a1BUZmp4dA== + username: cXVheWNvbmZpZw== +kind: Secret +---- + +. Decode the username by entering the following command: ++ +[source,terminal] +---- +$ echo 'cXVheWNvbmZpZw==' | base64 --decode +---- ++ +.Example output ++ +[source,terminal] +---- +quayconfig +---- + +. Decode the password by entering the following command: ++ +[source,terminal] +---- +$ echo 'SkZwQkVKTUN0a1BUZmp4dA==' | base64 --decode +---- ++ +.Example output ++ +[source,terminal] +---- +JFpBEJMCtkPTfjxt +---- +//// \ No newline at end of file diff --git a/modules/operator-config-cli-download.adoc b/modules/operator-config-cli-download.adoc new file mode 100644 index 000000000..32267741f --- /dev/null +++ b/modules/operator-config-cli-download.adoc @@ -0,0 +1,65 @@ +[id="operator-config-cli-download"] += Downloading the existing configuration + +The following procedure shows you how to download the existing configuration by locating the `Config Bundle Secret`. + +.Procedure + +. Describe the `QuayRegistry` resource by entering the following command: ++ +[source,terminal] +---- +$ oc describe quayregistry -n +---- ++ +[source,terminal] +---- +# ... + Config Bundle Secret: example-registry-config-bundle-v123x +# ... +---- + +. Obtain the secret data by entering the following command: ++ +[source,terminal] +---- +$ oc get secret -n -o jsonpath='{.data}' +---- ++ +.Example output ++ +[source,yaml] +---- +{ + "config.yaml": "RkVBVFVSRV9VU0 ... MDAwMAo=" +} +---- + +. 
Decode the data by entering the following command: ++ +[source,terminal] +---- +$ echo 'RkVBVFVSRV9VU0 ... MDAwMAo=' | base64 --decode +---- ++ +.Example output ++ +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- quayadmin +FEATURE_USER_CREATION: false +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_PROXY_CACHE: true +FEATURE_BUILD_SUPPORT: true +DEFAULT_SYSTEM_REJECT_QUOTA_BYTES: 102400000 +---- + +. Optional. You can export the data into a YAML file into the current directory by passing in the `>> config.yaml` flag. For example: ++ +[source,terminal] +---- +$ echo 'RkVBVFVSRV9VU0 ... MDAwMAo=' | base64 --decode >> config.yaml +---- \ No newline at end of file diff --git a/modules/operator-config-cli.adoc b/modules/operator-config-cli.adoc new file mode 100644 index 000000000..dac3a809c --- /dev/null +++ b/modules/operator-config-cli.adoc @@ -0,0 +1,26 @@ +:_content-type: PROCEDURE +[id="operator-config-cli"] += Customizing {productname} on {ocp} + +After deployment, you can customize the {productname} application by editing the {productname} configuration bundle secret `spec.configBundleSecret`. You can also change the managed status of components and configure resource requests for some components in the `spec.components` object of the `QuayRegistry` resource. + +[id="editing-config-bundle-secret-in-ocp-console"] +== Editing the config bundle secret in the {ocp} console + +Use the following procedure to edit the config bundle secret in the {ocp} console. + +.Procedure + +. On the {productname} Registry overview screen, click the link for the *Config Bundle Secret*. ++ +image:operator-quay-registry-overview.png[{productname} Registry overview] + +. To edit the secret, click **Actions** -> **Edit Secret**. ++ +image:operator-config-bundle-edit-secret.png[Edit secret] + +. Modify the configuration and save the changes. ++ +image:operator-save-config-changes.png[Save changes] + +. 
Monitor the deployment to ensure successful completion and that the configuration changes have taken effect. diff --git a/modules/operator-config-ui-access.adoc b/modules/operator-config-ui-access.adoc new file mode 100644 index 000000000..62cc22530 --- /dev/null +++ b/modules/operator-config-ui-access.adoc @@ -0,0 +1,40 @@ +:_content-type: PROCEDURE +[id="operator-config-ui-access"] += Accessing the config editor + +[NOTE] +==== +The Config Editor UI has been removed and is not supported on IBM Power and IBM Z. +==== + +In the *Details* section of the `QuayRegistry` object, the endpoint for the config editor is available, along with a link to the `Secret` object that contains the credentials for logging into the config editor. For example: + +image:config-editor-details-openshift.png[Config editor details] + +[id="retrieving-the-config-editor-credentials"] +== Retrieving the config editor credentials + +Use the following procedure to retrieve the config editor credentials. + +.Procedure + +. Click on the link for the config editor secret: ++ +image:config-editor-secret.png[Config editor secret] + +. In the *Data* section of the *Secret* details page, click *Reveal values* to see the credentials for logging into the config editor. For example: ++ +image:config-editor-secret-reveal.png[Config editor secret reveal] + +[id="logging-into-config-editor"] +== Logging into the config editor + +Use the following procedure to log into the config editor. + +.Procedure + +* Navigate to the config editor endpoint. When prompted, enter the username, for example, `quayconfig`, and the password.
For example: ++ +image:config-editor-ui.png[Config editor user interface] + + diff --git a/modules/operator-config-ui-change.adoc b/modules/operator-config-ui-change.adoc new file mode 100644 index 000000000..6d3c3601d --- /dev/null +++ b/modules/operator-config-ui-change.adoc @@ -0,0 +1,32 @@ +:_content-type: PROCEDURE +[id="operator-config-ui-change"] +== Changing configuration + +In the following example, you will update your configuration file by changing the default expiration period of deleted tags. + +.Procedure + +. On the config editor, locate the *Time Machine* section. + +. Add an expiration period to the *Allowed expiration periods* box, for example, `4w`: ++ +image:ui-time-machine-add.png[Add expiration period] + +. Select *Validate Configuration Changes* to ensure that the changes are valid. + +. Apply the changes by pressing *Reconfigure Quay*: ++ +image:config-editor-reconfigure.png[Reconfigure] + +After applying the changes, the config tool notifies you that the changes made have been submitted to your {productname} deployment: + +image:config-editor-reconfigured.png[Reconfigured] + + +[NOTE] +==== +Reconfiguring {productname} using the config tool UI can lead to the registry being unavailable for a short time while the updated configuration is applied. +==== + + + diff --git a/modules/operator-config-ui-monitoring.adoc b/modules/operator-config-ui-monitoring.adoc new file mode 100644 index 000000000..1c64b976b --- /dev/null +++ b/modules/operator-config-ui-monitoring.adoc @@ -0,0 +1,27 @@ +:_content-type: CONCEPT +[id="operator-config-ui-monitoring"] += Monitoring reconfiguration in the {productname} UI + +You can monitor the reconfiguration of {productname} in real-time. 
+ +[id="reconfiguring-quayregistry-resource"] +== QuayRegistry resource + +After reconfiguring the {productname} Operator, you can track the progress of the redeployment in the *YAML* tab for the specific instance of `QuayRegistry`, in this case, `example-registry`: + +image:ui-monitor-deploy-update.png[] + +Each time the status changes, you will be prompted to reload the data to see the updated version. Eventually, the {productname} Operator reconciles the changes, and there will be no unhealthy components reported. + +image:ui-monitor-deploy-done.png[] + +[id="reconfiguring-events-tab"] +== Events + +The *Events* tab for the `QuayRegistry` shows some events related to the redeployment. For example: + +image:ui-monitor-deploy-streaming-events.png[] + +Streaming events, for all resources in the namespace that are affected by the reconfiguration, are available in the {ocp} console under *Home* -> *Events*. For example: + +image:ui-monitor-deploy-streaming-events.png[] \ No newline at end of file diff --git a/modules/operator-config-ui-updated.adoc b/modules/operator-config-ui-updated.adoc new file mode 100644 index 000000000..fabe731db --- /dev/null +++ b/modules/operator-config-ui-updated.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="operator-config-ui-updated"] += Accessing updated information after reconfiguration + +Use the following procedure to access the updated `config.yaml` file using the {productname} UI and the config bundle. + +.Procedure + +. On the `QuayRegistry` *Details* screen, click on the *Config Bundle Secret*. + +. In the *Data* section of the `Secret` details screen, click *Reveal values* to see the `config.yaml` file. + +. Check that the change has been applied. In this case, `4w` should be in the list of `TAG_EXPIRATION_OPTIONS`.
For example: ++ +[source,yaml] +---- +--- +SERVER_HOSTNAME: example-quay-openshift-operators.apps.docs.quayteam.org +SETUP_COMPLETE: true +SUPER_USERS: +- quayadmin +TAG_EXPIRATION_OPTIONS: +- 2w +- 4w +- 3y +--- +---- diff --git a/modules/operator-config-ui.adoc b/modules/operator-config-ui.adoc new file mode 100644 index 000000000..3c197ef34 --- /dev/null +++ b/modules/operator-config-ui.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="operator-config-ui"] += Using the config tool to reconfigure {productname} on {ocp} + +As of {productname} 3.10, the configuration tool has been removed on {ocp} deployments, meaning that users cannot configure, or reconfigure, directly from the {ocp} console. Additionally, the `quay-config-editor` pod no longer deploys, users cannot check the status of the config editor route, and the Config Editor Endpoint no longer generates on the {productname} Operator *Details* page + +As a workaround, you can deploy the configuration tool locally and create your own configuration bundle. This includes entering the database and storage credentials used for your {productname} on {ocp} deployment, generating a `config.yaml` file, and using it to deploy {productname} on {ocp} via the command-line interface. + +To deploy the configuration tool locally, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.10/html-single/deploy_red_hat_quay_for_proof-of-concept_non-production_purposes/index#poc-getting-started[Getting started with {productname}] and follow the instructions up to "Configuring {productname}". Advanced configuration settings, such as using custom SSL certificates, can be found on the same page. 
\ No newline at end of file diff --git a/modules/operator-console-monitoring-alerting.adoc b/modules/operator-console-monitoring-alerting.adoc new file mode 100644 index 000000000..3b581b28d --- /dev/null +++ b/modules/operator-console-monitoring-alerting.adoc @@ -0,0 +1,52 @@ +:_content-type: CONCEPT +[id="operator-console-monitoring-alerting"] += Console monitoring and alerting + +{productname} provides support for monitoring instances that were deployed by using the {productname} Operator, from inside the {ocp} console. The new monitoring features include a Grafana dashboard, access to individual metrics, and alerting to notify for frequently restarting `Quay` pods. + +[NOTE] +==== +To enable the monitoring features, you must select *All namespaces on the cluster* as the installation mode when installing the {productname} Operator. +==== + +[id="operator-dashboard"] +== Dashboard + +On the {ocp} console, click *Monitoring* -> *Dashboards* and search for the dashboard of your desired {productname} registry instance: + +image:choose-dashboard.png[Choose Quay dashboard] + +The dashboard shows various statistics including the following: + +* The number of *Organizations*, *Repositories*, *Users*, and *Robot accounts* +* CPU Usage +* Max memory usage +* Rates of pulls and pushes, and authentication requests +* API request rate +* Latencies + +image:console-dashboard-1.png[Console dashboard] + +[id="operator-metrics"] +== Metrics + +You can see the underlying metrics behind the {productname} dashboard by accessing *Monitoring* -> *Metrics* in the UI. In the *Expression* field, enter the text `quay_` to see the list of metrics available: + +image:quay-metrics.png[Quay metrics] + +Select a sample metric, for example, `quay_org_rows`: + +image:quay-metrics-org-rows.png[Number of Quay organizations] + +This metric shows the number of organizations in the registry. It is also directly surfaced in the dashboard. 
+ +[id="operator-alerting"] +== Alerting + +An alert is raised if the `Quay` pods restart too often. The alert can be configured by accessing the *Alerting* rules tab from *Monitoring* -> *Alerting* in the console UI and searching for the Quay-specific alert: + +image:alerting-rules.png[Alerting rules] + +Select the `QuayPodFrequentlyRestarting` rule detail to configure the alert: + +image:quay-pod-frequently-restarting.png[Alerting rule details] diff --git a/modules/operator-custom-ssl-certs-config-bundle.adoc b/modules/operator-custom-ssl-certs-config-bundle.adoc new file mode 100644 index 000000000..d21be4c40 --- /dev/null +++ b/modules/operator-custom-ssl-certs-config-bundle.adoc @@ -0,0 +1,21 @@ +:_content-type: PROCEDURE +[id="operator-custom-ssl-certs-config-bundle"] += Configuring custom SSL/TLS certificates for {productname-ocp} + +When {productname} is deployed on {ocp}, the `tls` component of the `QuayRegistry` custom resource definition (CRD) is set to `managed` by default. As a result, {ocp}'s Certificate Authority is used to create HTTPS endpoints and to rotate SSL/TLS certificates. + +You can configure custom SSL/TLS certificates before or after the initial deployment of {productname-ocp}. This process involves creating or updating the `configBundleSecret` resource within the `QuayRegistry` YAML file to integrate your custom certificates and setting the `tls` component to `unmanaged`. + +[IMPORTANT] +==== +When configuring custom SSL/TLS certificates for {productname}, administrators are responsible for certificate rotation. +==== + +The following procedures enable you to apply custom SSL/TLS certificates to ensure secure communication and meet specific security requirements for your {productname-ocp} deployment. These steps assumed you have already created a Certificate Authority (CA) bundle or an `ssl.key`, and an `ssl.cert`. 
The procedure then shows you how to integrate those files into your {productname-ocp} deployment, which ensures that your registry operates with the specified security settings and conforms to your organization's SSL/TLS policies. + +[NOTE] +==== +* The following procedure is used for securing {productname} with an HTTPS certificate. Note that this differs from managing Certificate Authority Trust Bundles. CA Trust Bundles are used by system processes within the `Quay` container to verify certificates against trusted CAs, and ensure that services like LDAP, storage backend, and OIDC connections are trusted. + +* If you are adding the certificates to an existing deployment, you must include the existing `config.yaml` file in the new config bundle secret, even if you are not making any configuration changes. +==== \ No newline at end of file diff --git a/modules/operator-customize-images.adoc b/modules/operator-customize-images.adoc new file mode 100644 index 000000000..06f7699ac --- /dev/null +++ b/modules/operator-customize-images.adoc @@ -0,0 +1,80 @@ +:_content-type: PROCEDURE +[id="operator-customize-images"] += Customizing Default Operator Images + +[NOTE] +==== +Currently, customizing default Operator images is not supported on IBM Power and IBM Z. +==== + +In certain circumstances, it might be useful to override the default images used by the {productname} Operator. This can be done by setting one or more environment variables in the {productname} Operator `ClusterServiceVersion`. + +[IMPORTANT] +==== +Using this mechanism is not supported for production {productname} environments and is strongly encouraged only for development or testing purposes. There is no guarantee your deployment will work correctly when using non-default images with the {productname} Operator. 
+==== + +[id="custom-environment-variables"] +== Environment Variables + +The following environment variables are used in the {productname} Operator to override component images: + +[cols=2*] +|=== +|Environment Variable +|Component + +|`RELATED_IMAGE_COMPONENT_QUAY` +|`base` + +|`RELATED_IMAGE_COMPONENT_CLAIR` +|`clair` + +|`RELATED_IMAGE_COMPONENT_POSTGRES` +|`postgres` and `clair` databases + +|`RELATED_IMAGE_COMPONENT_REDIS` +|`redis` +|=== + +[NOTE] +==== +Overridden images *must* be referenced by manifest (@sha256:) and not by tag (:latest). +==== + +[id="applying-overrides-to-running-operator"] +== Applying overrides to a running Operator + +When the {productname} Operator is installed in a cluster through the link:https://docs.openshift.com/container-platform/{ocp-y}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)], the managed component container images can be easily overridden by modifying the `ClusterServiceVersion` object. + +Use the following procedure to apply overrides to a running {productname} Operator. + +.Procedure + +. The `ClusterServiceVersion` object is Operator Lifecycle Manager's representation of a running Operator in the cluster. Find the {productname} Operator's `ClusterServiceVersion` by using a Kubernetes UI or the `kubectl`/`oc` CLI tool. For example: ++ +[source,terminal] +---- +$ oc get clusterserviceversions -n +---- + +. 
Using the UI, `oc edit`, or another method, modify the {productname} `ClusterServiceVersion` to include the environment variables outlined above to point to the override images: ++ +*JSONPath*: `spec.install.spec.deployments[0].spec.template.spec.containers[0].env` ++ +[source,yaml] +---- +- name: RELATED_IMAGE_COMPONENT_QUAY + value: quay.io/projectquay/quay@sha256:c35f5af964431673f4ff5c9e90bdf45f19e38b8742b5903d41c10cc7f6339a6d +- name: RELATED_IMAGE_COMPONENT_CLAIR + value: quay.io/projectquay/clair@sha256:70c99feceb4c0973540d22e740659cd8d616775d3ad1c1698ddf71d0221f3ce6 +- name: RELATED_IMAGE_COMPONENT_POSTGRES + value: centos/postgresql-10-centos7@sha256:de1560cb35e5ec643e7b3a772ebaac8e3a7a2a8e8271d9e91ff023539b4dfb33 +- name: RELATED_IMAGE_COMPONENT_REDIS + value: centos/redis-32-centos7@sha256:06dbb609484330ec6be6090109f1fa16e936afcf975d1cbc5fff3e6c7cae7542 +---- + +[NOTE] +==== +This is done at the Operator level, so every `QuayRegistry` will be deployed using these same overrides. +==== diff --git a/modules/operator-customize.adoc b/modules/operator-customize.adoc new file mode 100644 index 000000000..37fb141b9 --- /dev/null +++ b/modules/operator-customize.adoc @@ -0,0 +1,6 @@ +[[operator-customize]] += Customizing Quay after deployment + +The Quay Operator takes an opinionated strategy towards deploying Quay and its dependencies, however there are places where the Quay deployment can be customized. + +== Quay Application Configuration diff --git a/modules/operator-deploy-cli.adoc b/modules/operator-deploy-cli.adoc new file mode 100644 index 000000000..55618c9fc --- /dev/null +++ b/modules/operator-deploy-cli.adoc @@ -0,0 +1,149 @@ +:_content-type: PROCEDURE +[id="operator-deploy-cli"] += Deploying {productname} from the command line + +Use the following procedure to deploy {productname} by using the command-line interface (CLI). + +.Prerequisites + +* You have logged into {ocp} using the CLI. + +.Procedure + +. 
Create a namespace, for example, `quay-enterprise`, by entering the following command: ++ +[source,terminal] +---- +$ oc new-project quay-enterprise +---- + +. Optional. If you want to pre-configure any aspects of your {productname} deployment, create a `Secret` for the config bundle: ++ +[source,terminal] +---- +$ oc create secret generic quay-enterprise-config-bundle --from-file=config-bundle.tar.gz=/path/to/config-bundle.tar.gz +---- + +. Create a `QuayRegistry` custom resource in a file called `quayregistry.yaml` + +.. For a minimal deployment, using all the defaults: ++ +.quayregistry.yaml: +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +---- + +.. Optional. If you want to have some components unmanaged, add this information in the `spec` field. A minimal deployment might look like the following example: ++ +.Example quayregistry.yaml with unmanaged components ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + components: + - kind: clair + managed: false + - kind: horizontalpodautoscaler + managed: false + - kind: mirror + managed: false + - kind: monitoring + managed: false +---- + +.. Optional. If you have created a config bundle, for example, `init-config-bundle-secret`, reference it in the `quayregistry.yaml` file: ++ +.Example quayregistry.yaml with a config bundle ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: init-config-bundle-secret +---- + +.. Optional. 
If you have a proxy configured, you can add the information using overrides for {productname}, Clair, and mirroring: ++ +.Example quayregistry.yaml with proxy configured ++ +[source,yaml] +---- + kind: QuayRegistry + metadata: + name: quay37 + spec: + configBundleSecret: config-bundle-secret + components: + - kind: objectstorage + managed: false + - kind: route + managed: true + - kind: mirror + managed: true + overrides: + env: + - name: DEBUGLOG + value: "true" + - name: HTTP_PROXY + value: quayproxy.qe.devcluster.openshift.com:3128 + - name: HTTPS_PROXY + value: quayproxy.qe.devcluster.openshift.com:3128 + - name: NO_PROXY + value: svc.cluster.local,localhost,quay370.apps.quayperf370.perfscale.devcluster.openshift.com + - kind: tls + managed: false + - kind: clair + managed: true + overrides: + env: + - name: HTTP_PROXY + value: quayproxy.qe.devcluster.openshift.com:3128 + - name: HTTPS_PROXY + value: quayproxy.qe.devcluster.openshift.com:3128 + - name: NO_PROXY + value: svc.cluster.local,localhost,quay370.apps.quayperf370.perfscale.devcluster.openshift.com + - kind: quay + managed: true + overrides: + env: + - name: DEBUGLOG + value: "true" + - name: NO_PROXY + value: svc.cluster.local,localhost,quay370.apps.quayperf370.perfscale.devcluster.openshift.com + - name: HTTP_PROXY + value: quayproxy.qe.devcluster.openshift.com:3128 + - name: HTTPS_PROXY + value: quayproxy.qe.devcluster.openshift.com:3128 +---- + +. Create the `QuayRegistry` in the specified namespace by entering the following command: ++ +[source,terminal] +---- +$ oc create -n quay-enterprise -f quayregistry.yaml +---- + +. 
Enter the following command to see when the `status.registryEndpoint` is populated: ++ +[source,terminal] +---- +$ oc get quayregistry -n quay-enterprise example-registry -o jsonpath="{.status.registryEndpoint}" -w +---- + +.Additional resources + +* For more information about how to track the progress of your {productname} deployment, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-monitor-deploy-cli[Monitoring and debugging the deployment process]. \ No newline at end of file diff --git a/modules/operator-deploy-hpa.adoc b/modules/operator-deploy-hpa.adoc new file mode 100644 index 000000000..00e13f949 --- /dev/null +++ b/modules/operator-deploy-hpa.adoc @@ -0,0 +1,27 @@ +:_content-type: REFERENCE +[id="operator-deploy-hpa"] += Horizontal Pod Autoscaling + +A default deployment shows the following running pods: + +* Two pods for the {productname} application itself (`example-registry-quay-app-*`) +* One Redis pod for {productname} logging (`example-registry-quay-redis-*`) +* One database pod for PostgreSQL used by {productname} for metadata storage (`example-registry-quay-database-*`) +* Two `Quay` mirroring pods (`example-registry-quay-mirror-*`) +* Two pods for the Clair application (`example-registry-clair-app-*`) +* One PostgreSQL pod for Clair (`example-registry-clair-postgres-*`) + +Horizontal Pod Autoscaling is configured by default to be `managed`, and the number of pods for Quay, Clair and repository mirroring is set to two. This facilitates the avoidance of downtime when updating or reconfiguring {productname} through the {productname} Operator or during rescheduling events. 
You can enter the following command to view information about HPA objects: + +[source,terminal] +---- +$ oc get hpa -n quay-enterprise +---- +.Example output +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +example-registry-clair-app Deployment/example-registry-clair-app 16%/90%, 0%/90% 2 10 2 13d +example-registry-quay-app Deployment/example-registry-quay-app 31%/90%, 1%/90% 2 20 2 13d +example-registry-quay-mirror Deployment/example-registry-quay-mirror 27%/90%, 0%/90% 2 20 2 13d +---- \ No newline at end of file diff --git a/modules/operator-deploy-infrastructure.adoc b/modules/operator-deploy-infrastructure.adoc new file mode 100644 index 000000000..0885f0a51 --- /dev/null +++ b/modules/operator-deploy-infrastructure.adoc @@ -0,0 +1,234 @@ +[id="operator-deploy-infrastructure"] += Deploying {productname} on infrastructure nodes + +By default, `Quay` related pods are placed on arbitrary worker nodes when using the {productname} Operator to deploy the registry. For more information about how to use machine sets to configure nodes to only host infrastructure components, see link:https://docs.openshift.com/container-platform/{ocp-y}/machine_management/creating-infrastructure-machinesets.html[Creating infrastructure machine sets]. + +If you are not using {ocp} machine set resources to deploy infra nodes, the section in this document shows you how to manually label and taint nodes for infrastructure purposes. After you have configured your infrastructure nodes either manually or by using machine sets, you can control the placement of `Quay` pods on these nodes using node selectors and tolerations. + +[id="labeling-taint-nodes-for-infrastructure-use"] +== Labeling and tainting nodes for infrastructure use + +Use the following procedure to label and taint nodes for infrastructure use. + +. Enter the following command to reveal the master and worker nodes. In this example, there are three master nodes and six worker nodes. 
++ +[source,terminal] +---- +$ oc get nodes +---- ++ +.Example output ++ +[source,terminal] +---- +NAME                                               STATUS   ROLES    AGE     VERSION +user1-jcnp6-master-0.c.quay-devel.internal         Ready    master   3h30m   v1.20.0+ba45583 +user1-jcnp6-master-1.c.quay-devel.internal         Ready    master   3h30m   v1.20.0+ba45583 +user1-jcnp6-master-2.c.quay-devel.internal         Ready    master   3h30m   v1.20.0+ba45583 +user1-jcnp6-worker-b-65plj.c.quay-devel.internal   Ready    worker   3h21m   v1.20.0+ba45583 +user1-jcnp6-worker-b-jr7hc.c.quay-devel.internal   Ready    worker   3h21m   v1.20.0+ba45583 +user1-jcnp6-worker-c-jrq4v.c.quay-devel.internal   Ready    worker   3h21m   v1.20.0+ba45583 +user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal   Ready    worker   3h21m   v1.20.0+ba45583 +user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal   Ready    worker   3h22m   v1.20.0+ba45583 +user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal   Ready    worker   3h21m   v1.20.0+ba45583 +---- + +. Enter the following commands to label the three worker nodes for infrastructure use: ++ +[source,terminal] +---- +$ oc label node --overwrite user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal node-role.kubernetes.io/infra= +---- ++ +[source,terminal] +---- +$ oc label node --overwrite user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal node-role.kubernetes.io/infra= +---- ++ +[source,terminal] +---- +$ oc label node --overwrite user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal node-role.kubernetes.io/infra= +---- + +. Now, when listing the nodes in the cluster, the last three worker nodes have the `infra` role. 
For example: ++ +[source,terminal] +---- +$ oc get nodes +---- ++ +.Example ++ +[source,terminal] +---- +NAME                                               STATUS   ROLES          AGE     VERSION +user1-jcnp6-master-0.c.quay-devel.internal         Ready    master         4h14m   v1.20.0+ba45583 +user1-jcnp6-master-1.c.quay-devel.internal         Ready    master         4h15m   v1.20.0+ba45583 +user1-jcnp6-master-2.c.quay-devel.internal         Ready    master         4h14m   v1.20.0+ba45583 +user1-jcnp6-worker-b-65plj.c.quay-devel.internal   Ready    worker         4h6m    v1.20.0+ba45583 +user1-jcnp6-worker-b-jr7hc.c.quay-devel.internal   Ready    worker         4h5m    v1.20.0+ba45583 +user1-jcnp6-worker-c-jrq4v.c.quay-devel.internal   Ready    worker         4h5m    v1.20.0+ba45583 +user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba45583 +user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba45583 +user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal   Ready    infra,worker   4h6m    v1.20.0+ba4558 +---- + +. When a worker node is assigned the `infra` role, there is a chance that user workloads could get inadvertently assigned to an infra node. To avoid this, you can apply a taint to the infra node, and then add tolerations for the pods that you want to control. 
For example: ++ +[source,terminal] +---- +$ oc adm taint nodes user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal node-role.kubernetes.io/infra:NoSchedule +---- ++ +[source,terminal] +---- +$ oc adm taint nodes user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal node-role.kubernetes.io/infra:NoSchedule +---- ++ +[source,terminal] +---- +$ oc adm taint nodes user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal node-role.kubernetes.io/infra:NoSchedule +---- + +[id="creating-project-node-selector-toleration"] +== Creating a project with node selector and tolerations + +Use the following procedure to create a project with node selector and tolerations. + +[NOTE] +==== +The following procedure can also be completed by removing the installed {productname} Operator and the namespace, or namespaces, used when creating the deployment. Users can then create a new resource with the following annotation. +==== + +.Procedure + +. Enter the following command to edit the namespace where {productname} is deployed, and the following annotation: ++ +[source,terminal] +---- +$ oc annotate namespace openshift.io/node-selector='node-role.kubernetes.io/infra=' +---- ++ +Example output ++ +[source,yaml] +---- +namespace/ annotated +---- + +. 
Obtain a list of available pods by entering the following command: ++ +[source,terminal] +---- +$ oc get pods -o wide +---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +example-registry-clair-app-5744dd64c9-9d5jt 1/1 Running 0 173m 10.130.4.13 stevsmit-quay-ocp-tes-5gwws-worker-c-6xkn7 +example-registry-clair-app-5744dd64c9-fg86n 1/1 Running 6 (3h21m ago) 3h24m 10.131.0.91 stevsmit-quay-ocp-tes-5gwws-worker-c-dnhdp +example-registry-clair-postgres-845b47cd88-vdchz 1/1 Running 0 3h21m 10.130.4.10 stevsmit-quay-ocp-tes-5gwws-worker-c-6xkn7 +example-registry-quay-app-64cbc5bcf-8zvgc 1/1 Running 1 (3h24m ago) 3h24m 10.130.2.12 stevsmit-quay-ocp-tes-5gwws-worker-a-tk8dx +example-registry-quay-app-64cbc5bcf-pvlz6 1/1 Running 0 3h24m 10.129.4.10 stevsmit-quay-ocp-tes-5gwws-worker-b-fjhz4 +example-registry-quay-app-upgrade-8gspn 0/1 Completed 0 3h24m 10.130.2.10 stevsmit-quay-ocp-tes-5gwws-worker-a-tk8dx +example-registry-quay-database-784d78b6f8-2vkml 1/1 Running 0 3h24m 10.131.4.10 stevsmit-quay-ocp-tes-5gwws-worker-c-2frtg +example-registry-quay-mirror-d5874d8dc-fmknp 1/1 Running 0 3h24m 10.129.4.9 stevsmit-quay-ocp-tes-5gwws-worker-b-fjhz4 +example-registry-quay-mirror-d5874d8dc-t4mff 1/1 Running 0 3h24m 10.129.2.19 stevsmit-quay-ocp-tes-5gwws-worker-a-k7w86 +example-registry-quay-redis-79848898cb-6qf5x 1/1 Running 0 3h24m 10.130.2.11 stevsmit-quay-ocp-tes-5gwws-worker-a-tk8dx + +---- + +. 
Enter the following command to delete the available pods: ++ +[source,terminal] +---- +$ oc delete pods --selector quay-operator/quayregistry=example-registry -n quay-enterprise +---- ++ +Example output ++ +[source,terminal] +---- +pod "example-registry-clair-app-5744dd64c9-9d5jt" deleted +pod "example-registry-clair-app-5744dd64c9-fg86n" deleted +pod "example-registry-clair-postgres-845b47cd88-vdchz" deleted +pod "example-registry-quay-app-64cbc5bcf-8zvgc" deleted +pod "example-registry-quay-app-64cbc5bcf-pvlz6" deleted +pod "example-registry-quay-app-upgrade-8gspn" deleted +pod "example-registry-quay-database-784d78b6f8-2vkml" deleted +pod "example-registry-quay-mirror-d5874d8dc-fmknp" deleted +pod "example-registry-quay-mirror-d5874d8dc-t4mff" deleted +pod "example-registry-quay-redis-79848898cb-6qf5x" deleted +---- ++ +After the pods have been deleted, they automatically cycle back up and should be scheduled on the dedicated infrastructure nodes. + +//// +. Enter the following command to create the project on infra nodes: ++ +[source,terminal] +---- +$ oc apply -f .yaml +---- ++ +.Example output ++ +[source,terminal] +---- +project.project.openshift.io/quay-registry created +---- ++ +Subsequent resources created in the `` namespace should now be scheduled on the dedicated infrastructure nodes. +//// + +[id="installing-quay-operator-namespace"] +== Installing {productname-ocp} on a specific namespace + +Use the following procedure to install {productname-ocp} in a specific namespace. + +* To install the {productname} Operator in a specific namespace, you must explicitly specify the appropriate project namespace, as in the following command. ++ +In the following example, the `quay-registry` namespace is used. This results in the `quay-operator` pod landing on one of the three infrastructure nodes. 
For example: ++ +[source,terminal] +---- +$ oc get pods -n quay-registry -o wide +---- ++ +.Example output ++ +[source,terminal] +---- +NAME                                    READY   STATUS    RESTARTS   AGE   IP            NODE                                               +quay-operator.v3.4.1-6f6597d8d8-bd4dp   1/1     Running   0          30s   10.131.0.16   user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal +---- + +[id="creating-registry"] +== Creating the {productname} registry + +Use the following procedure to create the {productname} registry. + +* Enter the following command to create the {productname} registry. Then, wait for the deployment to be marked as `ready`. In the following example, you should see that they have only been scheduled on the three nodes that you have labelled for infrastructure purposes. ++ +[source,terminal] +---- +$ oc get pods -n quay-registry -o wide +---- ++ +.Example output ++ +[source,terminal] +---- +NAME                                                   READY   STATUS      RESTARTS   AGE     IP            NODE                                                 +example-registry-clair-app-789d6d984d-gpbwd            1/1     Running     1          5m57s   10.130.2.80   user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal +example-registry-clair-postgres-7c8697f5-zkzht         1/1     Running     0          4m53s   10.129.2.19   user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal +example-registry-quay-app-56dd755b6d-glbf7             1/1     Running     1          5m57s   10.129.2.17   user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal +example-registry-quay-database-8dc7cfd69-dr2cc         1/1     Running     0          5m43s   10.129.2.18   user1-jcnp6-worker-c-pwxfp.c.quay-devel.internal +example-registry-quay-mirror-78df886bcc-v75p9          1/1     Running     0          5m16s   10.131.0.24   user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal +example-registry-quay-postgres-init-8s8g9              0/1     Completed   0          5m54s   
10.130.2.79   user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal +example-registry-quay-redis-5688ddcdb6-ndp4t           1/1     Running     0          5m56s   10.130.2.78   user1-jcnp6-worker-d-m9gg4.c.quay-devel.internal +quay-operator.v3.4.1-6f6597d8d8-bd4dp                  1/1     Running     0          22m     10.131.0.16   user1-jcnp6-worker-d-h5tv2.c.quay-devel.internal +---- diff --git a/modules/operator-deploy-ui.adoc b/modules/operator-deploy-ui.adoc new file mode 100644 index 000000000..c3ebe8269 --- /dev/null +++ b/modules/operator-deploy-ui.adoc @@ -0,0 +1,17 @@ +:_content-type: PROCEDURE +[id="operator-deploy-ui"] += Deploying {productname} from the {ocp} console + +. Create a namespace, for example, `quay-enterprise`. + +. Select *Operators* -> *Installed Operators*, then select the Quay Operator to navigate to the Operator detail view. + +. Click 'Create Instance' on the 'Quay Registry' tile under 'Provided APIs'. + +. Optionally change the 'Name' of the `QuayRegistry`. This will affect the hostname of the registry. All other fields have been populated with defaults. + +. Click 'Create' to submit the `QuayRegistry` to be deployed by the Quay Operator. + +. You should be redirected to the `QuayRegistry` list view. Click on the `QuayRegistry` you just created to see the details view. + +. Once the 'Registry Endpoint' has a value, click it to access your new Quay registry via the UI. You can now select 'Create Account' to create a user and sign in. diff --git a/modules/operator-deploy-view-pods-cli.adoc b/modules/operator-deploy-view-pods-cli.adoc new file mode 100644 index 000000000..dbcd133b2 --- /dev/null +++ b/modules/operator-deploy-view-pods-cli.adoc @@ -0,0 +1,36 @@ +:_content-type: PROCEDURE +[id="operator-deploy-view-pods-cli"] += Viewing created components using the command line + +Use the following procedure to view deployed {productname} components. + +.Prerequisites + +* You have deployed {productname-ocp}. + +.Procedure + +. 
Enter the following command to view the deployed components: ++ +[source,terminal] +---- +$ oc get pods -n quay-enterprise +---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +example-registry-clair-app-5ffc9f77d6-jwr9s 1/1 Running 0 3m42s +example-registry-clair-app-5ffc9f77d6-wgp7d 1/1 Running 0 3m41s +example-registry-clair-postgres-54956d6d9c-rgs8l 1/1 Running 0 3m5s +example-registry-quay-app-79c6b86c7b-8qnr2 1/1 Running 4 3m42s +example-registry-quay-app-79c6b86c7b-xk85f 1/1 Running 4 3m41s +example-registry-quay-app-upgrade-5kl5r 0/1 Completed 4 3m50s +example-registry-quay-database-b466fc4d7-tfrnx 1/1 Running 2 3m42s +example-registry-quay-mirror-6d9bd78756-6lj6p 1/1 Running 0 2m58s +example-registry-quay-mirror-6d9bd78756-bv6gq 1/1 Running 0 2m58s +example-registry-quay-postgres-init-dzbmx 0/1 Completed 0 3m43s +example-registry-quay-redis-8bd67b647-skgqx 1/1 Running 0 3m42s +---- \ No newline at end of file diff --git a/modules/operator-deploy.adoc b/modules/operator-deploy.adoc new file mode 100644 index 000000000..26144abe0 --- /dev/null +++ b/modules/operator-deploy.adoc @@ -0,0 +1,14 @@ +:_content-type: REFERENCE +[id="operator-deploy"] += Deploying {productname} using the Operator + +{productname-ocp} can be deployed using command-line interface or from the {ocp} console. The steps are fundamentally the same. + + + + + + + + + diff --git a/modules/operator-differences.adoc b/modules/operator-differences.adoc new file mode 100644 index 000000000..1122563d3 --- /dev/null +++ b/modules/operator-differences.adoc @@ -0,0 +1,28 @@ +:_content-type: CONCEPT +[id="operator-differences"] +//= Differences from Earlier Versions + +With the release of {productname} 3.4.0, the {productname} Operator was re-written to offer an enhanced experience and to add more support for Day 2 operations. As a result, the {productname} Operator is now simpler to use and is more opinionated. 
The key difference from versions prior to {productname} 3.4.0 include the following: + +* The `QuayEcosystem` custom resource has been replaced with the `QuayRegistry` custom resource. +* The default installation options produces a fully supported {productname} environment, with all managed dependencies, such as database, caches, object storage, and so on, supported for production use. ++ +[NOTE] +==== +Some components might not be highly available. +==== + +* A new validation library for {productname}'s configuration. + +ifeval::["{productname}" == "Red Hat Quay"] +* Object storage can now be managed by the {productname} Operator using the `ObjectBucketClaim` Kubernetes API ++ +[NOTE] +==== +Red Hat OpenShift Data Foundation can be used to provide a supported implementation of this API on {ocp}. +==== +endif::[] +ifeval::["{productname}" == "Project Quay"] +* Object storage can now be provided by the {productname} Operator using the `ObjectBucketClaim` Kubernetes API. For example, the NooBaa Operator from `OperatorHub.io` can be used to provide an implementation of that API. +endif::[] +* Customization of the container images used by deployed pods for testing and development scenarios. \ No newline at end of file diff --git a/modules/operator-external-access.adoc b/modules/operator-external-access.adoc new file mode 100644 index 000000000..156e054c2 --- /dev/null +++ b/modules/operator-external-access.adoc @@ -0,0 +1,116 @@ +[id="operator-external-access"] += External Access to the Registry + +When running on {ocp}, the `Routes` API is available and is automatically used as a managed component. After creating the `QuayRegistry` object, the external access point can be found in the status block of the `QuayRegistry` object. For example: + +[source,yaml] +---- +status: + registryEndpoint: some-quay.my-namespace.apps.mycluster.com +---- + +//// +When running on native Kubernetes, the Operator creates a Service of `type: ClusterIP` for your registry. 
You are then responsible for external access (like `Ingress`). + +``` +$ kubectl get services -n +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +some-quay ClusterIP 172.30.143.199 443/TCP,9091/TCP 23h +``` +//// + + +//// +== Using a Custom Hostname and TLS + +By default the Operator creates the Route and uses OpenShift's cluster wildcard certificate. If you want to access {productname} using a custom hostname and bring your own TLS certificate/key pair, follow these steps. + +If `FEATURE_BUILD_SUPPORT: true`, then make sure the certificate/key pair is also valid for the `BUILDMAN_HOSTNAME`. + +If the given cert/key pair is invalid for the above hostnames, then the Quay Operator will reject your provided certificate/key pair. + +Next, create a `Secret` with the following content: + +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: my-config-bundle +data: + config.yaml: + ssl.cert: + ssl.key: +---- + +Then, create a QuayRegistry which references the created `Secret`: + +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: some-quay +spec: + configBundleSecret: my-config-bundle +---- + +== Using OpenShift Provided TLS Certificate + +It is preferred to have TLS terminated in the Quay app container. Therefore, to use the OpenShift provided TLS, you must create a `Route` with type "reencrypt", which will use the OpenShift provided TLS at the edge, and Quay Operator-generated TLS within the cluster. This is achieved by marking the `route` component as unmanaged, and creating your own `Route` which link:https://docs.openshift.com/container-platform/4.7/networking/routes/secured-routes.html[reencrypts TLS] using the Operator-generated CA certificate. + +Create a `Secret` with a `config.yaml` key containing the `SERVER_HOSTNAME` field of value `-.apps.` (the `Route` with this hostname will be created in a later step). 
+ +[source,yaml] +---- +apiVersion: v1 +kind: Secret +metadata: + name: my-config-bundle +data: + config.yaml: +---- + +Create a `QuayRegistry` referencing the above `Secret` and with the `route` component unmanaged: + +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: some-quay +spec: + configBundleSecret: my-config-bundle + components: + - kind: route + managed: false +---- + +Wait for the `QuayRegistry` to be fully reconciled by the Quay Operator. Then, acquire the generated TLS certificate by finding the `Secret` being mounted into the Quay app pods and copying the `tls.cert` value. + +Create a `Route` with TLS reencryption and the destination CA certificate you copied above: + +[source,yaml] +---- +apiVersion: v1 +kind: Route +metadata: + name: registry + namespace: +spec: + to: + kind: Service + name: + tls: + termination: reencrypt + destinationCACertificate: + -----BEGIN CERTIFICATE----- + [...] + -----END CERTIFICATE----- +---- + +You can now access your Quay registry using the created `Route`. + +//// + + diff --git a/modules/operator-first-user-ui.adoc b/modules/operator-first-user-ui.adoc new file mode 100644 index 000000000..6848e004d --- /dev/null +++ b/modules/operator-first-user-ui.adoc @@ -0,0 +1,32 @@ +:_content-type: PROCEDURE +[id="operator-first-user"] += Using the {productname} UI to create the first user + +Use the following procedure to create the first user by the {productname} UI. + +[NOTE] +==== +This procedure assumes that the `FEATURE_USER_CREATION` config option has not been set to `false.` If it is `false`, the `Create Account` functionality on the UI will be disabled, and you will have to use the API to create the first user. +==== + +.Procedure + +. In the {ocp} console, navigate to *Operators* -> *Installed Operators*, with the appropriate namespace / project. + +. Click on the newly installed `QuayRegistry` object to view the details. 
For example: ++ +image:config-editor-details-operator-36.png[QuayRegistry details] + +. After the `Registry Endpoint` has a value, navigate to this URL in your browser. + +. Select *Create Account* in the {productname} registry UI to create a user. For example: ++ +image:create-account-1.png[Create Account] + +. Enter the details for *Username*, *Password*, *Email*, and then click *Create Account*. For example: ++ +image:create-account-2.png[Enter account details] + +After creating the first user, you are automatically logged in to the {productname} registry. For example: + +image:create-account-3.png[Initial log in] \ No newline at end of file diff --git a/modules/operator-geo-replication.adoc b/modules/operator-geo-replication.adoc new file mode 100644 index 000000000..11f50bc5e --- /dev/null +++ b/modules/operator-geo-replication.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="operator-geo-replication-faq"] += Does the {productname} Operator support geo-replication? + +As of {productname} 3.7 and later, the {productname} Operator supports geo-replication deployments. Earlier versions of {productname} do not support geo-replication. This feature will not be backported to earlier versions of {productname}. You must upgrade {productname} to 3.7 or later to use the geo-replication feature. \ No newline at end of file diff --git a/modules/operator-georepl-site-removal.adoc b/modules/operator-georepl-site-removal.adoc new file mode 100644 index 000000000..e90d436f1 --- /dev/null +++ b/modules/operator-georepl-site-removal.adoc @@ -0,0 +1,79 @@ +:_content-type: PROCEDURE +[id="operator-georepl-site-removal"] += Removing a geo-replicated site from your {productname-ocp} deployment + +By using the following procedure, {productname} administrators can remove sites in a geo-replicated setup. + +.Prerequisites + +* You are logged into {ocp}. +* You have configured {productname} geo-replication with at least two sites, for example, `usstorage` and `eustorage`. 
+* Each site has its own Organization, Repository, and image tags. + +.Procedure + +. Sync the blobs between all of your defined sites by running the following command: ++ +[source,terminal] +---- +$ python -m util.backfillreplication +---- ++ +[WARNING] +==== +Prior to removing storage engines from your {productname} `config.yaml` file, you *must* ensure that all blobs are synced between all defined sites. + +When running this command, replication jobs are created which are picked up by the replication worker. If there are blobs that need to be replicated, the script returns UUIDs of blobs that will be replicated. If you run this command multiple times, and the output from the return script is empty, it does not mean that the replication process is done; it means that there are no more blobs to be queued for replication. Customers should use appropriate judgement before proceeding, as the allotted time replication takes depends on the number of blobs detected. + +Alternatively, you could use a third party cloud tool, such as Microsoft Azure, to check the synchronization status. + +This step must be completed before proceeding. +==== + +. In your {productname} `config.yaml` file for site `usstorage`, remove the `DISTRIBUTED_STORAGE_CONFIG` entry for the `eustorage` site. + +. Enter the following command to identify your `Quay` application pods: ++ +[source,terminal] +---- +$ oc get pod -n +---- ++ +.Example output ++ +[source,terminal] +---- +quay390usstorage-quay-app-5779ddc886-2drh2 +quay390eustorage-quay-app-66969cd859-n2ssm +---- + +. Enter the following command to open an interactive shell session in the `usstorage` pod: ++ +[source,terminal] +---- +$ oc rsh quay390usstorage-quay-app-5779ddc886-2drh2 +---- + +. Enter the following command to permanently remove the `eustorage` site: ++ +[IMPORTANT] +==== +The following action cannot be undone. Use with caution.
+==== ++ +[source,terminal] +---- +sh-4.4$ python -m util.removelocation eustorage +---- ++ +.Example output ++ +[source,terminal] +---- +WARNING: This is a destructive operation. Are you sure you want to remove eustorage from your storage locations? [y/n] y +Deleted placement 30 +Deleted placement 31 +Deleted placement 32 +Deleted placement 33 +Deleted location eustorage +---- \ No newline at end of file diff --git a/modules/operator-helm-oci.adoc b/modules/operator-helm-oci.adoc new file mode 100644 index 000000000..819a9cd7f --- /dev/null +++ b/modules/operator-helm-oci.adoc @@ -0,0 +1,46 @@ +[id="operator-helm-oci"] += Enabling OCI support with the {productname} Operator + +Use the following procedure to configure Open Container Initiative (OCI) support for {productname}. + +.Procedure + +. Create a `quay-config-bundle` YAML file that includes the following information: ++ +[source,yaml] +---- +apiVersion: v1 +stringData: + config.yaml: | + FEATURE_GENERAL_OCI_SUPPORT: true +kind: Secret +metadata: + name: quay-config-bundle + namespace: quay-enterprise +type: Opaque +---- + +. Enter the following command to create the `quay-config-bundle` object in the appropriate namespace, passing in the necessary properties to enable OCI support. For example: ++ +[source,terminal] +---- +$ oc create -n quay-enterprise -f quay-config-bundle.yaml +---- + +. In your `quay-registry.yaml` file, reference the secret for the `spec.configBundleSecret` field.
For example: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: quay-config-bundle +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/oci-intro#doc-wrapper[OCI Support and {productname}] \ No newline at end of file diff --git a/modules/operator-install.adoc b/modules/operator-install.adoc new file mode 100644 index 000000000..a01e42c73 --- /dev/null +++ b/modules/operator-install.adoc @@ -0,0 +1,25 @@ +:_content-type: PROCEDURE +[id="operator-install"] += Installing the {productname} Operator from the OperatorHub + +Use the following procedure to install the {productname} Operator from the {ocp} OperatorHub. + +.Procedure + +. Using the {ocp} console, select *Operators* -> *OperatorHub*. + +. In the search box, type *{productname}* and select the official {productname} Operator provided by Red Hat. This directs you to the *Installation* page, which outlines the features, prerequisites, and deployment information. + +. Select *Install*. This directs you to the *Operator Installation* page. + +. The following choices are available for customizing the installation: + +.. **Update Channel:** Choose the update channel, for example, `stable-{producty}` for the latest release. + +.. **Installation Mode:** +... Choose `All namespaces on the cluster` if you want the {productname} Operator to be available cluster-wide. It is recommended that you install the {productname} Operator cluster-wide. If you choose a single namespace, the monitoring component will not be available by default. +... Choose `A specific namespace on the cluster` if you want it deployed only within a single namespace. + +* **Approval Strategy:** Choose to approve either automatic or manual updates. Automatic update strategy is recommended. + +. 
Select *Install*. \ No newline at end of file diff --git a/modules/operator-ipv6-dual-stack.adoc b/modules/operator-ipv6-dual-stack.adoc new file mode 100644 index 000000000..c213e116a --- /dev/null +++ b/modules/operator-ipv6-dual-stack.adoc @@ -0,0 +1,65 @@ +:_content-type: CONCEPT +[id="operator-ipv6-dual-stack"] += Deploying IPv6 on {productname-ocp} + +[NOTE] +==== +Currently, deploying IPv6 on the {productname-ocp} is not supported on IBM Power and IBM Z. +==== + +Your {productname-ocp} deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. + +For a list of known limitations, see xref:operator-ipv6-limitations-38[IPv6 limitations] + +[id="proc-manage-enabling-ipv6"] +== Enabling the IPv6 protocol family + +Use the following procedure to enable IPv6 support on your {productname} deployment. + +.Prerequisites + +* You have updated {productname} to at least version 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `IPv6`, for example: ++ +[source,yaml] +---- +# ... +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: IPv6 +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +# ... +---- + +. Start, or restart, your {productname} deployment. + +. 
Check that your deployment is listening to IPv6 by entering the following command: ++ +[source,terminal] +---- +$ curl /health/instance +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +After enabling IPv6 in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured to use IPv6 and is not hindered by the xref:operator-ipv6-limitations-38[IPv6 and dual-stack limitations]. + +[WARNING] +==== +If your environment is configured to IPv4, but the `FEATURE_LISTEN_IP_VERSION` configuration field is set to `IPv6`, {productname} will fail to deploy. +==== + +[id="operator-ipv6-limitations-38"] +== IPv6 limitations + +* Currently, attempting to configure your {productname} deployment with the common Microsoft Azure Blob Storage configuration will not work on IPv6 single stack environments. Because the endpoint of Microsoft Azure Blob Storage does not support IPv6, there is no workaround in place for this issue. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PROJQUAY-4433]. + +* Currently, attempting to configure your {productname} deployment with Amazon S3 CloudFront will not work on IPv6 single stack environments. Because the endpoint of Amazon S3 CloudFront does not support IPv6, there is no workaround in place for this issue. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. 
\ No newline at end of file diff --git a/modules/operator-managed-postgres.adoc b/modules/operator-managed-postgres.adoc new file mode 100644 index 000000000..ba294cc21 --- /dev/null +++ b/modules/operator-managed-postgres.adoc @@ -0,0 +1,34 @@ +:_content-type: CONCEPT +[id="operator-managed-postgres"] += Using the managed PostgreSQL database + +With {productname} 3.9, if your database is managed by the {productname} Operator, updating from {productname} 3.8 -> 3.9 automatically handles upgrading PostgreSQL 10 to PostgreSQL 13. + +[IMPORTANT] +==== +* Users with a managed database are required to upgrade their PostgreSQL database from 10 -> 13. +* If your {productname} and Clair databases are managed by the Operator, the database upgrades for each component must succeed for the 3.9.0 upgrade to be successful. If either of the database upgrades fail, the entire {productname} version upgrade fails. This behavior is expected. +==== + +If you do not want the {productname} Operator to upgrade your PostgreSQL deployment from PostgreSQL 10 -> 13, you must set the PostgreSQL parameter to `managed: false` in your `quayregistry.yaml` file. For more information about setting your database to unmanaged, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#operator-unmanaged-postgres[Using an existing Postgres database]. + +[IMPORTANT] +==== +* It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. 
+ +==== + +If you want your PostgreSQL database to match the same version as your {rhel} system, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_different_types_of_servers/using-databases#migrating-to-a-rhel-8-version-of-postgresql_using-postgresql[Migrating to a RHEL 8 version of PostgreSQL] for {rhel-short} 8 or link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_using_database_servers/using-postgresql_configuring-and-using-database-servers#migrating-to-a-rhel-9-version-of-postgresql_using-postgresql[Migrating to a RHEL 9 version of PostgreSQL] for {rhel-short} 9. + +For more information about the {productname} 3.8 -> 3.9 procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/upgrade_red_hat_quay/index#operator-upgrade[Upgrading the {productname} Operator overview]. + +[id="operator-managed-postgres-recommendations"] +== PostgreSQL database recommendations + +The {productname} team recommends the following for managing your PostgreSQL database. + +* Database backups should be performed regularly using either the supplied tools on the PostgreSQL image or your own backup infrastructure. The {productname} Operator does not currently ensure that the PostgreSQL database is backed up. + +* Restoring the PostgreSQL database from a backup must be done using PostgreSQL tools and procedures. Be aware that your `Quay` pods should not be running while the database restore is in progress. + +* Database disk space is allocated automatically by the {productname} Operator with 50 GiB. This number represents a usable amount of storage for most small to medium {productname} installations but might not be sufficient for your use cases. Resizing the database volume is currently not handled by the {productname} Operator. 
\ No newline at end of file diff --git a/modules/operator-managed-storage.adoc b/modules/operator-managed-storage.adoc new file mode 100644 index 000000000..072beb0b0 --- /dev/null +++ b/modules/operator-managed-storage.adoc @@ -0,0 +1,27 @@ +:_content-type: CONCEPT +[id="operator-managed-storage"] += Managed storage + +ifeval::["{productname}" == "Red Hat Quay"] +If you want the {productname} Operator to manage object storage for {productname}, your cluster needs to be capable of providing object storage through the `ObjectBucketClaim` API. Using the {odf} Operator, there are two supported options available: + +* A standalone instance of the Multi-Cloud Object Gateway backed by a local Kubernetes `PersistentVolume` storage +** Not highly available +** Included in the {productname} subscription +** Does not require a separate subscription for {odf} +* A production deployment of {odf} with scale-out Object Service and Ceph +** Highly available +** Requires a separate subscription for {odf} + +To use the standalone instance option, continue reading below. For production deployment of {odf}, please refer to the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/[official documentation]. + +endif::[] + +ifeval::["{productname}" == "Project Quay"] +If you want the Operator to manage object storage for {productname}, your cluster needs to be capable of providing it through the `ObjectBucketClaim` API. There are multiple implementations of this API available, for instance, link:https://operatorhub.io/operator/noobaa-operator[NooBaa] in combination with Kubernetes `PersistentVolumes` or scalable storage backends like Ceph. Refer to the link:https://github.com/noobaa/noobaa-core[NooBaa documentation] for more details on how to deploy this component. +endif::[] + +[NOTE] +==== +Object storage disk space is allocated automatically by the {productname} Operator with 50 GiB. 
This number represents a usable amount of storage for most small to medium {productname} installations but might not be sufficient for your use cases. Resizing the {odf} volume is currently not handled by the {productname} Operator. See the section below about resizing managed storage for more details. +==== \ No newline at end of file diff --git a/modules/operator-monitor-deploy-cli.adoc b/modules/operator-monitor-deploy-cli.adoc new file mode 100644 index 000000000..b9338e120 --- /dev/null +++ b/modules/operator-monitor-deploy-cli.adoc @@ -0,0 +1,150 @@ +:_content-type: PROCEDURE +[id="operator-monitor-deploy-cli"] += Monitoring and debugging the deployment process + +Users can now troubleshoot problems during the deployment phase. The status in the `QuayRegistry` object can help you monitor the health of the components during the deployment an help you debug any problems that may arise. + +.Procedure + +. Enter the following command to check the status of your deployment: ++ +[source,terminal] +---- +$ oc get quayregistry -n quay-enterprise -o yaml +---- ++ +.Example output ++ +Immediately after deployment, the `QuayRegistry` object will show the basic configuration: ++ +[source,yaml] +---- +apiVersion: v1 +items: +- apiVersion: quay.redhat.com/v1 + kind: QuayRegistry + metadata: + creationTimestamp: "2021-09-14T10:51:22Z" + generation: 3 + name: example-registry + namespace: quay-enterprise + resourceVersion: "50147" + selfLink: /apis/quay.redhat.com/v1/namespaces/quay-enterprise/quayregistries/example-registry + uid: e3fc82ba-e716-4646-bb0f-63c26d05e00e + spec: + components: + - kind: postgres + managed: true + - kind: clair + managed: true + - kind: redis + managed: true + - kind: horizontalpodautoscaler + managed: true + - kind: objectstorage + managed: true + - kind: route + managed: true + - kind: mirror + managed: true + - kind: monitoring + managed: true + - kind: tls + managed: true + - kind: clairpostgres + managed: true + configBundleSecret: 
example-registry-config-bundle-kt55s +kind: List +metadata: + resourceVersion: "" + selfLink: "" +---- + +. Use the `oc get pods` command to view the current state of the deployed components: ++ +[source,terminal] +---- +$ oc get pods -n quay-enterprise +---- ++ +.Example output ++ +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +example-registry-clair-app-86554c6b49-ds7bl 0/1 ContainerCreating 0 2s +example-registry-clair-app-86554c6b49-hxp5s 0/1 Running 1 17s +example-registry-clair-postgres-68d8857899-lbc5n 0/1 ContainerCreating 0 17s +example-registry-quay-app-upgrade-h2v7h 0/1 ContainerCreating 0 9s +example-registry-quay-database-66f495c9bc-wqsjf 0/1 ContainerCreating 0 17s +example-registry-quay-mirror-854c88457b-d845g 0/1 Init:0/1 0 2s +example-registry-quay-mirror-854c88457b-fghxv 0/1 Init:0/1 0 17s +example-registry-quay-postgres-init-bktdt 0/1 Terminating 0 17s +example-registry-quay-redis-f9b9d44bf-4htpz 0/1 ContainerCreating 0 17s +---- + +. While the deployment is in progress, the `QuayRegistry` object will show the current status. In this instance, database migrations are taking place, and other components are waiting until completion: ++ +[source,terminal] +---- + status: + conditions: + - lastTransitionTime: "2021-09-14T10:52:04Z" + lastUpdateTime: "2021-09-14T10:52:04Z" + message: all objects created/updated successfully + reason: ComponentsCreationSuccess + status: "False" + type: RolloutBlocked + - lastTransitionTime: "2021-09-14T10:52:05Z" + lastUpdateTime: "2021-09-14T10:52:05Z" + message: running database migrations + reason: MigrationsInProgress + status: "False" + type: Available + lastUpdated: 2021-09-14 10:52:05.371425635 +0000 UTC + unhealthyComponents: + clair: + - lastTransitionTime: "2021-09-14T10:51:32Z" + lastUpdateTime: "2021-09-14T10:51:32Z" + message: 'Deployment example-registry-clair-postgres: Deployment does not have minimum availability.' 
+ reason: MinimumReplicasUnavailable + status: "False" + type: Available + - lastTransitionTime: "2021-09-14T10:51:32Z" + lastUpdateTime: "2021-09-14T10:51:32Z" + message: 'Deployment example-registry-clair-app: Deployment does not have minimum availability.' + reason: MinimumReplicasUnavailable + status: "False" + type: Available + mirror: + - lastTransitionTime: "2021-09-14T10:51:32Z" + lastUpdateTime: "2021-09-14T10:51:32Z" + message: 'Deployment example-registry-quay-mirror: Deployment does not have minimum availability.' + reason: MinimumReplicasUnavailable + status: "False" + type: Available +---- + +. When the deployment process finishes successfully, the status in the `QuayRegistry` object shows no unhealthy components: ++ +[source,terminal] +---- + status: + conditions: + - lastTransitionTime: "2021-09-14T10:52:36Z" + lastUpdateTime: "2021-09-14T10:52:36Z" + message: all registry component healthchecks passing + reason: HealthChecksPassing + status: "True" + type: Available + - lastTransitionTime: "2021-09-14T10:52:46Z" + lastUpdateTime: "2021-09-14T10:52:46Z" + message: all objects created/updated successfully + reason: ComponentsCreationSuccess + status: "False" + type: RolloutBlocked + currentVersion: {producty} + lastUpdated: 2021-09-14 10:52:46.104181633 +0000 UTC + registryEndpoint: https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org + unhealthyComponents: {} +---- \ No newline at end of file diff --git a/modules/operator-preconfig-storage.adoc b/modules/operator-preconfig-storage.adoc new file mode 100644 index 000000000..2a5322e21 --- /dev/null +++ b/modules/operator-preconfig-storage.adoc @@ -0,0 +1,9 @@ +:_content-type: REFERENCE +[id="operator-storage-preconfig"] += Configuring object storage + +You need to configure object storage before installing {productname}, irrespective of whether you are allowing the {productname} Operator to manage the storage or managing it yourself. 
+ +If you want the {productname} Operator to be responsible for managing storage, see the section on xref:operator-managed-storage[Managed storage] for information on installing and configuring NooBaa and the Red Hat OpenShift Data Foundations Operator. + +If you are using a separate storage solution, set `objectstorage` as `unmanaged` when configuring the Operator. See the following section, xref:operator-unmanaged-storage[Unmanaged storage], for details of configuring existing storage. diff --git a/modules/operator-preconfig-tls-routes.adoc b/modules/operator-preconfig-tls-routes.adoc new file mode 100644 index 000000000..ba8c97e1e --- /dev/null +++ b/modules/operator-preconfig-tls-routes.adoc @@ -0,0 +1,47 @@ +:_content-type: REFERENCE +[id="operator-preconfig-tls-routes"] += Configuring SSL/TLS and Routes + +Support for {ocp} _edge termination_ routes has been added by way of a new managed component, `tls`. This separates the `route` component from SSL/TLS and allows users to configure both separately. + +`EXTERNAL_TLS_TERMINATION: true` is the opinionated setting. + +[NOTE] +==== +* Managed `tls` means that the default cluster wildcard certificate is used. +* Unmanaged `tls` means that the user provided key and certificate pair is injected into the route. +==== + +The `ssl.cert` and `ssl.key` are now moved to a separate, persistent secret, which ensures that the key and certificate pair are not regenerated upon every reconcile. The key and certificate pair are now formatted as `edge` routes and mounted to the same directory in the `Quay` container. + +Multiple permutations are possible when configuring SSL/TLS and routes, but the following rules apply: + +* If SSL/TLS is `managed`, then your route must also be `managed`. +* If SSL/TLS is `unmanaged` then you must supply certificates directly in the config bundle. +//* However, it is possible to have both TLS and route `unmanaged` and not supply certs.
The following table describes the valid options: + +.Valid configuration options for TLS and routes +[width="100%",cols="2,2,2,2,3",options="header"] +|=== +|Option | Route | TLS | Certs provided |Result +| My own load balancer handles TLS | Managed | Managed | No |Edge route with default wildcard cert +| {productname} handles TLS | Managed | Unmanaged | Yes | Passthrough route with certs mounted inside the pod +| {productname} handles TLS | Unmanaged | Unmanaged | Yes | Certificates are set inside of the `quay` pod, but the route must be created manually +// | None (Not for production) | Unmanaged | Unmanaged | No | Sets a passthrough route, allows HTTP traffic directly from the route and into the Pod +|=== + +[id="creating-config-bundle-secret-tls-cert-key-pair"] +== Creating the config bundle secret with the SSL/TLS cert and key pair + +Use the following procedure to create a config bundle secret that includes your own SSL/TLS certificate and key pair. + +.Procedure + +* Enter the following command to create a config bundle secret that includes your own SSL/TLS certificate and key pair: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml --from-file ssl.cert=./ssl.cert --from-file ssl.key=./ssl.key config-bundle-secret +---- diff --git a/modules/operator-preconfigure.adoc b/modules/operator-preconfigure.adoc new file mode 100644 index 000000000..a9f0c9070 --- /dev/null +++ b/modules/operator-preconfigure.adoc @@ -0,0 +1,84 @@ +:_content-type: PROCEDURE +[id="operator-preconfigure"] += Configuring {productname} before deployment + +The {productname} Operator can manage all of the {productname} components when deployed on {ocp}. This is the default configuration, however, you can manage one or more components externally when you want more control over the set up. + +Use the following pattern to configure unmanaged {productname} components. + +.Procedure + +.
Create a `config.yaml` configuration file with the appropriate settings. Use the following reference for a minimal configuration: ++ +[source,terminal] +---- +$ touch config.yaml +---- ++ +[source,yaml] +---- +AUTHENTICATION_TYPE: Database +BUILDLOGS_REDIS: + host: + password: + port: 6379 + ssl: false +DATABASE_SECRET_KEY: <0ce4f796-c295-415b-bf9d-b315114704b8> +DB_URI: +DEFAULT_TAG_EXPIRATION: 2w +DISTRIBUTED_STORAGE_CONFIG: + default: + - LocalStorage + - storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +PREFERRED_URL_SCHEME: http +SECRET_KEY: +SERVER_HOSTNAME: +SETUP_COMPLETE: true +TAG_EXPIRATION_OPTIONS: + - 0s + - 1d + - 1w + - 2w + - 4w + - 3y +USER_EVENTS_REDIS: + host: + port: 6379 + ssl: false +---- + +. Create a `Secret` using the configuration file by entering the following command: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config.yaml config-bundle-secret +---- + +. Create a `quayregistry.yaml` file, identifying the unmanaged components and also referencing the created `Secret`, for example: ++ +.Example `QuayRegistry` YAML file ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + configBundleSecret: + components: + - kind: objectstorage + managed: false +# ... +---- + +. Enter the following command to deploy the registry by using the `quayregistry.yaml` file: ++ +[source,terminal] +---- +$ oc create -n quay-enterprise -f quayregistry.yaml +---- diff --git a/modules/operator-prereq.adoc b/modules/operator-prereq.adoc new file mode 100644 index 000000000..7599a73db --- /dev/null +++ b/modules/operator-prereq.adoc @@ -0,0 +1,45 @@ +:_content-type: REFERENCE +[id="operator-prereq"] += Prerequisites for {productname} on {ocp} + +Consider the following prerequisites prior to deploying {productname} on {ocp} using the {productname} Operator. 
+ +[id="openshift-cluster"] +== {ocp} cluster + +To deploy the {productname} Operator, you must have an {ocp} 4.5 or later cluster and access to an administrative account. The administrative account must have the ability to create namespaces at the cluster scope. + +[id="resource-requirements"] +== Resource Requirements + +Each {productname} application pod has the following resource requirements: + +* 8 Gi of memory +* 2000 millicores of CPU + +The {productname} Operator creates at least one application pod per {productname} deployment it manages. Ensure your {ocp} cluster has sufficient compute resources for these requirements. + +[id="object-storage"] +== Object Storage + +By default, the {productname} Operator uses the `ObjectBucketClaim` Kubernetes API to provision object storage. Consuming this API decouples the {productname} Operator from any vendor-specific implementation. {odf} provides this API through its NooBaa component, which is used as an example throughout this documentation. + +{productname} can be manually configured to use multiple storage cloud providers, including the following: + +* Amazon S3 (see link:https://access.redhat.com/solutions/3680151[S3 IAM Bucket Policy] for details on configuring an S3 bucket policy for {productname}) +* Microsoft Azure Blob Storage +* Google Cloud Storage +* Ceph Object Gateway (RADOS) +* OpenStack Swift +* CloudFront + S3 + +For a complete list of object storage providers, see the link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x support matrix]. + +[id="storage-class"] +== StorageClass + +When deploying `Quay` and `Clair` PostgreSQL databases using the {productname} Operator, a default `StorageClass` is configured in your cluster. + +The default `StorageClass` used by the {productname} Operator provisions the Persistent Volume Claims required by the `Quay` and `Clair` databases.
These PVCs are used to store data persistently, ensuring that your {productname} registry and Clair vulnerability scanner remain available and maintain their state across restarts or failures. + +Before proceeding with the installation, verify that a default `StorageClass` is configured in your cluster to ensure seamless provisioning of storage for `Quay` and `Clair` components. \ No newline at end of file diff --git a/modules/operator-quayregistry-api.adoc b/modules/operator-quayregistry-api.adoc new file mode 100644 index 000000000..0efa66e76 --- /dev/null +++ b/modules/operator-quayregistry-api.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="operator-quayregistry-api"] += QuayRegistry API + +The {productname} Operator provides the `QuayRegistry` custom resource API to declaratively manage `Quay` container registries on the cluster. Use either the {ocp} UI or a command-line tool to interact with this API. + +* Creating a `QuayRegistry` results in the {productname} Operator deploying and configuring all necessary resources needed to run {productname} on the cluster. +* Editing a `QuayRegistry` results in the {productname} Operator reconciling the changes and creating, updating, and deleting objects to match the desired configuration. +* Deleting a `QuayRegistry` results in garbage collection of all previously created resources. After deletion, the `Quay` container registry is no longer available. + +`QuayRegistry` API fields are outlined in the following sections.
\ No newline at end of file diff --git a/modules/operator-quayregistry-status.adoc b/modules/operator-quayregistry-status.adoc new file mode 100644 index 000000000..02f4cb5f4 --- /dev/null +++ b/modules/operator-quayregistry-status.adoc @@ -0,0 +1,20 @@ +:_content-type: REFERENCE +[id="operator-quayregistry-status"] += Viewing the status of the QuayRegistry object + +Lifecycle observability for a given {productname} deployment is reported in the `status` section of the corresponding `QuayRegistry` object. The {productname} Operator constantly updates this section, and this should be the first place to look for any problems or state changes in {productname} or its managed dependencies. + +[id="quayregistry-endpoint"] +== Viewing the registry endpoint + +Once {productname} is ready to be used, the `status.registryEndpoint` field will be populated with the publicly available hostname of the registry. + +[id="quayregistry-current-version"] +== Viewing the version of {productname} in use + +The current version of {productname} that is running will be reported in `status.currentVersion`. + +[id="quayregistry-conditions"] +== Viewing the conditions of your {productname} deployment + +Certain conditions will be reported in `status.conditions`. \ No newline at end of file diff --git a/modules/operator-resize-storage.adoc b/modules/operator-resize-storage.adoc new file mode 100644 index 000000000..b99d57d92 --- /dev/null +++ b/modules/operator-resize-storage.adoc @@ -0,0 +1,42 @@ +:_content-type: PROCEDURE +[id="operator-resize-storage"] += Resizing Managed Storage + +When deploying {productname-ocp}, three distinct persistent volume claims (PVCs) are deployed: + +* One for the PostgreSQL 13 registry. +* One for the Clair PostgreSQL 13 registry. +* One that uses NooBaa as a backend storage. + +[NOTE] +==== +The connection between {productname} and NooBaa is done through the S3 API and ObjectBucketClaim API in {ocp}. 
{productname} leverages that API group to create a bucket in NooBaa, obtain access keys, and automatically set everything up. On the backend, or NooBaa, side, that bucket is creating inside of the backing store. As a result, NooBaa PVCs are not mounted or connected to {productname} pods. +==== + +The default size for the PostgreSQL 13 and Clair PostgreSQL 13 PVCs is set to 50 GiB. You can expand storage for these PVCs on the {ocp} console by using the following procedure. + +[NOTE] +==== +The following procedure shares commonality with link:https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.5/html/managing_openshift_container_storage/managing-persistent-volume-claims_rhocs#expanding-persistent-volume-claims_rhocs[Expanding Persistent Volume Claims] on {odf}. +==== + +[id="resizing-noobaa-pvc"] +== Resizing PostgreSQL 13 PVCs on {productname} + +Use the following procedure to resize the PostgreSQL 13 and Clair PostgreSQL 13 PVCs. + +.Prerequisites + +* You have cluster admin privileges on {ocp}. + +.Procedure + +. Log into the {ocp} console and select *Storage* -> *Persistent Volume Claims*. + +. Select the desired `PersistentVolumeClaim` for either PostgreSQL 13 or Clair PostgreSQL 13, for example, `example-registry-quay-postgres-13`. + +. From the *Action* menu, select *Expand PVC*. + +. Enter the new size of the Persistent Volume Claim and select *Expand*. ++ +After a few minutes, the expanded size should reflect in the PVC's *Capacity* field. 
\ No newline at end of file diff --git a/modules/operator-standalone-object-gateway.adoc b/modules/operator-standalone-object-gateway.adoc new file mode 100644 index 000000000..60f716df9 --- /dev/null +++ b/modules/operator-standalone-object-gateway.adoc @@ -0,0 +1,309 @@ +:_content-type: PROCEDURE +[id="operator-standalone-object-gateway"] += Leveraging the Multicloud Object Gateway Component in the {odf} Operator for {productname} + +As part of a {productname} subscription, users are entitled to use the _Multicloud Object Gateway_ component of the {odf} Operator (formerly known as OpenShift Container Storage Operator). This gateway component allows you to provide an S3-compatible object storage interface to {productname} backed by Kubernetes `PersistentVolume`-based block storage. The usage is limited to a {productname} deployment managed by the Operator and to the exact specifications of the multicloud Object Gateway instance as documented below. + +Since {productname} does not support local filesystem storage, users can leverage the gateway in combination with Kubernetes `PersistentVolume` storage instead, to provide a supported deployment. A `PersistentVolume` is directly mounted on the gateway instance as a backing store for object storage and any block-based `StorageClass` is supported. + +By the nature of `PersistentVolume`, this is not a scale-out, highly available solution and does not replace a scale-out storage system like {odf}. Only a single instance of the gateway is running. If the pod running the gateway becomes unavailable due to rescheduling, updates or unplanned downtime, this will cause temporary degradation of the connected {productname} instances. + +Using the following procedures, you will install the Local Storage Operator, {odf}, and create a standalone Multicloud Object Gateway to deploy {productname} on {ocp}. 
+ +[NOTE] +==== +The following documentation shares commonality with the official link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html/deploying_openshift_data_foundation_using_bare_metal_infrastructure/deploy-standalone-multicloud-object-gateway#doc-wrapper[{odf} documentation]. +==== + +[id="installing-local-storage-operator"] +== Installing the Local Storage Operator on {ocp} + +Use the following procedure to install the Local Storage Operator from the *OperatorHub* before creating {odf} clusters on local storage devices. + +. Log in to the *OpenShift Web Console*. + +. Click *Operators* → *OperatorHub*. + +. Type *local storage* into the search box to find the Local Storage Operator from the list of Operators. Click *Local Storage*. + +. Click *Install*. + +. Set the following options on the Install Operator page: ++ +* For Update channel, select *stable*. +* For Installation mode, select *A specific namespace on the cluster*. +* For Installed Namespace, select *Operator recommended namespace openshift-local-storage*. +* For Update approval, select *Automatic*. + +. Click *Install*. + +[id="installing-odf"] +== Installing {odf} on {ocp} + +Use the following procedure to install {odf} on {ocp}. + +.Prerequisites + +* Access to an {ocp} cluster using an account with `cluster-admin` and Operator installation permissions. +* You must have at least three worker nodes in the {ocp} cluster. +* For additional resource requirements, see the link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.12/html-single/planning_your_deployment/index[Planning your deployment] guide. + +.Procedure + +. Log in to the *OpenShift Web Console*. + +. Click *Operators* → *OperatorHub*. + +. Type *OpenShift Data Foundation* in the search box. Click *OpenShift Data Foundation*. + +. Click *Install*. + +. 
Set the following options on the Install Operator page: ++ +* For Update channel, select the most recent stable version. +* For Installation mode, select *A specific namespace on the cluster*. +* For Installed Namespace, select *Operator recommended Namespace: openshift-storage*. +* For Update approval, select *Automatic* or *Manual*. ++ +If you select *Automatic* updates, then the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention. ++ +If you select *Manual* updates, then the OLM creates an update request. As a cluster administrator, you must then manually approve that update request to update the Operator to a newer version. + +* For Console plugin, select *Enable*. + +. Click *Install*. ++ +After the Operator is installed, a pop-up with a message, `Web console update is available` appears on the user interface. Click *Refresh web console* from this pop-up for the console changes to reflect. + +. Continue to the following section, "Creating a standalone Multicloud Object Gateway", to leverage the Multicloud Object Gateway Component for {productname}. + +[id="creating-mcg"] +== Creating a standalone Multicloud Object Gateway using the {ocp} UI + +Use the following procedure to create a standalone Multicloud Object Gateway. + +.Prerequisites + +* You have installed the Local Storage Operator. +* You have installed the {odf} Operator. + +.Procedure + +. In the *OpenShift Web Console*, click *Operators* -> *Installed Operators* to view all installed Operators. ++ +Ensure that the namespace is `openshift-storage`. + +. Click *Create StorageSystem*. + +. On the *Backing storage* page, select the following: +.. Select *Multicloud Object Gateway* for *Deployment type*. +.. Select the *Create a new StorageClass using the local storage devices* option. +.. Click *Next*. ++ +[NOTE] +==== +You are prompted to install the Local Storage Operator if it is not already installed. 
Click *Install*, and follow the procedure as described in "Installing the Local Storage Operator on {ocp}". +==== + +. On the *Create local volume set* page, provide the following information: +.. Enter a name for the *LocalVolumeSet* and the *StorageClass*. By default, the local volume set name appears for the storage class name. You can change the name. +.. Choose one of the following: ++ +* *Disk on all nodes* ++ +Uses the available disks that match the selected filters on all the nodes. ++ +* *Disk on selected nodes* ++ +Uses the available disks that match the selected filters only on the selected nodes. + +.. From the available list of *Disk Type*, select *SSD/NVMe*. + +.. Expand the *Advanced* section and set the following options: ++ +|=== +|*Volume Mode* | Filesystem is selected by default. Always ensure that Filesystem is selected for Volume Mode. +|*Device Type* | Select one or more device type from the dropdown list. +|*Disk Size*| Set a minimum size of 100GB for the device and maximum available size of the device that needs to be included. +|*Maximum Disks Limit* | This indicates the maximum number of PVs that can be created on a node. If this field is left empty, then PVs are created for all the available disks on the matching nodes. +|=== + +.. Click *Next* ++ +A pop-up to confirm the creation of `LocalVolumeSet` is displayed. + +.. Click *Yes* to continue. + +. In the *Capacity and nodes* page, configure the following: ++ +.. *Available raw capacity* is populated with the capacity value based on all the attached disks associated with the storage class. This takes some time to show up. The *Selected nodes* list shows the nodes based on the storage class. +.. Click *Next* to continue. + +. Optional. Select the *Connect to an external key management service* checkbox. This is optional for cluster-wide encryption. +.. From the *Key Management Service Provider* drop-down list, either select *Vault* or *Thales CipherTrust Manager (using KMIP)*. 
If you selected *Vault*, go to the next step. If you selected *Thales CipherTrust Manager (using KMIP)*, go to step iii. +.. Select an *Authentication Method*. ++ +Using Token Authentication method ++ +* Enter a unique *Connection Name*, host *Address* of the Vault server ('https://'), *Port* number and *Token*. ++ +* Expand *Advanced Settings* to enter additional settings and certificate details based on your `Vault` configuration: ++ +** Enter the Key Value secret path in *Backend Path* that is dedicated and unique to OpenShift Data Foundation. +** Optional: Enter *TLS Server Name* and *Vault Enterprise Namespace*. +** Upload the respective PEM encoded certificate file to provide the *CA Certificate*, *Client Certificate,* and *Client Private Key*. +** Click *Save* and skip to step iv. ++ +Using Kubernetes authentication method ++ +* Enter a unique Vault *Connection Name*, host *Address* of the Vault server ('https://'), *Port* number and *Role* name. +* Expand *Advanced Settings* to enter additional settings and certificate details based on your Vault configuration: +** Enter the Key Value secret path in *Backend Path* that is dedicated and unique to {odf}. +** Optional: Enter *TLS Server Name* and *Authentication Path* if applicable. +** Upload the respective PEM encoded certificate file to provide the *CA Certificate*, *Client Certificate*, and *Client Private Key*. +** Click *Save* and skip to step iv. + +.. To use *Thales CipherTrust Manager (using KMIP)* as the KMS provider, follow the steps below: + +... Enter a unique *Connection Name* for the Key Management service within the project. +... In the *Address* and *Port* sections, enter the IP of Thales CipherTrust Manager and the port where the KMIP interface is enabled. For example: ++ +* *Address*: 123.34.3.2 +* *Port*: 5696 +... Upload the *Client Certificate*, *CA certificate*, and *Client Private Key*. +... 
If StorageClass encryption is enabled, enter the Unique Identifier to be used for encryption and decryption generated above. +... The *TLS Server* field is optional and used when there is no DNS entry for the KMIP endpoint. For example,`kmip_all_.ciphertrustmanager.local`. + +.. Select a *Network*. +.. Click *Next*. + +. In the *Review and create* page, review the configuration details. To modify any configuration settings, click *Back*. + +. Click *Create StorageSystem*. + + +[id="creating-standalone-object-gateway"] +== Create A standalone Multicloud Object Gateway using the CLI + +Use the following procedure to install the {odf} (formerly known as OpenShift Container Storage) Operator and configure a single instance Multi-Cloud Gateway service. + +[NOTE] +==== +The following configuration cannot be run in parallel on a cluster with {odf} installed. +==== + +.Procedure + +. On the *OpenShift Web Console*, and then select *Operators* -> *OperatorHub*. + +. Search for *{odf}*, and then select *Install*. + +. Accept all default options, and then select *Install*. + +. Confirm that the Operator has installed by viewing the *Status* column, which should be marked as *Succeeded*. ++ +[WARNING] +==== +When the installation of the {odf} Operator is finished, you are prompted to create a storage system. Do not follow this instruction. Instead, create NooBaa object storage as outlined the following steps. +==== + +. On your machine, create a file named `noobaa.yaml` with the following information: ++ +[source,yaml] ++ +---- +apiVersion: noobaa.io/v1alpha1 +kind: NooBaa +metadata: + name: noobaa + namespace: openshift-storage +spec: + dbResources: + requests: + cpu: '0.1' + memory: 1Gi + dbType: postgres + coreResources: + requests: + cpu: '0.1' + memory: 1Gi +---- ++ +This creates a single instance deployment of the _Multi-cloud Object Gateway_. + +. 
Apply the configuration with the following command: ++ +[source,terminal] +---- +$ oc create -n openshift-storage -f noobaa.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +noobaa.noobaa.io/noobaa created +---- + +. After a few minutes, the _Multi-cloud Object Gateway_ should finish provisioning. You can enter the following command to check its status: ++ +[source,terminal] +---- +$ oc get -n openshift-storage noobaas noobaa -w +---- ++ +.Example output ++ +[source,terminal] +---- +NAME MGMT-ENDPOINTS S3-ENDPOINTS IMAGE PHASE AGE +noobaa [https://10.0.32.3:30318] [https://10.0.32.3:31958] registry.redhat.io/ocs4/mcg-core-rhel8@sha256:56624aa7dd4ca178c1887343c7445a9425a841600b1309f6deace37ce6b8678d Ready 3d18h +---- + +. Configure a backing store for the gateway by creating the following YAML file, named `noobaa-pv-backing-store.yaml`: ++ +[source,yaml] +---- +apiVersion: noobaa.io/v1alpha1 +kind: BackingStore +metadata: + finalizers: + - noobaa.io/finalizer + labels: + app: noobaa + name: noobaa-pv-backing-store + namespace: openshift-storage +spec: + pvPool: + numVolumes: 1 + resources: + requests: + storage: 50Gi <1> + storageClass: STORAGE-CLASS-NAME <2> + type: pv-pool +---- +<1> The overall capacity of the object storage service. Adjust as needed. +<2> The `StorageClass` to use for the `PersistentVolumes` requested. Delete this property to use the cluster default. + +. Enter the following command to apply the configuration: ++ +[source,terminal] +---- +$ oc create -f noobaa-pv-backing-store.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +backingstore.noobaa.io/noobaa-pv-backing-store created +---- ++ +This creates the backing store configuration for the gateway. All images in {productname} will be stored as objects through the gateway in a `PersistentVolume` created by the above configuration. + +. 
Run the following command to make the `PersistentVolume` backing store the default for all `ObjectBucketClaims` issued by the {productname} Operator: ++ +[source,terminal] +---- +$ oc patch bucketclass noobaa-default-bucket-class --patch '{"spec":{"placementPolicy":{"tiers":[{"backingStores":["noobaa-pv-backing-store"]}]}}}' --type merge -n openshift-storage +---- diff --git a/modules/operator-unmanaged-hpa.adoc b/modules/operator-unmanaged-hpa.adoc new file mode 100644 index 000000000..5cc93f773 --- /dev/null +++ b/modules/operator-unmanaged-hpa.adoc @@ -0,0 +1,120 @@ +:_content-type: REFERENCE +[id="operator-unmanaged-hpa"] += Using unmanaged Horizontal Pod Autoscalers + +Horizontal Pod Autoscalers (HPAs) are now included with the `Clair`, `Quay`, and `Mirror` pods, so that they now automatically scale during load spikes. + +As HPA is configured by default to be managed, the number of `Clair`, `Quay`, and `Mirror` pods is set to two. This facilitates the avoidance of downtime when updating or reconfiguring {productname} through the Operator or during rescheduling events. + +[NOTE] +==== +There is a known issue when disabling the `HorizontalPodAutoscaler` component and attempting to edit the HPA resource itself and increase the value of the `minReplicas` field. When attempting this setup, `Quay` application pods are scaled out by the unmanaged HPA and, after 60 seconds, the replica count is reconciled by the {productname} Operator. As a result, HPA pods are continuously created and then removed by the Operator. + +To resolve this issue, you should upgrade your {productname} deployment to at least version 3.12.5 or 3.13.1 and then use the following example to avoid the issue. + +This issue will be fixed in a future version of {productname}. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-6474[PROJQUAY-6474]. 
+==== + +[id="operator-disabling-hpa"] +== Disabling the Horizontal Pod Autoscaler + +To disable autoscaling or create your own `HorizontalPodAutoscaler` component, specify the component as `unmanaged` in the `QuayRegistry` custom resource definition. To avoid the known issue noted above, you must modify the `QuayRegistry` CRD object and set the replicas equal to `null` for the `quay`, `clair`, and `mirror` components. + +.Procedure + +* Edit the `QuayRegistry` CRD to include the following `replicas: null` for the `quay` component: ++ +[source,terminal] +---- +$ oc edit quayregistry -n +---- ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: +name: quay-registry +namespace: quay-enterprise +spec: +components: + - kind: horizontalpodautoscaler + managed: false + - kind: quay + managed: true + overrides: + replicas: null <1> + - kind: clair + managed: true + overrides: + replicas: null + - kind: mirror + managed: true + overrides: + replicas: null +# ... +---- +<1> After setting `replicas: null` in your `QuayRegistry` CRD, a new replica set might be generated because the deployment manifest of the `Quay` app is changed with `replicas: 1`. + +.Verification + +. Create a customized `HorizontalPodAutoscalers` CRD and increase the `minReplicas` amount to a higher value, for exampe, `3`: ++ +[source,yaml] +---- +kind: HorizontalPodAutoscaler +apiVersion: autoscaling/v2 +metadata: + name: quay-registry-quay-app + namespace: quay-enterprise +spec: + scaleTargetRef: + kind: Deployment + name: quay-registry-quay-app + apiVersion: apps/v1 + minReplicas: 3 + maxReplicas: 20 + metrics: + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: 90 + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 90 +---- + +. 
Ensure that your `QuayRegistry` application successfully starts by entering the following command: ++ +[source,terminal] +---- +$ oc get pod | grep quay-app +---- ++ +.Example output ++ +[source,terminal] +---- +quay-registry-quay-app-5b8fd49d6b-7wvbk 1/1 Running 0 34m +quay-registry-quay-app-5b8fd49d6b-jslq9 1/1 Running 0 3m42s +quay-registry-quay-app-5b8fd49d6b-pskpz 1/1 Running 0 43m +quay-registry-quay-app-upgrade-llctl 0/1 Completed 0 51m +---- + +. Ensure that your `HorizontalPodAutoscalers` successfully starts by entering the following command: ++ +[source,terminal] +---- +$ oc get hpa +---- ++ +[source,terminal] +---- +NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE +quay-registry-quay-app Deployment/quay-registry-quay-app 67%/90%, 54%/90% 3 20 3 51m +---- \ No newline at end of file diff --git a/modules/operator-unmanaged-mirroring.adoc b/modules/operator-unmanaged-mirroring.adoc new file mode 100644 index 000000000..b05f8a7d5 --- /dev/null +++ b/modules/operator-unmanaged-mirroring.adoc @@ -0,0 +1,19 @@ +:_content-type: REFERENCE +[id="operator-unmanaged-mirroring"] += Disabling the mirroring component + +To disable mirroring, use the following YAML configuration: + +.Unmanaged mirroring example YAML configuration +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + components: + - kind: mirroring + managed: false +---- \ No newline at end of file diff --git a/modules/operator-unmanaged-monitoring.adoc b/modules/operator-unmanaged-monitoring.adoc new file mode 100644 index 000000000..4c09df6c4 --- /dev/null +++ b/modules/operator-unmanaged-monitoring.adoc @@ -0,0 +1,24 @@ +:_content-type: REFERENCE +[id="operator-unmanaged-monitoring"] += Disabling the monitoring component + +If you install the {productname} Operator in a single namespace, the monitoring component is automatically set to `managed: false`. 
Use the following reference to explicitly disable monitoring. + +.Unmanaged monitoring +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + components: + - kind: monitoring + managed: false +---- + +[NOTE] +==== +Monitoring cannot be enabled when the {productname} Operator is installed in a single namespace. +==== \ No newline at end of file diff --git a/modules/operator-unmanaged-postgres.adoc b/modules/operator-unmanaged-postgres.adoc new file mode 100644 index 000000000..e5a559684 --- /dev/null +++ b/modules/operator-unmanaged-postgres.adoc @@ -0,0 +1,50 @@ +:_content-type: PROCEDURE +[id="operator-unmanaged-postgres"] += Using an existing PostgreSQL database + +If you are using an externally managed PostgreSQL database, you must manually enable the `pg_trgm` extension for a successful deployment. + +[IMPORTANT] +==== +You must not use the same externally managed PostgreSQL database for both {productname} and Clair deployments. Your PostgreSQL database must also not be shared with other workloads, as it might exhaust the natural connection limit on the PostgreSQL side when connection-intensive workloads, like {productname} or Clair, contend for resources. Additionally, pgBouncer is not supported with {productname} or Clair, so it is not an option to resolve this issue. +==== + +Use the following procedure to deploy an existing PostgreSQL database. + +.Procedure + +. Create a `config.yaml` file with the necessary database fields. For example: ++ +.Example `config.yaml` file: ++ +[source,yaml] +---- +DB_URI: postgresql://test-quay-database:postgres@test-quay-database:5432/test-quay-database +---- +. Create a `Secret` using the configuration file: ++ +---- +$ kubectl create secret generic --from-file config.yaml=./config.yaml config-bundle-secret +---- ++ +. 
Create a `quayregistry.yaml` file that marks the `postgres` component as `unmanaged` and references the created `Secret`. For example:
\ No newline at end of file diff --git a/modules/operator-unmanaged-route.adoc b/modules/operator-unmanaged-route.adoc new file mode 100644 index 000000000..f8d95379a --- /dev/null +++ b/modules/operator-unmanaged-route.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="operator-unmanaged-route"] += Disabling the Route component + +Use the following procedure to prevent the {productname} Operator from creating a route. + +.Procedure + +. Set the component as `managed: false` in the `quayregistry.yaml` file: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: example-registry + namespace: quay-enterprise +spec: + components: + - kind: route + managed: false +---- + +. Edit the `config.yaml` file to specify that {productname} handles SSL/TLS. For example: ++ +[source,yaml] +---- +# ... +EXTERNAL_TLS_TERMINATION: false +# ... +SERVER_HOSTNAME: example-registry-quay-quay-enterprise.apps.user1.example.com +# ... +PREFERRED_URL_SCHEME: https +# ... +---- ++ +If you do not configure the unmanaged route correctly, the following error is returned: ++ +[source,json] +---- +{ + { + "kind":"QuayRegistry", + "namespace":"quay-enterprise", + "name":"example-registry", + "uid":"d5879ba5-cc92-406c-ba62-8b19cf56d4aa", + "apiVersion":"quay.redhat.com/v1", + "resourceVersion":"2418527" + }, + "reason":"ConfigInvalid", + "message":"required component `route` marked as unmanaged, but `configBundleSecret` is missing necessary fields" +} +---- + +[NOTE] +==== +Disabling the default route means you are now responsible for creating a `Route`, `Service`, or `Ingress` in order to access the {productname} instance. Additionally, whatever DNS you use must match the `SERVER_HOSTNAME` in the {productname} config. 
+==== diff --git a/modules/operator-unmanaged-storage-noobaa.adoc b/modules/operator-unmanaged-storage-noobaa.adoc new file mode 100644 index 000000000..51755c4b6 --- /dev/null +++ b/modules/operator-unmanaged-storage-noobaa.adoc @@ -0,0 +1,29 @@ +[id="operator-unmanaged-storage-noobaa"] += Using an unmanaged NooBaa instance + +Use the following procedure to use an unmanaged NooBaa instance for your {productname} deployment. + +.Procedure + +. Create a NooBaa Object Bucket Claim in the console at Storage -> Object Bucket Claims. + +. Retrieve the Object Bucket Claim Data details including the `Access Key`, `Bucket Name`, `Endpoint (hostname)`, and `Secret Key`. + +. Create a `config.yaml` configuration file using the information for the Object Bucket Claim. For example: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - RHOCSStorage + - access_key: WmrXtSGk8B3nABCDEFGH + bucket_name: my-noobaa-bucket-claim-8b844191-dc6c-444e-9ea4-87ece0abcdef + hostname: s3.openshift-storage.svc.cluster.local + is_secure: true + port: "443" + secret_key: X9P5SDGJtmSuHFCMSLMbdNCMfUABCDEFGH+C5QD + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- diff --git a/modules/operator-unmanaged-storage.adoc b/modules/operator-unmanaged-storage.adoc new file mode 100644 index 000000000..f092d101a --- /dev/null +++ b/modules/operator-unmanaged-storage.adoc @@ -0,0 +1,210 @@ +:_content-type: REFERENCE +[id="operator-unmanaged-storage"] += Using unmanaged storage + +This section provides configuration examples for unmanaged storage for your convenience. Refer to the {productname} configuration guide for complete instructions on how to set up object storage. + +[id="aws-s3-storage-example"] +== AWS S3 storage + +Use the following example when configuring AWS S3 storage for your {productname} deployment. 
+ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + s3Storage: + - S3Storage + - host: s3.us-east-2.amazonaws.com + s3_access_key: ABCDEFGHIJKLMN + s3_secret_key: OL3ABCDEFGHIJKLMN + s3_bucket: quay_bucket + s3_region: + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - s3Storage +---- + +[id="aws-cloudfront-storage-example"] +== AWS Cloudfront storage + +Use the following example when configuring AWS Cloudfront for your {productname} deployment. + +[NOTE] +==== +* When configuring AWS Cloudfront storage, the following conditions must be met for proper use with {productname}: +** You must set an *Origin path* that is consistent with {productname}'s storage path as defined in your `config.yaml` file. Failure to meet this require results in a `403` error when pulling an image. For more information, see link:https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginPath[Origin path]. +** You must configure a link:https://docs.aws.amazon.com/whitepapers/latest/secure-content-delivery-amazon-cloudfront/s3-origin-with-cloudfront.html[*Bucket policy*] and a link:https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html[*Cross-origin resource sharing (CORS)*] policy. 
+==== + +.Cloudfront S3 example YAML +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - CloudFrontedS3Storage + - cloudfront_distribution_domain: + cloudfront_key_id: + cloudfront_privatekey_filename: + host: + s3_access_key: + s3_bucket: + s3_secret_key: + storage_path: + s3_region: +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - default +DISTRIBUTED_STORAGE_PREFERENCE: + - default +---- + +.Bucket policy example +[source,json] +---- +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " + }, + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::user/CloudFront Origin Access Identity " + }, + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} + +---- + +[id="gcp-storage-example"] +== Google Cloud storage + +Use the following example when configuring Google Cloud storage for your {productname} deployment. + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + googleCloudStorage: + - GoogleCloudStorage + - access_key: GOOGQIMFB3ABCDEFGHIJKLMN + bucket_name: quay-bucket + secret_key: FhDAYe2HeuAKfvZCAGyOioNaaRABCDEFGHIJKLMN + storage_path: /datastorage/registry + boto_timeout: 120 <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - googleCloudStorage +---- +<1> Optional. The time, in seconds, until a timeout exception is thrown when attempting to read from a connection. The default is `60` seconds. Also encompasses the time, in seconds, until a timeout exception is thrown when attempting to make a connection. The default is `60` seconds. + +[id="azure-storage-example"] +== Microsoft Azure storage + +Use the following example when configuring Microsoft Azure storage for your {productname} deployment. 
+ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + azureStorage: + - AzureStorage + - azure_account_name: azure_account_name_here + azure_container: azure_container_here + storage_path: /datastorage/registry + azure_account_key: azure_account_key_here + sas_token: some/path/ + endpoint_url: https://[account-name].blob.core.usgovcloudapi.net <1> +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - azureStorage +---- +<1> The `endpoint_url` parameter for Microsoft Azure storage is optional and can be used with Microsoft Azure Government (MAG) endpoints. If left blank, the `endpoint_url` will connect to the normal Microsoft Azure region. ++ +As of {productname} 3.7, you must use the Primary endpoint of your MAG Blob service. Using the Secondary endpoint of your MAG Blob service will result in the following error: `AuthenticationErrorDetail:Cannot find the claimed account when trying to GetProperties for the account whusc8-secondary`. + +[id="ceph-rados-storage-example"] +== Ceph/RadosGW Storage + +Use the following example when configuring Ceph/RadosGW storage for your {productname} deployment. + +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + radosGWStorage: #storage config name + - RadosGWStorage #actual driver + - access_key: access_key_here #parameters + secret_key: secret_key_here + bucket_name: bucket_name_here + hostname: hostname_here + is_secure: 'true' + port: '443' + storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: #must contain name of the storage config + - radosGWStorage +---- + +[id="swift-storage-example"] +== Swift storage + +Use the following example when configuring Swift storage for your {productname} deployment. 
ca_cert_path: /conf/stack/swift.cert
\ No newline at end of file diff --git a/modules/operator-upgrade.adoc b/modules/operator-upgrade.adoc new file mode 100644 index 000000000..85cbc175e --- /dev/null +++ b/modules/operator-upgrade.adoc @@ -0,0 +1,481 @@ +[id="operator-upgrade"] += Upgrading the {productname} Operator Overview + +The {productname} Operator follows a _synchronized versioning_ scheme, which means that each version of the Operator is tied to the version of {productname} and the components that it manages. There is no field on the `QuayRegistry` custom resource which sets the version of {productname} to `deploy`; the Operator can only deploy a single version of all components. This scheme was chosen to ensure that all components work well together and to reduce the complexity of the Operator needing to know how to manage the lifecycles of many different versions of {productname} on Kubernetes. + +[id="operator-lifecycle-manager"] +== Operator Lifecycle Manager + +The {productname} Operator should be installed and upgraded using the link:https://docs.openshift.com/container-platform/{ocp-y}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)]. When creating a `Subscription` with the default `approvalStrategy: Automatic`, OLM will automatically upgrade the {productname} Operator whenever a new version becomes available. + +[WARNING] +==== +When the {productname} Operator is installed by Operator Lifecycle Manager, it might be configured to support automatic or manual upgrades. This option is shown on the *OperatorHub* page for the {productname} Operator during installation. It can also be found in the {productname} Operator `Subscription` object by the `approvalStrategy` field. Choosing `Automatic` means that your {productname} Operator will automatically be upgraded whenever a new Operator version is released. If this is not desirable, then the `Manual` approval strategy should be selected. 
+==== + +[id="upgrading-quay-operator"] +== Upgrading the {productname} Operator + +The standard approach for upgrading installed Operators on {ocp} is documented at link:https://docs.openshift.com/container-platform/{ocp-y}/operators/admin/olm-upgrading-operators.html[Upgrading installed Operators]. + +In general, {productname} supports upgrades from a prior (N-1) minor version only. For example, upgrading directly from {productname} 3.5 to the latest version of {producty-min} is not supported. Instead, users would have to upgrade as follows: + +. 3.9.z -> 3.10.z +. 3.10.z -> 3.11.z +. 3.11.z -> 3.12.z +. 3.12.z -> 3.13.z +. 3.13.z -> 3.14.z + +This is required to ensure that any necessary database migrations are done correctly and in the right order during the upgrade. + +In some cases, {productname} supports direct, single-step upgrades from prior (N-2, N-3) minor versions. This simplifies the upgrade procedure for customers on older releases. The following upgrade paths are supported for {productname} {productmin}: + +* 3.11.z -> {productmin} +* 3.12.z -> {productmin} +* 3.13.z -> {productmin} + +For users on standalone deployments of {productname} wanting to upgrade to {productmin} see the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#standalone_upgrade[Standalone upgrade] guide. + +[id="upgrading-red-hat-quay"] +=== Upgrading {productname} to version {productmin} + +To update {productname} from one minor version to the next, for example, {producty-n1} -> {productmin}, you must change the update channel for the {productname} Operator. + +.Procedure + +. In the {ocp} Web Console, navigate to *Operators* -> *Installed Operators*. + +. Click on the {productname} Operator. + +. Navigate to the *Subscription* tab. + +. Under *Subscription details* click *Update channel*. + +. Select *stable-3.14* -> *Save*. + +. Check the progress of the new installation under *Upgrade status*. 
. Verify that the `quay-database` pod uses the `postgresql-13` image, and that the `clair-postgres` pod now uses the `postgresql-15` image.
Users with existing {productname} Operators who are upgrading from 3.7, 3.8, or 3.9 to {producty} must manually remove the {productname} config editor by removing the `pod`, `deployment`, `route`, `service`, and `secret` objects.

To remove the `deployment`, `route`, `service`, and `secret` objects, use the following procedure.
$ oc get pod
$ oc delete pod quayregistry-quay-config-editor-c866f64c4-68gtb
For example: ++ +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.8 + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_RETAIN_BACKUP + value: "true" +---- + +. In the {ocp} Web Console, navigate to *Operators* -> *Installed Operators*. + +. Click on the {productname} Operator. + +. Navigate to the *Subscription* tab. + +. Under *Subscription details* click *Update channel*. + +. Select *stable-3.9* and save the changes. + +. Check the progress of the new installation under *Upgrade status*. Wait until the upgrade status changes to *1 installed* before proceeding. + +. In your {ocp} cluster, navigate to *Workloads* -> *Pods*. Existing pods should be terminated, or in the process of being terminated. + +. Wait for the following pods, which are responsible for upgrading the database and alembic migration of existing data, to spin up: `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade`. + +. After the `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade` pods are marked as *Completed*, the remaining pods for your {productname} deployment spin up. This takes approximately ten minutes. + +. Verify that the `quay-database` and `clair-postgres` pods now use the `postgresql-13` image. + +. After the `quay-app` pod is marked as *Running*, you can reach your {productname} registry. + + +[id="upgrade-33-36"] +=== Upgrading directly from 3.3.z or 3.4.z to 3.6 + +The following section provides important information when upgrading from {productname} 3.3.z or 3.4.z to 3.6. + +[id="upgrading-edge-routing-enabled"] +==== Upgrading with edge routing enabled + +* Previously, when running a 3.3.z version of {productname} with edge routing enabled, users were unable to upgrade to 3.4.z versions of {productname}. 
This has been resolved with the release of {productname} 3.6. + +* When upgrading from 3.3.z to 3.6, if `tls.termination` is set to `none` in your {productname} 3.3.z deployment, it will change to HTTPS with TLS edge termination and use the default cluster wildcard certificate. For example: ++ +[source,yaml] +---- +apiVersion: redhatcop.redhat.io/v1alpha1 +kind: QuayEcosystem +metadata: + name: quay33 +spec: + quay: + imagePullSecretName: redhat-pull-secret + enableRepoMirroring: true + image: quay.io/quay/quay:v3.3.4-2 + ... + externalAccess: + hostname: quayv33.apps.devcluster.openshift.com + tls: + termination: none + database: +... +---- + + +[id="upgrading-with-tls-cert-key-pairs-without-san"] +==== Upgrading with custom SSL/TLS certificate/key pairs without Subject Alternative Names + +There is an issue for customers using their own SSL/TLS certificate/key pairs without Subject Alternative Names (SANs) when upgrading from {productname} 3.3.4 to {productname} 3.6 directly. During the upgrade to {productname} 3.6, the deployment is blocked, with the error message from the {productname} Operator pod logs indicating that the {productname} SSL/TLS certificate must have SANs. + +If possible, you should regenerate your SSL/TLS certificates with the correct hostname in the SANs. A possible workaround involves defining an environment variable in the `quay-app`, `quay-upgrade` and `quay-config-editor` pods after upgrade to enable CommonName matching: + +---- + GODEBUG=x509ignoreCN=0 +---- + +The `GODEBUG=x509ignoreCN=0` flag enables the legacy behavior of treating the CommonName field on X.509 certificates as a hostname when no SANs are present. However, this workaround is not recommended, as it will not persist across a redeployment. 
+ + +[id="configuring-clair-v4-upgrading-from-33-34-to-36"] +==== Configuring Clair v4 when upgrading from 3.3.z or 3.4.z to 3.6 using the {productname} Operator + +To set up Clair v4 on a new {productname} deployment on {ocp}, it is highly recommended to use the {productname} Operator. By default, the {productname} Operator will install or upgrade a Clair deployment along with your {productname} deployment and configure Clair automatically. + +//link needs replaced +For instructions about setting up Clair v4 in a disconnected {ocp} cluster, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-openshift[Setting Up Clair on a {productname} OpenShift deployment]. + +[id="swift-config-upgrading-from-33-to-36"] +=== Swift configuration when upgrading from 3.3.z to 3.6 + +When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the following error: `Switch auth v3 requires tenant_id (string) in os_options`. As a workaround, you can manually update your `DISTRIBUTED_STORAGE_CONFIG` to add the `os_options` and `tenant_id` parameters: + +[source,yaml] +---- + DISTRIBUTED_STORAGE_CONFIG: + brscale: + - SwiftStorage + - auth_url: http://****/v3 + auth_version: "3" + os_options: + tenant_id: **** + project_name: ocp-base + user_domain_name: Default + storage_path: /datastorage/registry + swift_container: ocp-svc-quay-ha + swift_password: ***** + swift_user: ***** +---- +//// + +[id="changing-update-channel-for-operator"] +=== Changing the update channel for the {productname} Operator + +The subscription of an installed Operator specifies an update channel, which is used to track and receive updates for the Operator. To upgrade the {productname} Operator to start tracking and receiving updates from a newer channel, change the update channel in the *Subscription* tab for the installed {productname} Operator. 
For subscriptions with an `Automatic` approval strategy, the upgrade begins automatically and can be monitored on the page that lists the Installed Operators. + +[id="manually-approving-pending-operator-upgrade"] +=== Manually approving a pending Operator upgrade + +If an installed Operator has the approval strategy in its subscription set to `Manual`, when new updates are released in its current update channel, the update must be manually approved before installation can begin. If the {productname} Operator has a pending upgrade, this status will be displayed in the list of Installed Operators. In the `Subscription` tab for the {productname} Operator, you can preview the install plan and review the resources that are listed as available for upgrade. If satisfied, click `Approve` and return to the page that lists Installed Operators to monitor the progress of the upgrade. + +The following image shows the *Subscription* tab in the UI, including the update `Channel`, the `Approval` strategy, the `Upgrade status` and the `InstallPlan`: + +image:update-channel-approval-strategy.png[Subscription tab including upgrade Channel and Approval strategy] + +The list of Installed Operators provides a high-level summary of the current Quay installation: + +image:installed-operators-list.png[Installed Operators] + +[id="upgrading-quayregistry"] +== Upgrading a QuayRegistry resource + +When the {productname} Operator starts, it immediately looks for any `QuayRegistries` it can find in the namespace(s) it is configured to watch. When it finds one, the following logic is used: + +* If `status.currentVersion` is unset, reconcile as normal. +* If `status.currentVersion` equals the Operator version, reconcile as normal. +* If `status.currentVersion` does not equal the Operator version, check if it can be upgraded. If it can, perform upgrade tasks and set the `status.currentVersion` to the Operator's version once complete. 
If it cannot be upgraded, return an error and leave the `QuayRegistry` and its deployed Kubernetes objects alone. + +[id="upgrading-quayecosystem"] +== Upgrading a QuayEcosystem + +Upgrades are supported from previous versions of the Operator which used the `QuayEcosystem` API for a limited set of configurations. To ensure that migrations do not happen unexpectedly, a special label needs to be applied to the `QuayEcosystem` for it to be migrated. A new `QuayRegistry` will be created for the Operator to manage, but the old `QuayEcosystem` will remain until manually deleted to ensure that you can roll back and still access Quay in case anything goes wrong. To migrate an existing `QuayEcosystem` to a new `QuayRegistry`, use the following procedure. + +.Procedure + +. Add `"quay-operator/migrate": "true"` to the `metadata.labels` of the `QuayEcosystem`. ++ +[source,terminal] +---- +$ oc edit quayecosystem +---- ++ +[source,yaml] +---- +metadata: + labels: + quay-operator/migrate: "true" +---- +. Wait for a `QuayRegistry` to be created with the same `metadata.name` as your `QuayEcosystem`. The `QuayEcosystem` will be marked with the label `"quay-operator/migration-complete": "true"`. + +. After the `status.registryEndpoint` of the new `QuayRegistry` is set, access {productname} and confirm that all data and settings were migrated successfully. + +. If everything works correctly, you can delete the `QuayEcosystem` and Kubernetes garbage collection will clean up all old resources. + +[id="reverting-quayecosystem-upgrade"] +=== Reverting QuayEcosystem Upgrade + +If something goes wrong during the automatic upgrade from `QuayEcosystem` to `QuayRegistry`, follow these steps to revert back to using the `QuayEcosystem`: + +.Procedure + +. Delete the `QuayRegistry` using either the UI or `kubectl`: ++ +[source,terminal] +---- +$ kubectl delete -n quayregistry +---- + +. 
If external access was provided using a `Route`, change the `Route` to point back to the original `Service` using the UI or `kubectl`. + +[NOTE] +==== +If your `QuayEcosystem` was managing the PostgreSQL database, the upgrade process will migrate your data to a new PostgreSQL database managed by the upgraded Operator. Your old database will not be changed or removed but {productname} will no longer use it once the migration is complete. If there are issues during the data migration, the upgrade process will exit and it is recommended that you continue with your database as an unmanaged component. +==== + +[id="supported-quayecossytem-configurations-for-upgrades"] +=== Supported QuayEcosystem Configurations for Upgrades + +The {productname} Operator reports errors in its logs and in `status.conditions` if migrating a `QuayEcosystem` component fails or is unsupported. All unmanaged components should migrate successfully because no Kubernetes resources need to be adopted and all the necessary values are already provided in {productname}'s `config.yaml` file. + +*Database* + +Ephemeral database not supported (`volumeSize` field must be set). + +*Redis* + +Nothing special needed. + +*External Access* + +Only passthrough `Route` access is supported for automatic migration. Manual migration required for other methods. + +* `LoadBalancer` without custom hostname: +After the `QuayEcosystem` is marked with label `"quay-operator/migration-complete": "true"`, delete the `metadata.ownerReferences` field from existing `Service` _before_ deleting the `QuayEcosystem` to prevent Kubernetes from garbage collecting the `Service` and removing the load balancer. A new `Service` will be created with `metadata.name` format `-quay-app`. Edit the `spec.selector` of the existing `Service` to match the `spec.selector` of the new `Service` so traffic to the old load balancer endpoint will now be directed to the new pods. 
You are now responsible for the old `Service`; the Quay Operator will not manage it. + +* `LoadBalancer`/`NodePort`/`Ingress` with custom hostname: +A new `Service` of type `LoadBalancer` will be created with `metadata.name` format `-quay-app`. Change your DNS settings to point to the `status.loadBalancer` endpoint provided by the new `Service`. + +*Clair* + +Nothing special needed. + +*Object Storage* + +`QuayEcosystem` did not have a managed object storage component, so object storage will always be marked as unmanaged. Local storage is not supported. + +*Repository Mirroring* + +Nothing special needed. diff --git a/modules/operator-volume-size-overrides.adoc b/modules/operator-volume-size-overrides.adoc new file mode 100644 index 000000000..b676d5cd2 --- /dev/null +++ b/modules/operator-volume-size-overrides.adoc @@ -0,0 +1,37 @@ +:_content-type: REFERENCE +[id="operator-volume-size-overrides"] += Volume size overrides + +You can specify the desired size of storage resources provisioned for managed components. The default size for Clair and the PostgreSQL databases is `50Gi`. You can now choose a large enough capacity upfront, either for performance reasons or in the case where your storage backend does not have resize capability. 
+ +In the following example, the volume size for the Clair and the Quay PostgreSQL databases has been set to `70Gi`: + +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: quay-example + namespace: quay-enterprise +spec: + configBundleSecret: config-bundle-secret + components: + - kind: objectstorage + managed: false + - kind: route + managed: true + - kind: tls + managed: false + - kind: clair + managed: true + overrides: + volumeSize: 70Gi + - kind: postgres + managed: true + overrides: + volumeSize: 70Gi + - kind: clairpostgres + managed: true + overrides: + volumeSize: 70Gi +---- \ No newline at end of file diff --git a/modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc b/modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc new file mode 100644 index 000000000..faa78255c --- /dev/null +++ b/modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc @@ -0,0 +1,304 @@ +:_content-type: PROCEDURE +[id="optional-enabling-read-only-mode-backup-restore-ocp"] += Optional: Enabling read-only mode for {productname-ocp} + +Enabling read-only mode for your {productname-ocp} deployment allows you to manage the registry's operations. Administrators can enable read-only mode to restrict write access to the registry, which helps ensure data integrity, mitigate risks during maintenance windows, and provide a safeguard against unintended modifications to registry data. It also helps to ensure that your {productname} registry remains online and available to serve images to users. + +When backing up and restoring, you are required to scale down your {productname-ocp} deployment. This results in service unavailability during the backup period which, in some cases, might be unacceptable. Enabling read-only mode ensures service availability during the backup and restore procedure for {productname-ocp} deployments. 
With this method, administrators can avoid putting the whole registry in `read-only` mode, which affects the database, so that most operations can still be done.
.Prerequisites
quay=# INSERT INTO servicekeyapproval (approval_type, approved_date, notes)
       VALUES ('ServiceKeyApprovalType.SUPERUSER', CURRENT_DATE,
       '{include_notes_here_on_why_this_is_being_added}');
You can use the following `SELECT` statements to get the necessary IDs: ++ +[source,terminal] +---- +UPDATE servicekey +SET approval_id = (SELECT id FROM servicekeyapproval WHERE approval_type = 'ServiceKeyApprovalType.SUPERUSER') +WHERE name = 'quay-readonly'; +---- ++ +[source,terminal] +---- +UPDATE 1 +---- + +[id="quay-ocp-readonly"] +== Configuring read-only mode for {productname-ocp} + +After the service keys have been created and added to your PostgreSQL database, you must restart the `Quay` container on your {ocp} deployment. + +[IMPORTANT] +==== +Deploying {productname-ocp} in read-only mode requires you to modify the secrets stored inside of your {ocp} cluster. It is highly recommended that you create a backup of the secret prior to making changes to it. +==== + +.Prerequisites + +* You have created the service keys and added them to your PostgreSQL database. + +.Procedure + +. Enter the following command to read the secret name of your {productname-ocp} deployment: ++ +[source,terminal] +---- +$ oc get deployment -o yaml +---- + +. Use the `base64` command to encode the `quay-readonly.kid` and `quay-readonly.pem` files: ++ +[source,terminal] +---- +$ base64 -w0 quay-readonly.kid +---- ++ +Example output ++ +[source,terminal] +---- +ZjUyNDFm... +---- ++ +[source,terminal] +---- +$ base64 -w0 quay-readonly.pem +---- ++ +Example output ++ +[source,terminal] +---- +LS0tLS1CRUdJTiBSU0E... +---- + +. Obtain the current configuration bundle and secret by entering the following command: ++ +[source,terminal] +---- +$ oc get secret quay-config-secret-name -o json | jq '.data."config.yaml"' | cut -d '"' -f2 | base64 -d -w0 > config.yaml +---- + +. Edit the `config.yaml` file and add the following information: ++ +[source,yaml] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- + +. 
Save the file and `base64` encode it by running the following command: ++ +[source,terminal] +---- +$ base64 -w0 config.yaml +---- + +. Scale down the {productname} Operator pods to `0`. This ensures that the Operator does not reconcile the secret after editing it. ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment quay-operator -n openshift-operators +---- + +. Edit the secret to include the new content: ++ +[source,terminal] +---- +$ oc edit secret quay-config-secret-name -n quay-namespace +---- ++ +[source,yaml] +---- +# ... +data: + "quay-readonly.kid": "ZjUyNDFm..." + "quay-readonly.pem": "LS0tLS1CRUdJTiBSU0E..." + "config.yaml": "QUNUSU9OX0xPR19..." +# ... +---- ++ +With your {productname-ocp} deployment on read-only mode, you can safely manage your registry's operations and perform such actions as backup and restore. + +[id="scaling-up-quay-ocp-read-only-deployment"] +=== Scaling up the {productname-ocp} from a read-only deployment + +When you no longer want {productname-ocp} to be in read-only mode, you can scale the deployment back up and remove the content added from the secret. + +.Procedure + +. Edit the `config.yaml` file and remove the following information: ++ +[source,yaml] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- + +. 
Scale the {productname} Operator back up by entering the following command: ++ +[source,terminal] +---- +oc scale --replicas=1 deployment quay-operator -n openshift-operators +---- \ No newline at end of file diff --git a/modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc b/modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc new file mode 100644 index 000000000..b46ec1afa --- /dev/null +++ b/modules/optional-enabling-read-only-mode-backup-restore-standalone.adoc @@ -0,0 +1,315 @@ +:_content-type: PROCEDURE +[id="optional-enabling-read-only-mode-backup-restore-standalone"] += Optional: Enabling read-only mode for {productname} + +Enabling read-only mode for your {productname} deployment allows you to manage the registry's operations. {productname} administrators can enable read-only mode to restrict write access to the registry, which helps ensure data integrity, mitigate risks during maintenance windows, and provide a safeguard against unintended modifications to registry data. It also helps to ensure that your {productname} registry remains online and available to serve images to users. + +[NOTE] +==== +In some cases, a read-only option for {productname} is not possible since it requires inserting a service key and other manual configuration changes. As an alternative to read-only mode, {productname} administrators might consider enabling the `DISABLE_PUSHES` feature. When this field is set to `true`, users are unable to push images or image tags to the registry when using the CLI. Enabling `DISABLE_PUSHES` differs from `read-only` mode because the database is not set as `read-only` when it is enabled. + +This field might be useful in some situations such as when {productname} administrators want to calculate their registry's quota and disable image pushing until after calculation has completed. 
With this method, administrators can avoid putting the whole registry in `read-only` mode, which affects the database, so that most operations can still be done. + +For information about enabling this configuration field, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-misc[Miscellaneous configuration fields]. +==== + +.Prerequisites + +* If you are using {rhel} 7.x: +** You have enabled the Red Hat Software Collections List (RHSCL). +** You have installed Python 3.6. +** You have downloaded the `virtualenv` package. +** You have installed the `git` CLI. + +* If you are using {rhel} 8: +** You have installed Python 3 on your machine. +** You have downloaded the `python3-virtualenv` package. +** You have installed the `git` CLI. + +* You have cloned the `https://github.com/quay/quay.git` repository. + +[id="creating-service-keys"] +== Creating service keys for standalone {productname} + +{productname} uses service keys to communicate with various components. These keys are used to sign completed requests, such as requesting to scan images, login, storage access, and so on. + +.Procedure + +. If your {productname} registry is readily available, you can generate service keys inside of the `Quay` registry container. + +.. Enter the following command to generate a key pair inside of the `Quay` container: ++ +[source,terminal] +---- +$ podman exec quay python3 tools/generatekeypair.py quay-readonly +---- + +. If your {productname} is not readily available, you must generate your service keys inside of a virtual environment. + +.. Change into the directory of your {productname} deployment and create a virtual environment inside of that directory: ++ +[source,terminal] +---- +$ cd <$QUAY>/quay && virtualenv -v venv +---- + +.. Activate the virtual environment by entering the following command: ++ +[source,terminal] +---- +$ source venv/bin/activate +---- + +.. Optional. 
Install the `pip` CLI tool if you do not have it installed: ++ +[source,terminal] +---- +$ venv/bin/pip install --upgrade pip +---- + +.. In your {productname} directory, create a `requirements-generatekeys.txt` file with the following content: ++ +[source,terminal] +---- +$ cat << EOF > requirements-generatekeys.txt +cryptography==3.4.7 +pycparser==2.19 +pycryptodome==3.9.4 +pycryptodomex==3.9.4 +pyjwkest==1.4.2 +PyJWT==1.7.1 +Authlib==1.0.0a2 +EOF +---- + +.. Enter the following command to install the Python dependencies defined in the `requirements-generatekeys.txt` file: ++ +[source,terminal] +---- +$ venv/bin/pip install -r requirements-generatekeys.txt +---- + +.. Enter the following command to create the necessary service keys: ++ +[source,terminal] +---- +$ PYTHONPATH=. venv/bin/python //tools/generatekeypair.py quay-readonly +---- ++ +Example output ++ +[source,terminal] +---- +Writing public key to quay-readonly.jwk +Writing key ID to quay-readonly.kid +Writing private key to quay-readonly.pem +---- + +.. Enter the following command to deactivate the virtual environment: ++ +[source,terminal] +---- +$ deactivate +---- + +[id="adding-keys-postgresql-database"] +== Adding keys to the PostgreSQL database + +Use the following procedure to add your service keys to the PostgreSQL database. + +.Prerequisites + +* You have created the service keys. + +.Procedure + +. Enter the following command to enter your {productname} database environment: ++ +[source,terminal] +---- +$ podman exec -it postgresql-quay psql -U postgres -d quay +---- + +. 
Display the approval types and associated notes of the `servicekeyapproval` by entering the following command: ++ +[source,terminal] +---- +quay=# select * from servicekeyapproval; +---- ++ +Example output ++ +[source,terminal] +---- + id | approver_id | approval_type | approved_date | notes +----+-------------+----------------------------------+----------------------------+------- + 1 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:47:48.181347 | + 2 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:47:55.808087 | + 3 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:49:04.27095 | + 4 | | ServiceKeyApprovalType.AUTOMATIC | 2024-05-07 03:49:05.46235 | + 5 | 1 | ServiceKeyApprovalType.SUPERUSER | 2024-05-07 04:05:10.296796 | +... +---- + +. Add the service key to your {productname} database by entering the following query: ++ +[source,terminal] +---- +quay=# INSERT INTO servicekey + (name, service, metadata, kid, jwk, created_date, expiration_date) + VALUES ('quay-readonly', + 'quay', + '{}', + '{}', + '{}', + '{}', + '{}'); +---- ++ +Example output ++ +[source,terminal] +---- +INSERT 0 1 +---- + +. Next, add the key approval with the following query: ++ +[source,terminal] +---- +quay=# INSERT INTO servicekeyapproval (approval_type, approved_date, notes) + VALUES ('ServiceKeyApprovalType.SUPERUSER', CURRENT_DATE, + '{include_notes_here_on_why_this_is_being_added}'); +---- ++ +Example output ++ +[source,terminal] +---- +INSERT 0 1 +---- + +. Set the `approval_id` field on the created service key row to the `id` field from the created service key approval. 
You can use the following `SELECT` statements to get the necessary IDs: ++ +[source,terminal] +---- +UPDATE servicekey +SET approval_id = (SELECT id FROM servicekeyapproval WHERE approval_type = 'ServiceKeyApprovalType.SUPERUSER') +WHERE name = 'quay-readonly'; +---- ++ +[source,terminal] +---- +UPDATE 1 +---- + +[id="quay-standalone-readonly"] +== Configuring read-only mode for standalone {productname} + +After the service keys have been created and added to your PostgreSQL database, you must restart the `Quay` container on your standalone deployment. + +.Prerequisites + +* You have created the service keys and added them to your PostgreSQL database. + +.Procedure + +. Shutdown all {productname} instances on all virtual machines. For example: ++ +[source,terminal] +---- +$ podman stop +---- ++ +[source,terminal] +---- +$ podman stop +---- + +. Enter the following command to copy the contents of the `quay-readonly.kid` file and the `quay-readonly.pem` file to the directory that holds your {productname} configuration bundle: ++ +[source,terminal] +---- +$ cp quay-readonly.kid quay-readonly.pem $Quay/config +---- + +. Enter the following command to set file permissions on all files in your configuration bundle folder: ++ +[source,terminal] +---- +$ setfacl -m user:1001:rw $Quay/config/* +---- + +. Modify your {productname} `config.yaml` file and add the following information: ++ +[source,terminal] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- + +. Distribute the new configuration bundle to all {productname} instances. + +. Start {productname} by entering the following command: ++ +[source,terminal] +---- +$ podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay-main-app \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- + +. 
After starting {productname}, a banner inside of your instance informs users that {productname} is running in read-only mode. Pushes should be rejected and a 405 error should be logged. You can test this by running the following command: ++ +[source,terminal] +---- +$ podman push /quayadmin/busybox:test +---- ++ +Example output ++ +[source,terminal] +---- +613be09ab3c0: Preparing +denied: System is currently read-only. Pulls will succeed but all write operations are currently suspended. +---- ++ +With your {productname} deployment on read-only mode, you can safely manage your registry's operations and perform such actions as backup and restore. + +. Optional. After you are finished with read-only mode, you can return to normal operations by removing the following information from your `config.yaml` file. Then, restart your {productname} deployment: ++ +[source,terminal] +---- +# ... +REGISTRY_STATE: readonly +INSTANCE_SERVICE_KEY_KID_LOCATION: 'conf/stack/quay-readonly.kid' +INSTANCE_SERVICE_KEY_LOCATION: 'conf/stack/quay-readonly.pem' +# ... +---- ++ +[source,terminal] +---- +$ podman restart +---- + +[id="updating-read-only-expiration-time"] +== Updating read-only expiration time + +The {productname} read-only key has an expiration date, and when that date passes the key is deactivated. Before the key expires, its expiration time can be updated in the database. 
To update the key, connect your {productname} production database using the methods described earlier and issue the following query: + +[source,terminal] +---- +quay=# UPDATE servicekey SET expiration_date = 'new-date' WHERE id = servicekey_id; +---- + +The list of service key IDs can be obtained by running the following query: + +[source,terminal] +---- +SELECT id, name, expiration_date FROM servicekey; +---- \ No newline at end of file diff --git a/modules/oras-annotation-parsing.adoc b/modules/oras-annotation-parsing.adoc new file mode 100644 index 000000000..9817f282d --- /dev/null +++ b/modules/oras-annotation-parsing.adoc @@ -0,0 +1,166 @@ +:_content-type: CONCEPT +[id="annotation-parsing-oras"] += Annotation parsing + +Some OCI media types do not utilize labels and, as such, critical information such as expiration timestamps are not included. {productname} supports metadata passed through annotations to accommodate OCI media types that do not include these labels for metadata transmission. Tools such as ORAS (OCI Registry as Storage) can now be used to embed information with artifact types to help ensure that images operate properly, for example, to expire. + +The following procedure uses ORAS to add an expiration date to an OCI media artifact. + +[IMPORTANT] +==== +If you pushed an image with `podman push`, and then add an annotation with `oras`, the MIME type is changed. Consequently, you will not be able to pull the same image with `podman pull` because Podman does not recognize that MIME type. +==== + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. +* You have pushed an OCI media artifact to your {productname} repository. + +//// +.Procedure + +. Create an example artifact: ++ +[source,terminal] +---- +$ cat < Dockerfile +FROM alpine +CMD echo 'hello world!' +EOF +---- + +. Create an artifact directory: ++ +[source,terminal] +---- +$ mkdir +---- + +. 
Build the Docker image by using Podman. For example: ++ +[source,terminal] +---- +$ podman build -t . +---- ++ +.Example output ++ +[source,terminal] +---- +STEP 1/2: FROM alpine +STEP 2/2: CMD echo 'hello world!' +--> Using cache a5c9903200482a77ad9be3234962d2eac4dfef0b35d08eb4e966bf0125e0679b +COMMIT myartifact-image +--> a5c990320048 +Successfully tagged localhost/myartifact-image:latest +Successfully tagged localhost/hello-world:v1 +a5c9903200482a77ad9be3234962d2eac4dfef0b35d08eb4e966bf0125e0679b +---- + +. Confirm that the image has been built: ++ +[source,terminal] +---- +$ podman images +---- ++ +.Example output ++ +[source,terminal] +---- +REPOSITORY TAG IMAGE ID CREATED SIZE +localhost/ latest a5c990320048 18 hours ago 8.08 MB +---- + +. Convert the image to an OCI artifact. For example: ++ +[source,terminal] +---- +$ podman save --format oci-archive -o myartifact-image.tar +---- + +. Inspect the image to ensure that it follows the OCI format: ++ +[source,terminal] +---- +$ podman inspect myartifact-image +---- ++ +.Example output ++ +[source,terminal] +---- +--- +[ + { + "Id": "a5c9903200482a77ad9be3234962d2eac4dfef0b35d08eb4e966bf0125e0679b", + "Digest": "sha256:cc9c20f447dfd2b30019a44290d451a2edc5dec51736d29b5697c23fe7e55afb", + "RepoTags": [ + "localhost/myartifact-image:latest", + "localhost/hello-world:v1" +--- +---- + +. Tag the image by entering the following command: ++ +[source,terminal] +---- +$ podman tag quay.io///: +---- + +. Push the artifact to your {productname} registry. For example: ++ +[source,terminal] +---- +$ podman push quay.io///: +---- +//// + +.Procedure + +* By default, some OCI media types, like `application/vnd.oci.image.manifest.v1+json`, do not use certain labels, like expiration timestamps. You can use a CLI tool like ORAS (`oras`) to add annotations to OCI media types. 
For example: ++ +[source,terminal] +---- +$ oras push --annotation "quay.expires-after=2d" \ <1> +--annotation "expiration = 2d" \ <2> +quay.io///: +---- +<1> Set the expiration time for 2 days, indicated by `2d`. +<2> Adds the expiration label. ++ +.Example output ++ +[source,terminal] +---- +✓ Exists application/vnd.oci.empty.v1+json 2/2 B 100.00% 0s + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 561/561 B 100.00% 511ms + └─ sha256:9b4f2d43b62534423894d077f0ff0e9e496540ec8b52b568ea8b757fc9e7996b +Pushed [registry] quay.io/stevsmit/testorg3/oci-image:v1 +ArtifactType: application/vnd.unknown.artifact.v1 +Digest: sha256:9b4f2d43b62534423894d077f0ff0e9e496540ec8b52b568ea8b757fc9e7996b +---- + +.Verification + +. Pull the image with `oras`. For example: ++ +[source,terminal] +---- +$ oras pull quay.io///: +---- + +. Inspect the changes using `oras`. For example: ++ +[source,terminal] +---- +$ oras manifest fetch quay.io///: +---- ++ +.Example output ++ +[source,terminal] +---- +{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","artifactType":"application/vnd.unknown.artifact.v1","config":{"mediaType":"application/vnd.oci.empty.v1+json","digest":"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a","size":2,"data":"e30="},"layers":[{"mediaType":"application/vnd.oci.empty.v1+json","digest":"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a","size":2,"data":"e30="}],"annotations":{"org.opencontainers.image.created":"2024-07-11T15:22:42Z","version ":" 8.11"}} +---- \ No newline at end of file diff --git a/modules/org-application-create-api.adoc b/modules/org-application-create-api.adoc new file mode 100644 index 000000000..a01f30c8d --- /dev/null +++ b/modules/org-application-create-api.adoc @@ -0,0 +1,110 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT 
+[id="org-application-create-api"] += Creating an organization application by using the {productname} API + +Organization applications can be created by using the {productname} API. + +[NOTE] +==== +Organization applications can be created by using the API, however OAuth 2 access tokens must be created on the UI. +==== + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationapplication[`POST /api/v1/organization/{orgname}/applications`] endpoint to create a new application for your organization. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "", + "redirect_uri": "", + "application_uri": "", + "description": "", + "avatar_email": "" + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "new-application", "description": "", "application_uri": "", "client_id": "E6GJSHOZMFBVNHTHNB53", "client_secret": "SANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI", "redirect_uri": "", "avatar_email": null} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationapplications[`GET /api/v1/organization/{orgname}/applications`] endpoint to return a list of all organization applications. 
For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"applications": [{"name": "test", "description": "", "application_uri": "", "client_id": "MCJ61D8KQBFS2DXM56S2", "client_secret": "J5G7CCX5QCA8Q5XZLWGI7USJPSM4M5MQHJED46CF", "redirect_uri": "", "avatar_email": null}, {"name": "new-token", "description": "", "application_uri": "", "client_id": "IG58PX2REEY9O08IZFZE", "client_secret": "2LWTWO89KH26P2CO4TWFM7PGCX4V4SUZES2CIZMR", "redirect_uri": "", "avatar_email": null}, {"name": "second-token", "description": "", "application_uri": "", "client_id": "6XBK7QY7ACSCN5XBM3GS", "client_secret": "AVKBOUXTFO3MXBBK5UJD5QCQRN2FWL3O0XPZZT78", "redirect_uri": "", "avatar_email": null}, {"name": "new-application", "description": "", "application_uri": "", "client_id": "E6GJSHOZMFBVNHTHNB53", "client_secret": "SANSWCWSGLVAUQ60L4Q4CEO3C1QAYGEXZK2VKJNI", "redirect_uri": "", "avatar_email": null}]} +---- ++ +Applications can also be returned for a specific client using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationapplication[`GET /api/v1/organization/{orgname}/applications/{client_id}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//applications/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test", "description": "", "application_uri": "", "client_id": "MCJ61D8KQBFS2DXM56S2", "client_secret": "J5G7CCX5QCA8Q5XZLWGI7USJPSM4M5MQHJED46CF", "redirect_uri": "", "avatar_email": null} +---- + +. 
After creation, organization applications can be updated, for example, if you want to add a redirect URI or a new description, using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updateorganizationapplication[`PUT /api/v1/organization/{orgname}/applications/{client_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT "https://quay-server.example.com/api/v1/organization/test/applications/12345" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Updated Application Name", + "redirect_uri": "https://example.com/oauth/callback", + "application_uri": "https://example.com", + "description": "Updated description for the application", + "avatar_email": "avatar@example.com" + }' +---- + +. After creation, application information can be returned by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getapplicationinformation[`GET /api/v1/app/{client_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/app/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "new-application3", "description": "", "uri": "", "avatar": {"name": "new-application3", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "app"}, "organization": {"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "user"}, "is_admin": true, "is_member": true, "teams": {}, "ordered_teams": [], "invoice_email": true, "invoice_email_address": "billing@test-org.com", "tag_expiration_s": 1209600, "is_free_account": true, "quotas": [{"id": 2, "limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 
10737418240, "running_backfill": "complete", "backfill_status": "complete"}}} +---- + +. Organization applications can be deleted with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationapplication[`DELETE /api/v1/organization/{orgname}/applications/{client_id}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/applications/{client_id}" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/org-create-api.adoc b/modules/org-create-api.adoc new file mode 100644 index 000000000..822aa47fc --- /dev/null +++ b/modules/org-create-api.adoc @@ -0,0 +1,53 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-create-api"] += Creating an organization by using the {productname} API + +Use the following procedure to create a new organization using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to create a new organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganization[`POST /api/v1/organization/`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST -H "Authorization: Bearer " -H "Content-Type: application/json" -d '{ + "name": "" + }' "https:///api/v1/organization/" +---- ++ +Example output ++ +[source,terminal] +---- +"Created" +---- + +. 
After creation, organization details can be changed, such as adding an email address, with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationdetails[`PUT /api/v1/organization/{orgname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "email": "", + "invoice_email": , + "invoice_email_address": "" + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test", "email": "new-contact@test-org.com", "avatar": {"name": "test", "hash": "a15d479002b20f211568fd4419e76686d2b88a4980a5b4c4bc10420776c5f6fe", "color": "#aec7e8", "kind": "user"}, "is_admin": true, "is_member": true, "teams": {"owners": {"name": "owners", "description": "", "role": "admin", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}, "can_view": true, "repo_count": 0, "member_count": 1, "is_synced": false}}, "ordered_teams": ["owners"], "invoice_email": true, "invoice_email_address": "billing@test-org.com", "tag_expiration_s": 1209600, "is_free_account": true, "quotas": [{"id": 2, "limit_bytes": 10737418240, "limits": [{"id": 1, "type": "Reject", "limit_percent": 90}]}], "quota_report": {"quota_bytes": 0, "configured_quota": 10737418240, "running_backfill": "complete", "backfill_status": "complete"}} +---- \ No newline at end of file diff --git a/modules/org-create.adoc b/modules/org-create.adoc new file mode 100644 index 000000000..bbf07e298 --- /dev/null +++ b/modules/org-create.adoc @@ -0,0 +1,26 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="org-create"] += Creating an organization by using the UI + +Use the following procedure to create a new organization by using the 
UI. + +.Procedure + +. Log in to your {productname} registry. + +. Click *Organization* in the navigation pane. + +. Click *Create Organization*. + +. Enter an *Organization Name*, for example, `testorg`. + +. Enter an *Organization Email*. + +. Click *Create*. + +Now, your example organization should populate under the *Organizations* page. \ No newline at end of file diff --git a/modules/org-delete-api.adoc b/modules/org-delete-api.adoc new file mode 100644 index 000000000..1f0c6eeb5 --- /dev/null +++ b/modules/org-delete-api.adoc @@ -0,0 +1,41 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-delete-api"] += Deleting an organization by using the {productname} API + +Use the following procedure to delete an organization using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to delete an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteadminedorganization[`DELETE /api/v1/organization/{orgname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https:///api/v1/organization/" +---- + +. The CLI does not return information when deleting an organization from the CLI. 
To confirm deletion, you can check the {productname} UI, or you can enter the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganization[`GET /api/v1/organization/{orgname}`] command to see if details are returned for the deleted organization: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization/" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http:///api/v1/error/not_found", "status": 404} +---- \ No newline at end of file diff --git a/modules/org-delete.adoc b/modules/org-delete.adoc new file mode 100644 index 000000000..9867a5513 --- /dev/null +++ b/modules/org-delete.adoc @@ -0,0 +1,34 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="org-delete"] += Deleting an organization by using the UI + +Use the following procedure to delete an organization using the v2 UI. + +.Procedure + +. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. + +. Click the *More Actions* drop down menu. + +. Click *Delete*. ++ +[NOTE] +==== +On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion. +==== + +. Confirm that you want to permanently delete the organization by typing *confirm* in the box. + +. Click *Delete*. ++ +After deletion, you are returned to the *Organizations* page. 
++ +[NOTE] +==== +You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*. +==== \ No newline at end of file diff --git a/modules/org-proxy-cache-configuration-api.adoc b/modules/org-proxy-cache-configuration-api.adoc new file mode 100644 index 000000000..131b0c746 --- /dev/null +++ b/modules/org-proxy-cache-configuration-api.adoc @@ -0,0 +1,66 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-proxy-cache-configuration-api"] += Configuring a proxy cache for an organization by using the {productname} API + +Proxy caching for an organization can be configured by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createproxycacheconfig[`POST /api/v1/organization/{orgname}/proxycache`] endpoint to create a proxy cache configuration for the organization. ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//proxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#validateproxycacheconfig[`POST /api/v1/organization/{orgname}/validateproxycache`] endpoint to validate the proxy configuration: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization/{orgname}/validateproxycache" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "upstream_registry": "" + }' +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getproxycacheconfig[`GET /api/v1/organization/{orgname}/proxycache`] endpoint to obtain information about the proxcy cache. 
For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"upstream_registry": "quay.io", "expiration_s": 86400, "insecure": false} +---- + + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteproxycacheconfig[`DELETE /api/v1/organization/{orgname}/proxycache`] endpoint to ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization/{orgname}/proxycache" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +"Deleted" +---- \ No newline at end of file diff --git a/modules/org-team-member-api.adoc b/modules/org-team-member-api.adoc new file mode 100644 index 000000000..143e9e8af --- /dev/null +++ b/modules/org-team-member-api.adoc @@ -0,0 +1,66 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="org-member-info-api"] += Retrieving organization member information by using the API + +Information about organization members can be retrieved by using the {productname} API. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationmembers[`GET /api/v1/organization/{orgname}/members`] to return a list of organization members: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"members": [{"name": "quayadmin", "kind": "user", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "teams": [{"name": "owners", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}}], "repositories": ["testrepo"]}, {"name": "testuser", "kind": "user", "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "teams": [{"name": "owners", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}}], "repositories": []}]} +---- + +. You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationcollaborators[`GET /api/v1/organization/{orgname}/collaborators`] to return a list of organization collaborators: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization/{orgname}/collaborators" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"collaborators": [user-test]} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationmember[`GET /api/v1/organization/{orgname}/members/{membername}`] endpoint to obtain more specific information about a user: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "quayadmin", "kind": "user", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "teams": [{"name": "owners", "avatar": {"name": "owners", "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", "color": "#c7c7c7", "kind": "team"}}], "repositories": ["testrepo"]} +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#removeorganizationmember[`DELETE /api/v1/organization/{orgname}/members/{membername}`] endpoint to delete a team member. ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//members/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/organization-management-api.adoc b/modules/organization-management-api.adoc new file mode 100644 index 000000000..333984b4b --- /dev/null +++ b/modules/organization-management-api.adoc @@ -0,0 +1,4 @@ +[id="organization-management-api"] += Establishing quota with the {productname} API + +Organizations can be created and managed through API endpoints. With the {productname} API, you can create organizations, view organization information, create proxy caches for an organization, edit users with access to the organization, change organization details, delete organizations, and more. 
\ No newline at end of file diff --git a/modules/organization-settings-v2-ui.adoc b/modules/organization-settings-v2-ui.adoc new file mode 100644 index 000000000..35798b493 --- /dev/null +++ b/modules/organization-settings-v2-ui.adoc @@ -0,0 +1,40 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="organization-settings-v2-ui"] += Organization settings + +With +ifeval::["{context}" == "quay-io"] += {quayio}, +endif::[] +ifeval::["{context}" == "use-quay"] += {productname}, +endif::[] +some basic organization settings can be adjusted by using the UI. This includes adjusting general settings, such as the e-mail address associated with the organization, and _time machine_ settings, which allows administrators to adjust when a tag is garbage collected after it is permanently deleted. + +Use the following procedure to alter your organization settings by using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. Click the *Settings* tab. + +. Optional. Enter the email address associated with the organization. + +. Optional. Set the allotted time for the *Time Machine* feature to one of the following: ++ +* *A few seconds* +* *A day* +* *7 days* +* *14 days* +* *A month* + +. Click *Save*. 
\ No newline at end of file diff --git a/modules/organizations-overview.adoc b/modules/organizations-overview.adoc new file mode 100644 index 000000000..8b46a0948 --- /dev/null +++ b/modules/organizations-overview.adoc @@ -0,0 +1,44 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +// Needs updated when v2 UI panel is available +:_content-type: CONCEPT +[id="organizations-overview"] +ifeval::["{context}" == "quay-io"] += {quayio} organizations overview +endif::[] +ifeval::["{context}" == "use-quay"] += {productname} organizations overview +endif::[] + +In +ifeval::["{context}" == "quay-io"] += {quayio} +endif::[] +ifeval::["{context}" == "use-quay"] += {productname} +endif::[] +an organization is a grouping of users, repositories, and teams. It provides a means to organize and manage access control and permissions within the registry. With organizations, administrators can assign roles and permissions to users and teams. Other useful information about organizations includes the following: + +* You cannot have an organization embedded within another organization. To subdivide an +organization, you use teams. + +* Organizations cannot contain users directly. You must first add a team, and then add one or more users to each team. ++ +[NOTE] +==== +Individual users can be added to specific repositories inside of an organization. Consequently, those users are not members of any team on the *Repository Settings* page. The *Collaborators View* on the *Teams and Memberships* page shows users who have direct access to specific repositories within the organization without needing to be part of that organization specifically. +==== + +* Teams can be set up in organizations as just members who use the repositories and +associated images, or as administrators with special privileges for managing +the Organization. + +ifeval::["{context}" == "quay-io"] +Users can create their own organization to share repositories of container images. 
This can be done through the {quayio} UI. +endif::[] +ifeval::["{context}" == "use-quay"] +Users can create their own organization to share repositories of container images. This can be done through the {productname} UI, or by the {productname} API if you have an OAuth token. +endif::[] + diff --git a/modules/other-oci-artifacts-with-quay.adoc b/modules/other-oci-artifacts-with-quay.adoc new file mode 100644 index 000000000..5b4cec0a8 --- /dev/null +++ b/modules/other-oci-artifacts-with-quay.adoc @@ -0,0 +1,64 @@ +// Document included in the following assemblies: + +// Configuring Red hat Quay + +:_content-type: REFERENCE +[id="other-oci-artifacts-with-quay"] += Open Container Initiative configuration fields + +.Additional OCI artifact configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field |Type |Description +|**FEATURE_REFERRERS_API** |Boolean| Enables OCI 1.1's referrers API. +|=== + +.Example OCI referrers enablement YAML +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: True +# ... +---- + + +//// +[id="configuring-oci-artifact-types"] +== Configuring additional artifact types + +Other OCI artifact types that are not supported by default can be added to your {productname} deployment by using the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field. 
+ +Use the following reference to add additional OCI artifact types: + +.OCI artifact types configuration +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +ALLOWED_OCI_ARTIFACT_TYPES: + : + - + - + + : + - + - +---- + +For example, you can add Singularity (SIF) support by adding the following to your `config.yaml` file: + +.Example OCI artifact type configuration +[source,yaml] +---- +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.dev.cosign.simplesigning.v1+json + application/vnd.cncf.helm.config.v1+json: + - application/tar+gzip + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +---- +[NOTE] +==== +When adding OCI artifact types that are not configured by default, {productname} administrators will also need to manually add support for cosign and Helm if desired. +==== +//// \ No newline at end of file diff --git a/modules/permissions-intro.adoc b/modules/permissions-intro.adoc new file mode 100644 index 000000000..ed5bc2d5f --- /dev/null +++ b/modules/permissions-intro.adoc @@ -0,0 +1,8 @@ += Permissions + +Organizations are organized into a set of Teams and can provide access to a subset of the repositories under that namespace. Permissions for users within an organization can be set for individuals, teams, and robot accounts. + +[NOTE] +==== +Permissions are only applied to new users and teams when a new repository is created. +==== diff --git a/modules/poc-creating-dual-stack-cn.adoc b/modules/poc-creating-dual-stack-cn.adoc new file mode 100644 index 000000000..c8820f834 --- /dev/null +++ b/modules/poc-creating-dual-stack-cn.adoc @@ -0,0 +1,16 @@ +:_content-type: PROCEDURE +[id="poc-creating-dual-stack-cn"] += Creating a dual-stack container network + +Use the following procedure to create a new container network that is dual-stack. 
+ +.Procedure + +* Unless there is a requirement that the _default_ container network use both IPv4 and IPv6, it is suggested that a _new_ container network is created that is dual-stack. As a root user, create a new container network that is dual-stack by running the following command: ++ +[source,terminal] +---- +# podman network create ip-dual-stack --ipv6 +---- ++ +With this command, new containers use this network are a natively dual-stack. \ No newline at end of file diff --git a/modules/prepare-ocp-for-bare-metal-builds.adoc b/modules/prepare-ocp-for-bare-metal-builds.adoc new file mode 100644 index 000000000..979cf81b1 --- /dev/null +++ b/modules/prepare-ocp-for-bare-metal-builds.adoc @@ -0,0 +1,226 @@ +:_content-type: PROCEDURE +[id="prepare-ocp-for-bare-metal-builds"] += Configuring bare metal builds for {productname-ocp} + +Use the following procedure to configure _bare metal builds_ for {productname-ocp}. + +[NOTE] +==== +If you are using the {productname} Operator on {ocp} with a managed `route` component in your `QuayRegistry` CRD, see "{productname-ocp} _builds_ limitations with self-managed _routes_". +==== + +.Prerequisites + +* You have an {ocp} cluster provisioned with the {productname} Operator running. +* You have set the `tls` component to `unmanaged` and uploaded custom SSL/TLS certificates to the {productname} Operator. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#ssl-tls-quay-overview[SSL and TLS for {productname}]. +* You are logged into {ocp} as a cluster administrator. + +.Procedure + +. Enter the following command to create a project where Builds will be run, for example, `bare-metal-builder`: ++ +[source,terminal] +---- +$ oc new-project bare-metal-builder +---- + +. 
Create a new `ServiceAccount` in the
On {ocp} versions earlier than 4.15, for example, 4.14, enter the following command: ++ +[source,terminal] +---- +$ oc get sa openshift-apiserver-sa --namespace=openshift-apiserver -o json | jq '.secrets[] | select(.name | contains("openshift-apiserver-sa-token"))'.name +---- + +.. Obtain the `ca.crt` key value from the secret in the {ocp} Web Console. The value begins with *"-----BEGIN CERTIFICATE-----"`*. + +.. Import the CA to {productname}. Ensure that the name of this file matches the `K8S_API_TLS_CA` field used in Step 9. + +. Create the following `SecurityContextConstraints` resource for the `ServiceAccount`: ++ +[source,yaml] +---- +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: quay-builder +priority: null +readOnlyRootFilesystem: false +requiredDropCapabilities: null +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- '*' +supplementalGroups: + type: RunAsAny +volumes: +- '*' +allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: +- '*' +allowedUnsafeSysctls: +- '*' +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: quay-builder-scc + namespace: bare-metal-builder +rules: +- apiGroups: + - security.openshift.io + resourceNames: + - quay-builder + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: quay-builder-scc + namespace: bare-metal-builder +subjects: +- kind: ServiceAccount + name: quay-builder +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: quay-builder-scc +---- + +. Update the `config.yaml` file of your {productname-ocp} deployment to include an appropriate _bare metal builds_ configuration by using the {ocp} web console. + +.. 
Click *Operators* -> *Installed Operators* -> *Red Hat Quay* -> *Quay Registry*. + +.. Click the name of your registry, for example, *example-registry*. + +.. Under *Config Bundle Secret*, click the name of your configuration bundle, for example, *extra-ca-certificate-config-bundle-secret*. + +.. Click *Actions* -> *Edit Secret*. + +.. Add the following information to your {productname} `config.yaml` file, replacing each value with information that is relevant to your specific installation: ++ +[source,yaml] +---- +FEATURE_USER_INITIALIZE: true +BROWSER_API_CALLS_XHR_ONLY: false +SUPER_USERS: +- +FEATURE_USER_CREATION: false +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_BUILD_SUPPORT: True +BUILDMAN_HOSTNAME: ${BUILDMAN_HOSTNAME}:443 <1> +BUILD_MANAGER: +- ephemeral +- ALLOWED_WORKER_COUNT: 10 + ORCHESTRATOR_PREFIX: buildman/production/ + ORCHESTRATOR: + REDIS_HOST: <2> + REDIS_PASSWORD: "" + REDIS_SSL: false + REDIS_SKIP_KEYSPACE_EVENT_SETUP: false + EXECUTORS: + - EXECUTOR: kubernetes + BUILDER_NAMESPACE: <3> + K8S_API_SERVER: <4> + K8S_API_TLS_CA: <5> + VOLUME_SIZE: 8G + KUBERNETES_DISTRIBUTION: openshift + CONTAINER_MEMORY_LIMITS: 1G <6> + CONTAINER_CPU_LIMITS: 300m <7> + CONTAINER_MEMORY_REQUEST: 1G <8> + CONTAINER_CPU_REQUEST: 300m <9> + NODE_SELECTOR_LABEL_KEY: beta.kubernetes.io/instance-type + NODE_SELECTOR_LABEL_VALUE: n1-standard-4 + CONTAINER_RUNTIME: podman + SERVICE_ACCOUNT_NAME: + SERVICE_ACCOUNT_TOKEN: <10> + QUAY_USERNAME: + QUAY_PASSWORD: + WORKER_IMAGE: /quay-quay-builder + WORKER_TAG: + BUILDER_VM_CONTAINER_IMAGE: registry.redhat.io/quay/quay-builder-qemu-rhcos-rhel8:v3.9.10-4 + SETUP_TIME: 180 + MINIMUM_RETRY_THRESHOLD: 0 + SSH_AUTHORIZED_KEYS: <11> + - + - + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- +<1> Obtained by running the following command: `$ oc get route quayregistry-quay-builder -n ${QUAY_PROJECT} -o jsonpath='{.spec.host}'`. +<2> The hostname for your Redis service. +<3> Set to match the name of your _bare metal builds_ namespace. 
This example used `bare-metal-builder`. +<4> The `K8S_API_SERVER` is obtained by running `$ oc cluster-info`. +<5> You must manually create and add your custom CA cert, for example, `K8S_API_TLS_CA: /conf/stack/extra_ca_certs/build-cluster.crt`. +<6> Defaults to `5120Mi` if left unspecified. +<7> Defaults to `1000m` if left unspecified. +<8> Defaults to `3968Mi` if left unspecified. +<9> Defaults to `500m` if left unspecified. +<10> Obtained when running `$ oc create sa`. +<11> Allows public SSH keys to be added to the build environment for remote troubleshooting access. This key, or keys, should correspond to the private key that an admin or developer will use to SSH into the build worker for debugging purposes. This key can be obtained by establishing an SSH connection to the remote host using a specific SSH key and port. For example: `$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost`. + +. Restart your {productname} registry to enable the _builds_ feature. \ No newline at end of file diff --git a/modules/preparing-system-deploy-quay.adoc b/modules/preparing-system-deploy-quay.adoc new file mode 100644 index 000000000..f8e812b6f --- /dev/null +++ b/modules/preparing-system-deploy-quay.adoc @@ -0,0 +1,6 @@ + +:_content-type: CONCEPT +[id="preparing-system-deploy-quay"] += Preparing your system to deploy {productname} + +For a proof of concept {productname} deployment, you must configure port mapping, a database, and Redis prior to deploying the registry. Use the following procedures to prepare your system to deploy {productname}. 
\ No newline at end of file diff --git a/modules/proc_configure-user-settings.adoc b/modules/proc_configure-user-settings.adoc new file mode 100644 index 000000000..12cc46d45 --- /dev/null +++ b/modules/proc_configure-user-settings.adoc @@ -0,0 +1,58 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-manage-settings"] += User settings + +The *User Settings* page provides users a way to set their email address, password, account type, set up desktop notifications, select an avatar, delete an account, adjust the _time machine_ setting, and view billing information. + +[id="navigating-user-settings-page"] +== Navigating to the User Settings page + +Use the following procedure to navigate to the *User Settings* page. + +.Procedure + +. On {quayio}, click your username in the header. + +. Select *Account Settings*. You are redirected to the *User Settings* page. + +[id="adjusting-user-settings"] +== Adjusting user settings + +Use the following procedure to adjust user settings. + +.Procedure + +* To change your email address, select the current email address for *Email Address*. In the pop-up window, enter a new email address, then, click *Change Email*. A verification email will be sent before the change is applied. + +* To change your password, click *Change password*. Enter the new password in both boxes, then click *Change Password*. + +* Change the account type by clicking *Individual Account*, or the option next to *Account Type*. In some cases, you might have to leave an organization prior to changing the account type. + +* Adjust your desktop notifications by clicking the option next to *Desktop Notifications*. Users can either enable, or disable, this feature. + +* You can delete an account by clicking *Begin deletion*. You cannot delete an account if you have an active plan, or if you are a member of an organization where you are the only administrator. 
You must confirm deletion by entering the namespace. ++ +[IMPORTANT] +==== +Deleting an account is not reversible and will delete all of the account's data including repositories, created build triggers, and notifications. +==== + +* You can set the _time machine_ feature by clicking the drop-box next to *Time Machine*. This feature dictates the amount of time after a tag is deleted that the tag is accessible in time machine before being garbage collected. After selecting a time, click *Save Expiration Time*. + +[id="billing-information"] +== Billing information + +You can view billing information on the *User Settings*. In this section, the following information is available: + +* *Current Plan*. This section denotes the current plan {quayio} plan that you are signed up for. It also shows the amount of private repositories you have. + +* *Invoices*. If you are on a paid plan, you can click *View Invoices* to view a list of invoices. + +* *Receipts*. If you are on a paid plan, you can select whether to have receipts for payment emailed to you, another user, or to opt out of receipts altogether. + diff --git a/modules/proc_container-security-operator-setup.adoc b/modules/proc_container-security-operator-setup.adoc new file mode 100644 index 000000000..e52a5b4cd --- /dev/null +++ b/modules/proc_container-security-operator-setup.adoc @@ -0,0 +1,158 @@ +:_content-type: PROCEDURE +[id="container-security-operator-setup"] += Scanning pod images with the Container Security Operator + +The link:https://operatorhub.io/operator/container-security-operator[Container Security Operator] (CSO) is an addon for the Clair security scanner available on {ocp} and other Kubernetes platforms. With the CSO, users can scan container images associated with active pods for known vulnerabilities. + +[NOTE] +==== +The CSO does not work without {productname} and Clair. 
+==== + +The Container Security Operator (CSO) includes the following features: + +* Watches containers associated with pods on either specified or all namespaces. + +* Queries the container registry where the containers came from for vulnerability information, provided that an image's registry supports image scanning, such a a {productname} registry with Clair scanning. + +* Exposes vulnerabilities through the `ImageManifestVuln` object in the Kubernetes API. + +[NOTE] +==== +To see instructions on installing the CSO on Kubernetes, +select the *Install* button from the link:https://operatorhub.io/operator/container-security-operator[Container Security OperatorHub.io] page. +==== + +[id="running-cso-openshift"] +== Downloading and running the Container Security Operator in {ocp} + +Use the following procedure to download the Container Security Operator (CSO). + +[NOTE] +==== +In the following procedure, the CSO is installed in the `marketplace-operators` namespace. This allows the CSO to be used in all namespaces of your {ocp} cluster. +==== + +.Procedure + +. On the {ocp} console page, select *Operators* -> *OperatorHub* and search for *Container Security Operator*. + +. Select the Container Security Operator, then select *Install* to go to the *Create Operator Subscription* page. + +. Check the settings (all namespaces and automatic approval strategy, by default), and select +*Subscribe*. The *Container Security* appears after a few moments on the *Installed Operators* screen. + +. Optional: you can add custom certificates to the CSO. In this example, create a certificate +named `quay.crt` in the current directory. Then, run the following command to add the certificate to the CSO: ++ +[source,terminal] +---- +$ oc create secret generic container-security-operator-extra-certs --from-file=quay.crt -n openshift-operators +---- ++ +[NOTE] +==== +You must restart the Operator pod for the new certificates to take effect. +==== + +. Navigate to *Home* -> *Overview*. 
A link to *Image Vulnerabilities* appears under the status section, with a listing of the number of vulnerabilities found so far. Select the link to see a security breakdown, as shown in the following image: ++ +image:cso-dashboard.png[Access CSO scanning data from the {ocp} dashboard] ++ +[IMPORTANT] +==== +The Container Security Operator currently provides broken links for Red Hat Security advisories. For example, the following link might be provided: `https://access.redhat.com/errata/RHSA-2023:1842%20https://access.redhat.com/security/cve/CVE-2023-23916`. The `%20` in the URL represents a space character, however it currently results in the combination of the two URLs into one incomplete URL, for example, `https://access.redhat.com/errata/RHSA-2023:1842` and `https://access.redhat.com/security/cve/CVE-2023-23916`. As a temporary workaround, you can copy each URL into your browser to navigate to the proper page. This is a known issue and will be fixed in a future version of {productname}. +==== + +. You can do one of two things at this point to follow up on any detected vulnerabilities: + +.. Select the link to the vulnerability. You are taken to the container registry, {productname} or other registry where the container came from, where you can see information about the vulnerability. The following figure shows an example of detected vulnerabilities from a Quay.io registry: ++ +image:cso-registry-vulnerable.png[The CSO points you to a registry containing the vulnerable image] ++ +.. Select the namespaces link to go to the *Image Manifest Vulnerabilities* page, where you can see the name of the selected image and all namespaces where that image is running. 
The following figure indicates that a particular vulnerable image is running in two namespaces: ++ +image:cso-namespace-vulnerable.png[View namespaces a vulnerable image is running in] + +After executing this procedure, you are made aware of what images are vulnerable, what you must do to fix those vulnerabilities, and every namespace that the image was run in. Knowing this, you can perform the following actions: + +* Alert users who are running the image that they need to correct the vulnerability. +* Stop the images from running by deleting the deployment or the object that started the pod that the image is in. ++ +[NOTE] +==== +If you delete the pod, it might take a few minutes for the vulnerability to reset on the dashboard. +==== + +[id="query-image-vulnerabilities-from-cli"] +== Querying image vulnerabilities from the CLI + +Use the following procedure to query image vulnerabilities from the command line interface (CLI). + +.Procedure + +. Enter the following command to query for detected vulnerabilities: ++ +[source,terminal] +---- +$ oc get vuln --all-namespaces +---- ++ +.Example output ++ +[source,terminal] +---- +NAMESPACE NAME AGE +default sha256.ca90... 6m56s +skynet sha256.ca90... 9m37s +---- + +. Optional. To display details for a particular vulnerability, identify a specific vulnerability and its namespace, and use the `oc describe` command. The following example shows an active container whose image includes an RPM package with a vulnerability: ++ +[source,terminall] +---- +$ oc describe vuln --namespace sha256.ac50e3752... +---- +.Example output ++ +[source,terminal] +---- +Name: sha256.ac50e3752... +Namespace: quay-enterprise +... +Spec: + Features: + Name: nss-util + Namespace Name: centos:7 + Version: 3.44.0-3.el7 + Versionformat: rpm + Vulnerabilities: + Description: Network Security Services (NSS) is a set of libraries... 
+---- + +[id="uninstalling-container-security-operator"] +== Uninstalling the Container Security Operator + +To uninstall the Container Security Operator from your {ocp} deployment, you must uninstall the Operator and delete the `imagemanifestvulns.secscan.quay.redhat.com` custom resource definition (CRD). Without removing the CRD, image vulnerabilities are still reported on the {ocp} *Overview* page. + +.Procedure + +. On the {ocp} web console, click *Operators* -> *Installed Operators*. + +. Click the menu kebab of the Container Security Operator. + +. Click *Uninstall Operator*. Confirm your decision by clicking *Uninstall* in the popup window. + +. Remove the `imagemanifestvulns.secscan.quay.redhat.com` custom resource definition by entering the following command: ++ +[source,terminal] +---- +$ oc delete customresourcedefinition imagemanifestvulns.secscan.quay.redhat.com +---- ++ +.Example output ++ +[source,terminal] +---- +customresourcedefinition.apiextensions.k8s.io "imagemanifestvulns.secscan.quay.redhat.com" deleted +---- \ No newline at end of file diff --git a/modules/proc_creating-ocp-secret-for-oauth-token.adoc b/modules/proc_creating-ocp-secret-for-oauth-token.adoc new file mode 100644 index 000000000..a5c8e2b8b --- /dev/null +++ b/modules/proc_creating-ocp-secret-for-oauth-token.adoc @@ -0,0 +1,21 @@ +:_content-type: PROCEDURE +[id="creating-ocp-secret-for-oauth-token"] += Creating an {ocp} secret for the OAuth token + +In this procedure, you will add the previously obtained access token to communicate with your {productname} deployment. The access token will be stored within {ocp} as a secret. + +.Prerequisites + +* You have set up {productname} and obtained an access token. +* You have deployed the {qbo} on {ocp}. +* An {ocp} 4.6 or greater environment for which you have cluster administrator permissions. +* You have installed the OpenShift CLI (oc). 
+ +.Procedure + +* Create a secret that contains the access token in the `openshift-operators` namespace: ++ +[source,terminal] +---- +$ oc create secret -n openshift-operators generic quay-integration --from-literal=token=<access_token> +---- diff --git a/modules/proc_creating-quay-integration-cr.adoc b/modules/proc_creating-quay-integration-cr.adoc new file mode 100644 index 000000000..bec445e36 --- /dev/null +++ b/modules/proc_creating-quay-integration-cr.adoc @@ -0,0 +1,78 @@ +:_content-type: PROCEDURE +[id="creating-quay-integration-cr"] += Creating the QuayIntegration custom resource + +In this procedure, you will create a `QuayIntegration` custom resource, which can be completed from either the web console or from the command line. + +.Prerequisites + +* You have set up {productname} and obtained an access token. +* You have deployed the {qbo} on {ocp}. +* An {ocp} 4.6 or greater environment for which you have cluster administrator permissions. +* Optional: You have installed the OpenShift CLI (oc). + +[id="creating-quay-integration-custom-resource-cli"] +== Optional: Creating the QuayIntegration custom resource using the CLI + +Follow this procedure to create the `QuayIntegration` custom resource using the command line. + +.Procedure + +. Create a `quay-integration.yaml`: ++ +---- +$ touch quay-integration.yaml +---- + +. Use the following configuration for a minimal deployment of the `QuayIntegration` custom resource: ++ +[source,yaml] +---- + apiVersion: quay.redhat.com/v1 + kind: QuayIntegration + metadata: + name: example-quayintegration + spec: + clusterID: openshift <1> + credentialsSecret: + namespace: openshift-operators + name: quay-integration <2> + quayHostname: https://QUAY_URL <3> + insecureRegistry: false <4> +---- +<1> The `clusterID` value should be unique across the entire ecosystem. This value is required and defaults to `openshift`. +<2> The `credentialsSecret` property refers to the namespace and name of the secret containing the token that was previously created.
+<3> Replace the `QUAY_URL` with the hostname of your {productname} instance. +<4> If {productname} is using self signed certificates, set the property to `insecureRegistry: true`. + +For a list of all configuration fields, see "QuayIntegration configuration fields". + +. Create the `QuayIntegration` custom resource: ++ +---- +$ oc create -f quay-integration.yaml +---- + +[id="creating-quay-integration-custom-resource-web-console"] +== Optional: Creating the QuayIntegration custom resource using the web console + +Follow this procedure to create the `QuayIntegration` custom resource using the web console. + +.Procedure + +. Open the *Administrator* perspective of the web console and navigate to *Operators* -> *Installed Operators*. + +. Click *Red Hat {qbo}*. + +. On the *Details* page of the {qbo}, click *Create Instance* on the *Quay Integration* API card. + +. On the *Create QuayIntegration* page, enter the following required information in either *Form view* or *YAML view*: ++ +* *Name*: The name that will refer to the `QuayIntegration` custom resource object. +* *Cluster ID*: The ID associated with this cluster. This value should be unique across the entire ecosystem. Defaults to `openshift` if left unspecified. +* *Credentials secret*: Refers to the namespace and name of the secret containing the token that was previously created. +* *Quay hostname*: The hostname of the Quay registry. + +For a list of all configuration fields, see "link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#quay-integration-config-fields[QuayIntegration configuration fields]". + +After the `QuayIntegration` custom resource is created, your {ocp} cluster will be linked to your {productname} instance. Organizations within your {productname} registry should be created for the related namespace for the {ocp} environment. 
diff --git a/modules/proc_deploy_quay_add.adoc b/modules/proc_deploy_quay_add.adoc index d9b09ff2b..ddbedb6ff 100644 --- a/modules/proc_deploy_quay_add.adoc +++ b/modules/proc_deploy_quay_add.adoc @@ -1,6 +1,6 @@ = Deploying {productname} -To deploy the {productname} service on the nodes in your cluster, you use the same quay container +To deploy the {productname} service on the nodes in your cluster, you use the same `Quay` container you used to create the configuration file. The differences here are that you: * Identify directories where the configuration files and data are stored @@ -14,7 +14,7 @@ three or more nodes (for example, quay01, quay02, and quay03). ==== The resulting {productname} service will listen on regular port 8080 and SSL port 8443. This is different from previous releases of {productname}, which listened on -standard ports 80 and 443, respectively. +standard ports 80 and 443, respectively. In this document, we map 8080 and 8443 to standard ports 80 and 443 on the host, respectively. Throughout the rest of this document, we assume you have mapped the ports in this way. ==== @@ -46,29 +46,31 @@ run {productname} as a container, as follows: + [NOTE] ==== -Add `-e DEBUGLOG=true` to the `docker run` command line for -the quay container to enable debug level logging. +Add `-e DEBUGLOG=true` to the `podman run` command line for +the `Quay` container to enable debug level logging. +Add `-e IGNORE_VALIDATION=true` to bypass validation during +the startup process. ==== + [subs="verbatim,attributes"] -``` -# docker run --restart=always -p 443:8443 -p 80:8080 \ +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ --sysctl net.core.somaxconn=4096 \ --privileged=true \ -v /mnt/quay/config:/conf/stack:Z \ -v /mnt/quay/storage:/datastorage:Z \ - -d quay.io/redhat/quay:v{productmin} -``` + -d {productrepo}/{quayimage}:{productminv} +---- -.
**Open browser to UI**: Once the quay container has started, go to your web browser and -open the URL, to the node running the quay container. +. **Open browser to UI**: Once the `Quay` container has started, go to your web browser and +open the URL, to the node running the `Quay` container. . **Log into {productname}**: Using the superuser account you created during configuration, log in and make sure {productname} is working properly. . **Add more {productname} nodes**: At this point, you have the option of adding more nodes to this {productname} cluster by simply -going to each node, then adding the tarball and starting the quay container as just shown. +going to each node, then adding the tarball and starting the `Quay` container as just shown. . **Add optional features**: To add more features to your {productname} cluster, such as Clair images scanning and Repository Mirroring, continue on to the next section. @@ -76,15 +78,7 @@ Clair images scanning and Repository Mirroring, continue on to the next section. == Add Clair image scanning to {productname} Setting up and deploying Clair image scanning for your -{productname} deployment requires the following basic steps: - -* Restarting the {productname} Setup tool -* Creating authentication keys for Clair -* Setting up a database for Clair -* Deploying the Clair container - -These steps are described in -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#quay-security-scanner[{productname} Security Scanning with Clair]. +{productname} deployment is described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-v4[Clair Security Scanning] [[add-repo-mirroring]] == Add repository mirroring {productname} @@ -99,26 +93,30 @@ To add the repository mirroring feature to your {productname} cluster: `repomirror` option. * Select "Enable Repository Mirroring in the {productname} Setup tool. 
* Log into your {productname} Web UI and begin creating mirrored repositories -as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index[Repository Mirroring in Red Hat Quay]. +as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index[Repository Mirroring in {productname}]. The following procedure assumes you already have a running {productname} cluster on an OpenShift platform, with the {productname} Setup container running in your browser: -. **Start the repo mirroring worker**: Start the quay container in `repomirror` mode as follows: +. **Start the repo mirroring worker**: Start the `Quay` container in `repomirror` mode. +This example assumes you have configured TLS communications using a certificate +that is currently stored in `/root/ca.crt`. If not, then remove the line that adds +`/root/ca.crt` to the container: + [subs="verbatim,attributes"] -``` -$ docker run -d --name mirroring-worker \ - -v /mnt/quay/config:/conf/stack quay.io/redhat/quay:v{productmin} \ - repomirror -``` +---- +$ sudo podman run -d --name mirroring-worker \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /root/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt \ + {productrepo}/{quayimage}:{productminv} repomirror +---- . **Log into config tool**: Log into the {productname} Setup Web UI (config tool). -. **Enable repository mirroring**: Scroll down the the Repository Mirroring section +. **Enable repository mirroring**: Scroll down the Repository Mirroring section and select the Enable Repository Mirroring check box, as shown here: . **Select HTTPS and cert verification**: If you want to require HTTPS communications and verify certificates during mirroring, select this check box. image:repo_mirror_config.png[Enable mirroring and require HTTPS and verified certificates] . **Save configuration**: Select the Save Configuration Changes button. 
Repository mirroring should now be enabled on your {productname} cluster. Refer to -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index[Repository Mirroring in {productname}] for details on setting up your own mirrored container image repositories. +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index[Repository Mirroring in {productname}] for details on setting up your own mirrored container image repositories. diff --git a/modules/proc_deploy_quay_common_superuser.adoc b/modules/proc_deploy_quay_common_superuser.adoc new file mode 100644 index 000000000..b7b67eebe --- /dev/null +++ b/modules/proc_deploy_quay_common_superuser.adoc @@ -0,0 +1,88 @@ += {productname} superuser + +A `superuser` is a Quay user account that has extended privileges, including the ability to: + +* Manage users +* Manage organizations +* Manage service keys +* View the change log +* Query the usage logs +* Create globally visible user messages + +== Adding a superuser to Quay using the UI + +This section covers how to add a superuser using the Quay UI. To add a superuser using the command line interface, see the following section. + +. Start the `Quay` container in configuration mode, loading the existing configuration as a volume: ++ +[subs="verbatim,attributes"] +.... +$ sudo podman run --rm -it --name quay_config \ + -p 8080:8080 \ + -p 443:8443 \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} config secret +.... + +. Under the `Access Settings` section of the UI, enter the name of the user (in this instance, `quayadmin`) in the `Super Users` field and click `Add`. + +. Validate and download the `configuration` file and then terminate the `Quay` container that is running in config mode. 
Extract the `config.yaml` file to the configuration directory and restart the `Quay` container in registry mode: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman rm -f quay +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ +--name=quay \ +-v $QUAY/config:/conf/stack:Z \ +-v $QUAY/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + + +== Editing the config.yaml file to add a superuser + +You can also add a superuser by editing the `config.yaml` file directly. The list of superuser accounts is stored as an array in the field `SUPER_USERS`. + +* Stop the container registry if it is running, and add the `SUPER_USERS` array to the `config.yaml` file: ++ +[source,yaml] +---- +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +SUPER_USERS: + - quayadmin +... +---- + +== Accessing the superuser admin panel + +. Restart the Quay registry: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman rm -f quay +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ +--name=quay \ +-v $QUAY/config:/conf/stack:Z \ +-v $QUAY/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- +. Access the Super User Admin Panel by clicking on the current user's name or avatar in the top right-hand corner of the UI. If the user has been added as a superuser, an extra item is presented in the drop-down list called Super User Admin Panel. ++ +image:super-user-admin-panel.png[Super User Admin Panel] + +=== Creating a globally visible user message + +Using the Superuser Admin Panel, you can create `Normal`, `Warning`, or `Error` messages for your organization. + +. Click your user name in the top right-hand corner of the UI. Select `Super User Admin Panel`. + +. On the {productname} Management page, click `Globally visible user messages` on the left hand pane. + +. Click `Create Message` to show a drop-down menu containing `Normal`, `Warning`, and `Error` message types: ++ +image:create-new-message.png[Creating a new message] + +.
Enter a message by selecting `Click to set message`, then click `Create Message`. + +Messages can be deleted by clicking `Options` and then `Delete Message`. diff --git a/modules/proc_deploy_quay_guided.adoc b/modules/proc_deploy_quay_guided.adoc index c15ede127..1b04e7e03 100644 --- a/modules/proc_deploy_quay_guided.adoc +++ b/modules/proc_deploy_quay_guided.adoc @@ -1,49 +1,32 @@ = Configuring {productname} -Before running the {productname} service as a container, you need to use that same quay container +Before running the {productname} service as a container, you need to use that same `Quay` container to create the configuration file (`config.yaml`) needed to deploy {productname}. To do that, you pass a `config` argument and a password (replace my-secret-password here) -to the quay container. +to the `Quay` container. Later, you use that password to log into the configuration tool as the user `quayconfig`. Here's an example of how to do that: -. **Start quay in config mode**: On the first quay node, run the following: +. **Start quay in setup mode**: On the first quay node, run the following: + [subs="verbatim,attributes"] .... -# docker run --privileged=true -p 8443:8443 -d quay.io/redhat/quay:v{productmin} config my-secret-password +# sudo podman run --rm -it --name quay_config -p 8080:8080 {productrepo}/{quayimage}:{productminv} config my-secret-password .... -. **Open browser**: When the quay configuration tool starts up, open a browser to the URL and port 8443 +. **Open browser**: When the quay configuration tool starts up, open a browser to the URL and port 8080 of the system you are running the configuration tool on -(for example https://myquay.example.com:8443). You are prompted for a username and password. +(for example http://myquay.example.com:8080). You are prompted for a username and password. . **Log in as quayconfig**: When prompted, enter the `quayconfig` username and password -(the one from the `docker run` command line). 
+(the one from the `podman run` command line). -. **Choose configuration mode**: You are prompted to choose to either create a new -{productname} configuration file or edit an existing one in these two modes: - -* **Start New Registry Setup**: The result of this selection is the creation of a new -configuration file (`config.yaml`) and optional `ssl.cert` and `ssl.key` files. -Those files are bundled into a tarball file you can use to -actually deploy all your {productname} nodes. - -* **Modify an existing configuration**: With this selection, you are prompted -to provide an existing tarball and -modify it before you use it to start your {productname} nodes. -+ -The following figure shows an example of the resulting `Choose an option` page: -+ -image:Figure00x.png[Identifying the database {productname} will use] -+ -For an initial setup, you are asked to identify the database type. -For a subsequent configuration, you are prompted for a tarball containing the -`config.yaml` and credential files (optional). Then you -can continue on with the configuration. - -. **Identify the database**: For the initial setup, add the following information about the type and location of the database to be used by {productname}: +. **Fill in the required fields**: When you start the config tool without mounting an existing configuration bundle, +you will be booted into an initial setup session. In a setup session, default values will be filled automatically. The following +steps will walk through how to fill out the remaining required fields. + +. **Identify the database**: For the initial setup, you must include the following information about the type and location of the database to be used by {productname}: + * **Database Type**: Choose MySQL or PostgreSQL. MySQL will be used in the basic example; PostgreSQL is used with the high availability {productname} on OpenShift examples. 
@@ -63,18 +46,6 @@ The following figure shows an example of the screen for identifying the database + image:Figure01.png[Identifying the database {productname} will use] -+ -. **Validate database**: Select `Validate Database Settings`, and proceed to the next screen. - -. **Create {productname} superuser**: You need to set up an account with superuser privileges to {productname}, to use for editing {productname} configuration settings. That information includes a Username, Email address, and Password (entered twice). -+ -The following figure shows an example of the {productname} Setup screen for setting up a {productname} superuser account: -+ -image:Figure03.png[Set up a Quay superuser account to do Quay configuration] - -+ -Select `Create Super User`, and proceed to the next screen. - . **Identify the Redis hostname, Server Configuration and add other desired settings**: Other setting you can add to complete the setup are as follows. More settings for high availability {productname} deployment that for the basic deployment: + @@ -86,7 +57,7 @@ as described at the end of this procedure. + **Here are the settings you need to consider:** + -* **Custom SSL Certificates**: Upload custom or self-signed SSL certificates for use by {productname}. See link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#using-ssl-to-protect-quay[Using SSL to protect connections to {productname}] for details. Recommended for high availability. +* **Custom SSL Certificates**: Upload custom or self-signed SSL certificates for use by {productname}. See link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#using-ssl-to-protect-quay[Using SSL to protect connections to {productname}] for details. Recommended for high availability. 
+ [IMPORTANT] ==== @@ -100,7 +71,7 @@ as described in link:https://docs.docker.com/registry/insecure/[Test an Insecure * **Basic Configuration**: Upload a company logo to rebrand your {productname} registry. * **Server Configuration**: Hostname or IP address to reach the {productname} service, along with TLS indication (recommended for production installations). The Server Hostname is required for all {productname} deployments. TLS termination can be done in two different ways: -- On the instance itself, with all TLS traffic governed by the nginx server in the quay container (recommended). +- On the instance itself, with all TLS traffic governed by the nginx server in the `Quay` container (recommended). - On the load balancer. This is not recommended. Access to {productname} could be lost if the TLS setup is not done correctly on the load balancer. * **Data Consistency Settings**: Select to relax logging consistency guarantees to improve performance and availability. @@ -112,12 +83,15 @@ selected repositories from remote registries. Before you can enable repository m start the repository mirroring worker as described later in this procedure. * **Registry Storage**: Identify the location of storage. A variety of cloud and local storage options are available. Remote storage is required for high availability. Identify the Ceph storage location if you are following the example for {productname} high availability storage. On OpenShift, the example uses Amazon S3 storage. +* **Action Log Storage Configuration**: Action logs are stored in the {productname} +database by default. If you have a large amount of action logs, +you can have those logs directed to Elasticsearch for later search and analysis. 
+To do this, change the value of Action Logs Storage to Elasticsearch and configure +related settings as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configure-action-log-storage[Configure action log storage]. * **Action Log Rotation and Archiving**: Select to enable log rotation, which moves logs older than 30 days into storage, then indicate storage area. -* **Security Scanner**: Enable security scanning by selecting a security scanner endpoint and authentication key. To setup Clair to do image scanning, refer to link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#clair-initial-setup[Clair Setup] and link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#configuring-clair-for-tls[Configuring Clair]. Recommended for high availability. +* **Security Scanner**: Enable security scanning by selecting a security scanner endpoint and authentication key. To setup Clair to do image scanning, refer to link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/#clair-initial-setup[Clair Setup] and link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/#configuring-clair-for-tls[Configuring Clair]. Recommended for high availability. * **Application Registry**: Enable an additional application registry that includes things like Kubernetes manifests or Helm charts (see the link:https://github.com/app-registry[App Registry specification]). -* **BitTorrent-based download**: Allow all registry images to be downloaded using BitTorrent protocol (using the link:https://github.com/coreos/quayctl[`quayctl`] tool). -* **rkt Conversion**: Allow `rkt fetch` to be used to fetch images from {productname} registry. 
Public and private GPG2 keys are needed (see link:https://coreos.com/quay-enterprise/docs/latest/aci-signing-keys.html[Generating signing keys for ACI conversion] for details. -This field is deprecated. +* **rkt Conversion**: Allow `rkt fetch` to be used to fetch images from {productname} registry. Public and private GPG2 keys are needed. This field is deprecated. * **E-mail**: Enable e-mail to use for notifications and user password resets. * **Internal Authentication**: Change default authentication for the registry from Local Database to LDAP, Keystone (OpenStack), JWT Custom Authentication, or External Application Token. * **External Authorization (OAuth)**: Enable to allow GitHub or GitHub Enterprise to authenticate to the registry. @@ -130,9 +104,9 @@ you must explicitly whitelist the namespaces for which it is enabled. * **Dockerfile Build Support**: Enable to allow users to submit Dockerfiles to be built and pushed to {productname}. This is not recommended for multitenant environments. -. **Save the changes**: Select `Save Configuration Changes`. You are presented with the following Download Configuration screen: +. **Validate the changes**: Select `Validate Configuration Changes`. If validation is successful, you will be presented with the following Download Configuration modal: + -image:Figure04.png[Download the {productname} configuration tarball to the local system] +image:Figure05.png[Download the {productname} configuration tarball to the local system] . **Download configuration**: Select the `Download Configuration` button and save the tarball (`quay-config.tar.gz`) to a local directory to use later to start {productname}. 
diff --git a/modules/proc_deploy_quay_ha.adoc b/modules/proc_deploy_quay_ha.adoc index 9e67a5f58..4fc4e0d2b 100644 --- a/modules/proc_deploy_quay_ha.adoc +++ b/modules/proc_deploy_quay_ha.adoc @@ -1,18 +1,18 @@ == Set up Redis -With Red Hat Enterprise Linux server installed on each of the three {productname} +With Red Hat Enterprise Linux 8 server installed on each of the three {productname} systems (quay01, quay02, and quay03), install and start the Redis service as follows: -. **Setup Docker**: Install, enable, and start the docker service as shown here (see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[Getting Docker in RHEL 7] for details): +//. **Setup Docker**: Install, enable, and start the docker service as shown here (see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[Getting Docker in RHEL 7] for details): -. **Install / Deploy link:https://access.redhat.com/containers/?tab=overview#/registry.access.redhat.com/rhscl/redis-32-rhel7)[Redis]**: Run Redis as a container on each of the three quay0* systems: +. **Install / Deploy link:https://access.redhat.com/containers/?tab=overview#/registry.access.redhat.com/rhel8/redis-5)[Redis]**: Run Redis as a container on each of the three quay0* systems: + .... # mkdir -p /var/lib/redis # chmod 777 /var/lib/redis -# docker run -d --restart=always -p 6379:6379 \ +# sudo podman run -d -p 6379:6379 \ -v /var/lib/redis:/var/lib/redis/data:Z \ - registry.access.redhat.com/rhscl/redis-32-rhel7 + registry.redhat.io/rhel8/redis-5 .... . **Check redis connectivity**: You can use the `telnet` command to test connectivity to the redis service. Type MONITOR (to begin monitoring the service) and QUIT to exit: @@ -30,3 +30,8 @@ QUIT +OK Connection closed by foreign host. .... 
+ +[NOTE] +==== +For more information on using `podman` and restarting containers, see the section "Using podman" earlier in this document. +==== diff --git a/modules/proc_deploy_quay_ha_ceph.adoc b/modules/proc_deploy_quay_ha_ceph.adoc index 2ec1a50c4..6921e85d4 100644 --- a/modules/proc_deploy_quay_ha_ceph.adoc +++ b/modules/proc_deploy_quay_ha_ceph.adoc @@ -1,27 +1,27 @@ == Set Up Ceph -For this Red Hat Quay configuration, we create a three-node Ceph cluster, with +For this {productname} configuration, we create a three-node Ceph cluster, with several other supporting nodes, as follows: * ceph01, ceph02, and ceph03 - Ceph Monitor, Ceph Manager and Ceph OSD nodes * ceph04 - Ceph RGW node * ceph05 - Ceph Ansible administration node -For details on installing Ceph nodes, see link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux[Installing Red Hat Ceph Storage on Red Hat Enterprise Linux]. +For details on installing Ceph nodes, see link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux[Installing Red Hat Ceph Storage on Red Hat Enterprise Linux]. -Once you have set up the Ceph storage cluster, create a Ceph Object Gateway (also referred to as a RADOS gateway). See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. +Once you have set up the Ceph storage cluster, create a Ceph Object Gateway (also referred to as a RADOS gateway). See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. 
=== Install each Ceph node On ceph01, ceph02, ceph03, ceph04, and ceph05, do the following: -. Review prerequisites for setting up Ceph nodes in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#requirements-for-installing-rhcs[Requirements for Installing Red Hat Ceph Storage]. In particular: +. Review prerequisites for setting up Ceph nodes in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#requirements-for-installing-rhcs[Requirements for Installing Red Hat Ceph Storage]. In particular: + -* Decide if you want to use link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#considerations-for-using-a-raid-controller-with-osd-nodes[RAID controllers on OSD nodes]. +* Decide if you want to use link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#considerations-for-using-a-raid-controller-with-osd-nodes[RAID controllers on OSD nodes]. + -* Decide if you want a separate cluster network for your link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#verifying-the-network-configuration-for-red-hat-ceph-storage[Ceph Network Configuration]. +* Decide if you want a separate cluster network for your link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#verifying-the-network-configuration-for-red-hat-ceph-storage[Ceph Network Configuration]. . Prepare OSD storage (ceph01, ceph02, and ceph03 only). Set up the OSD storage on the three OSD nodes (ceph01, ceph02, and ceph03). 
See OSD Ansible Settings -in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] +in link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] for details on supported storage types that you will enter into your Ansible configuration later. For this example, a single, unformatted block device (`/dev/sdb`), that is separate from the operating system, is configured on each @@ -29,7 +29,7 @@ of the OSD nodes. If you are installing on metal, you might want to add an extra . Install Red Hat Enterprise Linux Server edition, as described in the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/installation_guide/[RHEL 7 Installation Guide]. -. Register and subscribe each Ceph node as described in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/#registering-red-hat-ceph-storage-nodes-to-cdn-and-attaching-subscriptions[Registering Red Hat Ceph Storage Nodes]. Here is how to subscribe to the necessary repos: +. Register and subscribe each Ceph node as described in the link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/#registering-red-hat-ceph-storage-nodes-to-cdn-and-attaching-subscriptions[Registering Red Hat Ceph Storage Nodes]. Here is how to subscribe to the necessary repos: + ``` @@ -102,7 +102,7 @@ systems: # cp site.yml.sample site.yml ``` . Edit the copied group_vars/all.yml file. 
See General Ansible Settings in -link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.1] for details. For example: +link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.1] for details. For example: + ``` ceph_origin: repository @@ -115,7 +115,7 @@ public_network: 192.168.122.0/24 + Note that your network device and address range may differ. . Edit the copied `group_vars/osds.yml` file. See the OSD Ansible Settings in -link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] for details. In this example, the second disk device (`/dev/sdb`) on each OSD node is +link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html-single/installation_guide_for_red_hat_enterprise_linux/index#installing-a-red-hat-ceph-storage-cluster[Table 3.2] for details. In this example, the second disk device (`/dev/sdb`) on each OSD node is used for both data and journal storage: + ``` @@ -203,4 +203,4 @@ Hello World! === Install the Ceph Object Gateway On the Ansible system (ceph05), configure a -Ceph Object Gateway to your Ceph Storage cluster (which will ultimately run on ceph04). See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. +Ceph Object Gateway to your Ceph Storage cluster (which will ultimately run on ceph04). 
See link:https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/{producty}/html/installation_guide_for_red_hat_enterprise_linux/deploying-red-hat-ceph-storage#installing-the-ceph-object-gateway[Installing the Ceph Object Gateway] for details. diff --git a/modules/proc_deploy_quay_ha_lbdb.adoc b/modules/proc_deploy_quay_ha_lbdb.adoc index 242657d1d..f968a469c 100644 --- a/modules/proc_deploy_quay_ha_lbdb.adoc +++ b/modules/proc_deploy_quay_ha_lbdb.adoc @@ -1,29 +1,40 @@ -== Set up Load Balancer and Database +:_content-type: PROCEDURE +[id="setting-up-load-balancer-database"] +== Setting up the HAProxy load balancer and the PostgreSQL database -On the first two systems (q01 and q02), install the haproxy load balancer and postgresql database. Haproxy will be configured as the access point and load balancer for the following services running on other systems: +Use the following procedure to set up the HAProxy load balancer and the PostgreSQL database. +.Prerequisites + +* You have installed the Podman or Docker CLI. + +.Procedure + +. On the first two systems, `q01` and `q02`, install the HAProxy load balancer and the PostgreSQL database. This configures HAProxy as the access point and load balancer for the following services running on other systems: ++ * {productname} (ports 80 and 443 on B systems) * Redis (port 6379 on B systems) * RADOS (port 7480 on C systems) -Because the services on the two systems run as containers, you also need the docker service running. Here's how to set up the A systems: - -. **Install and start docker service**: Install, start, and enable the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[docker service]. -. **Open ports for haproxy service**: Open all haproxy ports in SELinux and selected haproxy ports in the firewall: +//. 
**Install and start docker service**: Install, start, and enable the link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[docker service]. +. Open all HAProxy ports in SELinux and selected HAProxy ports in the firewall: + -``` +[source,terminal] +---- # setsebool -P haproxy_connect_any=on # firewall-cmd --permanent --zone=public --add-port=6379/tcp --add-port=7480/tcp success # firewall-cmd --reload success -``` -. **Set up link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/load_balancer_administration/index#install_haproxy_example1[haproxy service]**: Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. Here are examples of defaults and added frontend and backend settings: +---- +//. **Set up link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html-single/load_balancer_administration/index#install_haproxy_example1[haproxy service]**: Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis, and Ceph RADOS services. Here are examples of defaults and added frontend and backend settings: + +. Configure the `/etc/haproxy/haproxy.cfg` to point to the systems and ports providing the {productname}, Redis and Ceph RADOS services. 
The following are examples of defaults and added frontend and backend settings: + -``` +---- #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will # use if not designated in their block @@ -74,75 +85,138 @@ backend be_rdgw server ceph02 ceph02:7480 check server ceph03 ceph03:7480 check backend be_redis -server quay01 quay01:6380 check inter 1s -server quay02 quay02:6380 check inter 1s -server quay03 quay03:6380 check inter 1s -``` - +server quay01 quay01:6379 check inter 1s +server quay02 quay02:6379 check inter 1s +server quay03 quay03:6379 check inter 1s +---- + -Once the new haproxy.cfg file is in place, restart the haproxy service. +After the new `haproxy.cfg` file is in place, restart the HAProxy service by entering the following command: + -``` +[source,terminal] +---- # systemctl restart haproxy -``` - -. **Install / Deploy a Database**: Install, enable and start the link:https://access.redhat.com/containers/?tab=overview#/registry.access.redhat.com/rhscl/postgresql-96-rhel7)[PostgreSQL] database container. The following commands will: +---- +. Create a folder for the PostgreSQL database by entering the following command: + -* Start the PostgreSQL database with the user, password and database all set. Data from the container will be stored on the host system in the `/var/lib/pgsql/data` directory. -+ -* List available extensions. -+ -* Create the pg_trgm extension. -+ -* Confirm the extension is installed -+ -``` +[source,terminal] +---- $ mkdir -p /var/lib/pgsql/data +---- + +. Set the following permissions for the `/var/lib/pgsql/data` folder: ++ +[source,terminal] +---- $ chmod 777 /var/lib/pgsql/data -$ sudo docker run -d --name postgresql_database \ +---- + +. 
Enter the following command to start the PostgreSQL database: ++ +[source,terminal] +---- +$ sudo podman run -d --name postgresql_database \ -v /var/lib/pgsql/data:/var/lib/pgsql/data:Z \ -e POSTGRESQL_USER=quayuser -e POSTGRESQL_PASSWORD=quaypass \ -e POSTGRESQL_DATABASE=quaydb -p 5432:5432 \ - rhscl/postgresql-96-rhel7 + registry.redhat.io/rhel8/postgresql-13:1-109 +---- ++ +[NOTE] +==== +Data from the container will be stored on the host system in the `/var/lib/pgsql/data` directory. +==== -$ sudo docker exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_available_extensions" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +. List the available extensions by entering the following command: ++ +[source,terminal] +---- +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_available_extensions" | psql' +---- ++ +.Example output ++ +[source,terminal] +---- name | default_version | installed_version | comment -----------+-----------------+-------------------+---------------------------------------- adminpack | 1.0 | | administrative functions for PostgreSQL ... +---- -$ sudo docker exec -it postgresql_database /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | /opt/rh/rh-postgresql96/root/usr/bin/psql -d quaydb' +. Create the `pg_trgm` extension by entering the following command: ++ +[source,terminal] +---- +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | psql -d quaydb' +---- -$ sudo docker exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_extension" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +. 
Confirm that the `pg_trgm` extension has been created by entering the following command: ++ +[source,terminal] +---- +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_extension" | psql' +---- ++ +.Example output ++ +[source,terminal] +---- extname | extowner | extnamespace | extrelocatable | extversion | extconfig | extcondition ---------+----------+--------------+----------------+------------+-----------+-------------- plpgsql | 10 | 11 | f | 1.0 | | pg_trgm | 10 | 2200 | t | 1.3 | | (2 rows) +---- -$ sudo docker exec -it postgresql_database /bin/bash -c 'echo "ALTER USER quayuser WITH SUPERUSER;" | /opt/rh/rh-postgresql96/root/usr/bin/psql' +. Alter the privileges of the Postgres user `quayuser` and grant them the `superuser` role to give the user unrestricted access to the database: ++ +[source,terminal] +---- +$ sudo podman exec -it postgresql_database /bin/bash -c 'echo "ALTER USER quayuser WITH SUPERUSER;" | psql' +---- ++ +.Example output ++ +[source,terminal] +---- ALTER ROLE +---- -``` - -. **Open the firewall**: If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: - +. If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: + -``` +[source,terminal] +---- # firewall-cmd --permanent --zone=trusted --add-port=5432/tcp -success +---- ++ +[source,terminal] +---- # firewall-cmd --reload -success -``` - -. **Test PostgreSQL Connectivity**: Use the `psql` command to test connectivity to the PostgreSQL database. Try this on a remote system as well, to make sure you can access the service remotely: +---- +. Optional. If you do not have the `postgres` CLI package installed, install it by entering the following command: + -``` +[source,terminal] +---- # yum install postgresql -y +---- +. 
Use the `psql` command to test connectivity to the PostgreSQL database. ++ +[NOTE] +==== +To verify that you can access the service remotely, run the following command on a remote system. +==== ++ +---- # psql -h localhost quaydb quayuser +---- ++ +.Example output ++ +[source,terminal] +---- Password for user test: psql (9.2.23, server 9.6.5) WARNING: psql version 9.2, server version 9.6. @@ -150,4 +224,4 @@ WARNING: psql version 9.2, server version 9.6. Type "help" for help. test=> \q -``` +---- diff --git a/modules/proc_deploy_quay_local_ipv6.adoc b/modules/proc_deploy_quay_local_ipv6.adoc new file mode 100644 index 000000000..b376d72a1 --- /dev/null +++ b/modules/proc_deploy_quay_local_ipv6.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="poc-deploy-quay-local-ipv6"] += Configuring the Podman CNI to use IPv6 + +In some cases, you might want to run a local instance of {productname} to use IPv6. This setup is common for development or testing purposes. + +By default, the Podman network for a root user does not use IPv6, and instead defaults to use IPv4. You can configure the Podman Container Network Interface (CNI) to use both IPv4 and IPv6, which allows for a local instance of {productname} using IPv6. + +[discrete] +[id="additional-resources"] +== Additional resources + +* link:https://access.redhat.com/solutions/6196301[How to configure the default podman container network for the root user to use both IPv4 and IPv6]. \ No newline at end of file diff --git a/modules/proc_deploy_quay_openshift.adoc b/modules/proc_deploy_quay_openshift.adoc deleted file mode 100644 index 82a83b969..000000000 --- a/modules/proc_deploy_quay_openshift.adoc +++ /dev/null @@ -1,509 +0,0 @@ -[[set-up-red-hat-quay-services]] -= Set up {productname} services - -Deploying {productname} on OpenShift requires you to create a set of yaml files. -Although the `oc` command is used to configure the {productname} registry here, -you could use the OpenShift web UI instead, if you prefer. 
- -Refer to Appendix A for the contents of these yaml files. - -Here are a few -things to keep in mind: - -* Your OpenShift account must have permission to create namespaces -at the cluster scope. - -* {productname} runs under its own namespace inside a Kubernetes cluster, so that needs to be created first. You can create it through the `New project` in the OpenShift web console or using quay-enterprise-namespace.yaml (as described here). - -* You need a working enterprise-quality database. -In our example, we illustrate PostgreSQL -(version 9.4 or above is required, although we recommend 9.6). - -* You can use an existing Redis service (needed for build logs and the {productname} tutorial) or start one -as described in this procedure. - -Here are the major steps, detailed below, to complete a Red Hat Quay deployment on OpenShift: - -. Set up the Red Hat Quay namespace and secrets -. Create the Red Hat Quay database -. Create Red Hat Quay roles and privileges -. Create the Redis deployment -. Prepare to configure Red Hat Quay -. Start the Red Hat Quay configuration user interface -. Deploy the Red Hat Quay configuration -. Add Clair image scanning -. Add repository mirroring - -[[set-up-namespaces-secrets]] -== Set up {productname} namespaces and secrets - -. **Get {productname} yaml files**: Create a set of yaml files in a directory on your local system -from the contents shown in Appendix A. Study each file to determine where you might need to make modifications. -You will use `oc create` to create the needed resources from those files. - -. **Log in with oc cli**. Login as a user with cluster scope permissions to the OpenShift cluster. For example: -+ -``` -$ oc login -u system:admin -``` - -. **Create namespace**. Run `oc create` `quay-enterprise-namespace.yaml` and then make -`quay-enterprise` the current project. 
All objects will be deployed to this namespace/project: -+ -``` -$ oc create -f quay-enterprise-namespace.yaml -namespace "quay-enterprise" created -$ oc project quay-enterprise -``` - -. **Create the secret for the {productname} configuration and app**: Create the following secrets. -During {productname} configuration, the config.yaml, and optionally the ssl.cert and ssl.key, files -are added to the application's secret, so they can be included with the resulting {productname} application: -+ -``` -$ oc create -f quay-enterprise-config-secret.yaml -secret/quay-enterprise-config-secret created -$ oc create secret generic quay-enterprise-secret -``` - -. **Create the secret for quay.io**. -This pull secret provides credentials to pull containers from the Quay.io registry. -Refer to link:https://access.redhat.com/solutions/3533201[Accessing Red Hat {productname}] to get -the credentials you need to add to the quay-enterprise-redhat-quay-pull-secret.yaml file, then run `oc create`: -+ -``` -$ oc create -f quay-enterprise-redhat-quay-pull-secret.yaml -secret/redhat-quay-pull-secret created -``` - -. **Create the database**. If you are not using your own enterprise-quality -database (recommended), this procedure illustrates how to set up a Postgresql database -on an OpenShift cluster. 
This entails creating AWS storage, a postgres deployment, -and postgres service, then adding an extension to the database (see the description of -`quay-storageclass.yaml` in Appendix A for information on adding encryption to your volumes): -+ -``` -$ oc create -f quay-storageclass.yaml -storageclass.storage.k8s.io/quay-storageclass created -$ oc create -f db-pvc.yaml -persistentvolumeclaim/postgres-storage created -$ oc create -f postgres-deployment.yaml -deployment.extensions/postgres-new created -$ oc create -f postgres-service.yaml -service/postgres created -``` -+ -``` -$ oc get pods -n quay-enterprise -NAME READY STATUS RESTARTS AGE -postgres-xxxxxxxxxx-xxxxx 1/1 Running 0 3m26s -``` -+ -Run the following command, replacing the name of the postgres pod with your pod: -+ -``` -$ oc exec -it postgres-xxxxxxxxxx-xxxxx -n quay-enterprise -- /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm" | /opt/rh/rh-postgresql10/root/usr/bin/psql -d quay' -``` -+ -[NOTE] -==== -The `-d database_name` must not be omitted. If it is, -the extension will be created on the default PostgreSQL -database. -==== - -. **Create a serviceaccount for the database**: Create a serviceaccount and grant -it anyuid privilege. -Running the PostgreSQL deployment under anyuid lets you add -persistent storage to the deployment and allow it to store db metadata. - -+ -``` -# oc create serviceaccount postgres -n quay-enterprise -serviceaccount/postgres created -# oc adm policy add-scc-to-user anyuid -z system:serviceaccount:quay-enterprise:postgres \ -scc "anyuid" added to: ["system:serviceaccount:quay-enterprise:system:serviceaccount:quay-enterprise:postgres"] -``` - -. **Create the role and the role binding**: {productname} has native Kubernetes -integrations. These integrations require Service Account to have access to the -Kubernetes API. When Kubernetes RBAC is enabled, Role -Based Access Control policy manifests also have to be deployed. 
This role will -be used to run {productname} and also to write the config.yaml file that {productname} creates at -the end of the web interface setup: -+ -``` -$ oc create -f quay-servicetoken-role-k8s1-6.yaml -$ oc create -f quay-servicetoken-role-binding-k8s1-6.yaml -``` - -. **Add privilege**: Make sure that the service account has root privileges, because {productname} runs strictly under root (this will be changed in the future versions). Throughout this example, the namespace is `quay-enterprise`: -+ -``` -$ oc adm policy add-scc-to-user anyuid \ - system:serviceaccount:quay-enterprise:default -``` -. **Create Redis deployment**: If you haven't already deployed Redis, create a `quay-enterprise-redis.yaml` file and deploy it: -+ -``` -$ oc create -f quay-enterprise-redis.yaml -``` - -. **Set up to configure {productname}**: {productname} V3 added a tool for configuring -the {productname} service before deploying it. Although the config tool -is in the same container as the full {productname} service, it is deployed -in a different way, as follows: -+ -``` -$ oc create -f quay-enterprise-config.yaml -$ oc create -f quay-enterprise-config-service-clusterip.yaml -$ oc create -f quay-enterprise-config-route.yaml -``` -+ -The quay configuration container is now set up to be accessed from port 443 from your Web browser. -Before creating the configuration, however, you need to create a route to the permanent {productname} service. -This is because we need the {productname} service's publicly available FQDN when setting up the application. - -. 
**Start the {productname} application**: Identify the {productname} Kubernetes service and create a route for it, then start -the {productname} application as follows: -+ -``` -$ oc create -f quay-enterprise-service-clusterip.yaml -service/quay-enterprise-clusterip created -$ oc create -f quay-enterprise-app-route.yaml -route.route.openshift.io/quay-enterprise created -$ oc create -f quay-enterprise-app-rc.yaml -deployment.extensions/quay-enterprise-app created -``` -+ -[NOTE] -==== -The creation of the {productname} application (quay-enterprise-app pod) -will not complete until you have finished configuring the application. -So don't worry if you see that pod remain in "ContainerCreating" status -until the configuration is done. -At that point, the new configuration is fed to the application and it -will change to the "Running" state. -==== -+ -You will need to know the route to the {productname} application when you -do the configuration step. - -. **Begin to configure {productname}**: Open the public route to the {productname} configuration container in a Web browser. -To see the route to the quay configuration service, type the following: -+ -``` -$ oc get route -n quay-enterprise quay-enterprise-config -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -quay-enterprise-config quay-enterprise-config-quay-enterprise.apps.test.example.com quay-enterprise-config passthrough None -``` -+ -For this example, you would open this URL in your web browser: -https://quay-enterprise-config-quay-enterprise.apps.test.example.com - -. **Log in as quayconfig**: When prompted, enter the username and password -(the password was set as an argument to the quay config container in: -`quay-enterprise-config.yaml`): - -- User Name: **quayconfig** - -- Password: **secret** - -+ -You are prompted to select a configuration mode, as shown in the following figure: -+ -image:Figure00.png[Identifying the database {productname} will use] - - -. 
**Choose configuration mode**: Select "Start new configuration for this cluster" -The result of this selection is the creation of a new -configuration file (`config.yaml`) that you will use later for your {productname} deployment. - -. **Identify the database**: For the initial setup, add the following information about the type and location of the database to be used by {productname}: -- **Database Type**: Choose MySQL or PostgreSQL. PostgreSQL is used with the -example shown here. -- **Database Server**: Identify the IP address or hostname of the database, -along with the port number if it is different from 3306. -- **Username**: Identify a user with full access to the database. -- **Password**: Enter the password you assigned to the selected user. -- **Database Name**: Enter the database name you assigned when you started the database server. -- **SSL Certificate**: For production environments, you should provide an SSL certificate to connect to the database. -+ -To verify the NAME of the service (postgres), type the following: -+ -``` -$ oc get services -n quay-enterprise postgres -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -postgres NodePort 172.30.127.41 5432:32212/TCP 19h -``` -+ -The following figure shows an example of the screen for identifying the database used by {productname}. -The example yaml file sets the database server to `postgres`, -the user name to `username`, the password to `password`, and the database to `quay`: -+ -image:Figure01.png[Identifying the database {productname} will use] - -+ -. **Validate database**: Select `Validate Database Settings` and proceed to the next screen. - -. **Create {productname} superuser**: You need to set up an account with superuser privileges to {productname}, to use for editing {productname} configuration settings. That information includes a Username, Email address, and Password (entered twice). 
-+ -The following figure shows an example of the {productname} Setup screen for setting up a {productname} superuser account: -+ -image:Figure03.png[Set up a {productname} superuser account to do {productname} configuration] -+ -Select `Create Super User`, and proceed to the next screen. - -. **Identify settings**: Go through each of the following settings. The minimum you must enter includes: -+ -- **Server hostname**: The URL to the {productname} service is required. -+ -- **Redis hostname**: The URL or IP address to the Redis service is required. -+ -Here are all the settings you need to consider: -+ -- **Custom SSL Certificates**: Upload custom or self-signed SSL certificates for use by {productname}. See link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#using-ssl-to-protect-quay[Using SSL to protect connections to {productname}] for details. Recommended for high availability. -+ -[IMPORTANT] -==== -Using SSL certificates is recommended for both basic -and high availability deployments. If you decide to -not use SSL, you must configure your container clients -to use your new {productname} setup as an insecure registry -as described in link:https://docs.docker.com/registry/insecure/[Test an Insecure Registry]. -==== - -- **Basic Configuration**: Upload a company logo to rebrand your {productname} registry. -- **Server Configuration**: Hostname or IP address to reach the {productname} service, along with TLS indication (recommended for production installations). 
To get the route to the permanent {productname} service, type the following: -+ -``` -$ oc get route -n quay-enterprise quay-enterprise -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -quay-enterprise quay-enterprise-quay-enterprise.apps.cnegus-ocp.devcluster.openshift.com quay-enterprise-clusterip None -``` -See link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#using-ssl-to-protect-quay[Using SSL to protect connections to {productname}]. TLS termination can be done in two different ways: - ** On the instance itself, with all TLS traffic governed by the nginx server in the quay container (recommended). - ** On the load balancer. This is not recommended. Access to {productname} could be lost if the TLS setup is not done correctly on the load balancer. - -- **Data Consistency Settings**: Select to relax logging consistency guarantees to improve performance and availability. -- **Time Machine**: Allow older image tags to remain in the repository for set periods of time and allow users to select their own tag expiration times. -- **redis**: Identify the hostname or IP address (and optional password) to connect to the redis service used by {productname}. To find the address of the redis service, type the following: -+ -``` -$ oc get services -n quay-enterprise quay-enterprise-redis -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -quay-enterprise-redis ClusterIP 172.30.207.35 6379/TCP 40m -``` -- **Repository Mirroring**: Choose the checkbox to Enable Repository Mirroring. -With this enabled, you can create repositories in your {productname} cluster that mirror -selected repositories from remote registries. Before you can enable repository mirroring, -start the repository mirroring worker as described later in this procedure. -- **Registry Storage**: Identify the location of storage. A variety of cloud and local storage options are available. Remote storage is required for high availability. 
Identify the Ceph storage location -if you are following the example for {productname} high availability storage. On OpenShift, the example uses Amazon S3 storage. -- **Action Log Rotation and Archiving**: Select to enable log rotation, which moves logs older than 30 days into storage, then indicate storage area. -- **Security Scanner**: We recommend setting up the Clair security scanner after -you have completed the initial {productname} deployment. Clair setup is described -after the end of this procedure. -- **Application Registry**: Enable an additional application registry that includes things like Kubernetes manifests or Helm charts (see the link:https://github.com/app-registry[App Registry specification]). -- **BitTorrent-based download**: Allow all registry images to be downloaded using BitTorrent protocol (using the link:https://github.com/coreos/quayctl[`quayctl`] tool). -- **rkt Conversion**: Allow `rkt fetch` to be used to fetch images from the {productname} registry. Public and private GPG2 keys are needed (see link:https://coreos.com/quay-enterprise/docs/latest/aci-signing-keys.html[Generating signing keys for ACI conversion] for details. -This field is deprecated. -- **E-mail**: Enable e-mail to use for notifications and user password resets. -- **Internal Authentication**: Change default authentication for the registry from Local Database to LDAP, Keystone (OpenStack), JWT Custom Authentication, or External Application Token. -- **External Authorization (OAuth)**: Enable to allow GitHub or GitHub Enterprise to authenticate to the registry. -- **Google Authentication**: Enable to allow Google to authenticate to the registry. -- **Access settings**: Basic username/password authentication is enabled by default. 
Other authentication types that can be enabled include: external application tokens (user-generated tokens used with docker or rkt commands), anonymous access (enable for public access to anyone who can get to the registry), user creation (let users create their own accounts), encrypted client password (require command-line user access to include encrypted passwords), and prefix username autocompletion (disable to require exact username matches on autocompletion). -* **Registry Protocol Settings**: Leave the `Restrict V1 Push Support` checkbox enabled -to restrict access to Docker V1 protocol pushes. -Although Red Hat recommends against enabling Docker V1 push protocol, if you do allow it, -you must explicitly whitelist the namespaces for which it is enabled. -- **Dockerfile Build Support**: Enable to allow users to submit Dockerfiles to be built and pushed to {productname}. -This is not recommended for multitenant environments. - -. **Save the changes**: Select `Save Configuration Changes`. You are presented with the following Download Configuration screen: -+ -image:Figure04.png[Download the {productname} configuration tarball to the local system] -. **Download configuration**: Select the `Download Configuration` button and save the -tarball (`quay-config.tar.gz`) to a local directory. Save this file in case you want to deploy -the config files inside manually or just want a record of what you deployed. - -. **Deploy configuration**: Select to rollout the deployment. When prompted, click -`Populate configuration to deployments` to deploy the configuration to the {productname} -application. In a few minutes, you should see a green checkmark and the message "Configuration successfully rolled out and deployed!" -+ -[NOTE] -==== -If for some reason the deployment doesn't complete, try deleting the quay-enterprise-app pod. -OpenShift should create a new pod and pick up the needed configuration. 
If that doesn't work, -unpack the configuration files (`tar xvf quay-config.tar.gz`) and add them -manually to the secret: -``` -$ oc create secret generic quay-enterprise-secret -n quay-enterprise \ - --from-file=config.yaml=/path/to/config.yaml \ - --from-file=ssl.key=/path/to/ssl.key \ - --from-file=ssl.cert=/path/to/ssl.cert -``` -==== - -. **Check pods**: In a couple of minutes (depending on your connection speed), {productname} -should be up and running and the following pods should be visible in the quay-enterprise namespace -You might get a mount error at first, but that should resolve itself: -+ -``` -$ oc get pods -n quay-enterprise -NAME READY STATUS RESTARTS AGE -postgres-5b4c5d7dd9-f8tqz 1/1 Running 0 46h -quay-enterprise-app-7899c7c77f-jrsrc 1/1 Running 0 45h -quay-enterprise-config-app-86bbbcd446-mwmmg 1/1 Running 0 46h -quay-enterprise-redis-684b9d6f55-tx6w9 1/1 Running 0 46h -``` -. **Get the URL for {productname}**: Type the following to get the hostname of the new {productname} installation: -+ -``` -$ oc get routes -n quay-enterprise quay-enterprise -NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD -quay-enterprise quay-enterprise-quay-enterprise.apps.test.example.com quay-enterprise-clusterip None -``` -. **Start using {productname}**: Open the hostname in a web browser to start using {productname}. - -[[add-clair-scanner]] -== Add Clair image scanning to {productname} - -Setting up and deploying Clair image scanning for your -{productname} deployment requires the following basic steps: - -* Setting up a database for Clair -* Creating authentication keys for Clair -* Deploying Clair - -The following procedure assumes you already have a running -{productname} cluster on an OpenShift platform with the {productname} Setup -container running in your browser: - -. **Create the Clair database**: This example configures a postgresql database to -use with the Clair image scanner. 
With the yaml files in the current directory, -review those files for possible modifications, then run the following: -+ -``` -$ oc create -f postgres-clair-storage.yaml -$ oc create -f postgres-clair-deployment.yaml -$ oc create -f postgres-clair-service.yaml -``` -. **Check Clair database objects**: To view the Clair database objects, type: -+ -``` -$ oc get all | grep -i clair -pod/postgres-clair-xxxxxxxxx-xxxx 1/1 Running 0 3m45s -deployment.apps/postgres-clair 1/1 1 1 3m45s -service/postgres-clair NodePort 172.30.193.64 5432:30680/TCP 159m -replicaset.apps/postgres-clair-xx 1 1 1 3m45s - -``` -+ -The output shows that the postgres-clair pod is running, postgres-clair was successfully -deployed, the postgres-clair service is available on the address and port shown, and 1 -replica set of postgres-clair is active. - -. **Open the {productname} Setup UI**: Reload the {productname} Setup UI and -select "Modify configuration for this cluster." - -. **Enable Security Scanning**: Scroll to the Security Scanner section and -select the "Enable Security Scanning" checkbox. From the fields that appear you need to create an -authentication key and enter the security scanner endpoint. Here's how: -+ -* **Generate key**: Click "Create Key" and then type a name for the Clair private key -and an optional expiration date (if blank, the key -never expires). Then select Generate Key. - -* **Copy the Clair key and PEM file**: Save the Key ID (to a notepad or similar) -and download a copy of the Private Key PEM file (named security_scanner.pem) -by selecting "Download Private Key" -(if you lose this key, you will need to generate a new one). - -. **Modify clair-config.yaml**: Return to the shell and the directory holding -your yaml files. Edit the `clair-config.yaml` file and modify the following values: -* **database.options.source**: Make sure the host, port, dbname, user, password, and ssl mode -match those values you set when you create the postgres database for Clair. 
-* **key_id**: Search for KEY_ID_HERE in this file and replace it with the contents of -the key you generated from the {productname} Setup screen in the Security Scanner section -(security_scanner.pem file). -* **private_key_path**: Identify the full path to the security_scanner.pem file you saved earlier. - -. **Create the Clair config secret and service**: Run the following commands, -identifying the paths to your `clair-config.yaml` and `security_scanner.pem` files. -+ -``` -$ oc create secret generic clair-scanner-config-secret \ - --from-file=config.yaml=/path/to/clair-config.yaml \ - --from-file=security_scanner.pem=/path/to/security_scanner.pem -$ oc create -f clair-service.yaml -$ oc create -f clair-deployment.yaml -``` -. **Get the clair-service endpoint**: In this example, the endpoint -of clair-service would be http://172.30.133.227:6060: -+ -``` -$ oc get service clair-service -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -clair-service ClusterIP 172.30.133.227 6060/TCP,6061/TCP 76s -``` - -. **Enter Security Scanner Endpoint**: Return to the {productname} Setup screen -and fill in the clair-service endpoint. For example, http://clair-service:6060 - -. **Deploy configuration**: Select to save the configuration, then deploy it when prompted. - -A green check mark will appear on the screen when the deployment is done. -You can now start using Clair image scanning with {productname}. -For information on the data sources available with the Clair image scanner, see -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/manage_red_hat_quay/clair-initial-setup#clair-sources[Using Clair data sources]. - -[[add-repo-mirroring]] -== Add repository mirroring to {productname} -Enabling repository mirroring allows you to create container image repositories -on your {productname} cluster that exactly match the content of a selected -external registry, then sync the contents of those repositories on -a regular schedule and on demand. 
- -To add the repository mirroring feature to your {productname} cluster: - -* Run the repository mirroring worker. To do this, you start a quay pod with the -`repomirror` option. -* Select "Enable Repository Mirroring" in the {productname} Setup tool. -* Log into your {productname} Web UI and begin creating mirrored repositories -as described in link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index[Repository Mirroring in Red Hat Quay]. - -The following procedure assumes you already have a running -{productname} cluster on an OpenShift platform, with the {productname} Setup -container running in your browser: - -[NOTE] -==== -Instead of running repository mirroring in its own container, you -could start the quay application pod with the environment variable -`QUAY_OVERRIDE_SERVICES=repomirrorworker=true`. This causes the -repomirror worker to run inside the quay application pod instead -of as a separate container. -==== - -. **Start the repo mirroring worker**: Start the quay container in `repomirror` mode as follows: -+ -``` -$ oc create -f quay-enterprise-mirror.yaml -``` -. **Log into config tool**: Log into the {productname} Setup Web UI (config tool). -. **Enable repository mirroring**: Scroll down to the Repository Mirroring section -and select the Enable Repository Mirroring check box, as shown here: -. **Select HTTPS and cert verification**: If you want to require HTTPS -communications and verify certificates during mirroring, select this check box. -image:repo_mirror_config.png[Enable mirroring and require HTTPS and verified certificates] -. **Save configuration**: Select the Save Configuration Changes button. Repository -mirroring should now be enabled on your {productname} cluster. Refer to -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index[Repository Mirroring in {productname}] for details on setting up your own mirrored container image repositories. 
-[NOTE] -==== -The server hostname you set with the config tool may not represent an endpoint -that can be used to copy images to a mirror configured for that server. In that case, -you can set a `REPO_MIRROR_SERVER_HOSTNAME` environment variable to identify the server’s -URL in a way that it can be reached by a skopeo copy command. -==== diff --git a/modules/proc_deploy_quay_poc_conf.adoc b/modules/proc_deploy_quay_poc_conf.adoc new file mode 100644 index 000000000..c2e2b93e8 --- /dev/null +++ b/modules/proc_deploy_quay_poc_conf.adoc @@ -0,0 +1,105 @@ +:_content-type: PROCEDURE +[id="poc-configuring-quay"] += Deploying {productname} config tool + + +Use the following procedure to deploy the {productname} configuration tool. Afterwards, you can navigate to the registry endpoint and generate a configuration file that details all components, including registry settings, the database, and Redis connection parameters. + +.Procedure + +. To generate a configuration file, enter the following command to run the `Quay` container in `config` mode. You must specify a password, for example, the string `secret`: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config -p 80:8080 -p 443:8443 {productrepo}/{quayimage}:{productminv} config secret +---- + +. Use your browser to access the user interface for the configuration tool at `\http://quay-server.example.com`. ++ +[NOTE] +==== +This documentation assumes that you have configured the `quay-server.example.com` hostname in your `/etc/hosts` file. +==== + +. Log in with username and password specified + +. Log in with the username and password you set in Step 1 of xref:poc-configuring-quay[Configuring {productname}]. ++ +[NOTE] +==== +If you followed this procedure, the username is *quayconfig* and the password is *secret*. 
+==== + +[id="poc-quay-setup"] +== {productname} setup + +In the {productname} configuration editor, you must enter the following credentials: + +* Basic configuration +* Server configuration +* Database +* Redis + +[id="poc-basic-configuration"] +=== Basic configuration + +*Basic configuration* includes the *Registry Title*, *Registry Title Short*, *Enterprise Logo URL*, and *Contact Information* fields. + +.Procedure + +[NOTE] +==== +The default values can be used if they are populated. +==== + +. For *Registry Title*, enter *Project Quay*. + +. For *Registry Title Short*, enter *Project Quay*. + +. Optional. Enter a URL for *Enterprise Logo URL*. + +. Optional. Enter contact information, choosing from one of the following options: *URL*, *E-mail*, *IRC*, *Telephone*. + +[id="poc-server-configuration"] +=== Server configuration + +*Server configuration* includes the *Server Hostname* and optional *TLS* fields. + +.Procedure + +* For this deployment, enter `quay-server.example.com`. + +[id="poc-database"] +=== Database + +In the *Database* section, specify the connection details for the database that {productname} uses to store metadata. + +.Procedure + +. For **Database Type,** enter `Postgres`. +. For **Database Server,** enter `quay-server.example.com:5432`. +. For **Username,** enter `quayuser`. +. For **Password,** enter `quaypass`. +. For **Database Name,** enter `quay`. + +[id="poc-redis"] +=== Redis + +The Redis key-value store is used to store real-time events and build logs. + +.Procedure + +. For **Redis Hostname,** enter `quay-server.example.com`. +. For **Redis port,** enter `6379`. This is the default value. +. For **Redis password,** enter `strongpassword`. + +[id="poc-validating"] +== Validate and download configuration + +After all required fields have been set, validate your settings. + +.Procedure + +* Click the *Validate Configuration Changes* button. 
If any errors are reported, continue editing your configuration until the settings are valid and {productname} can connect to your database and Redis servers. ++ +After validation, download the *Configuration* file. Stop the `Quay` container that is running the configuration editor. \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_db.adoc b/modules/proc_deploy_quay_poc_db.adoc new file mode 100644 index 000000000..34ede21fe --- /dev/null +++ b/modules/proc_deploy_quay_poc_db.adoc @@ -0,0 +1,50 @@ +:_content-type: PROCEDURE +[id="poc-configuring-database"] += Configuring the database + +{productname} requires a database for storing metadata. PostgreSQL is used throughout this document. For this deployment, a directory on the local file system to persist database data is used. + +Use the following procedure to set up a PostgreSQL database. + +.Procedure + +. In the installation folder, denoted here by the `$QUAY` variable, create a directory for the database data by entering the following command: ++ +[source,terminal] +---- +$ mkdir -p $QUAY/postgres-quay +---- + +. Set the appropriate permissions by entering the following command: ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx $QUAY/postgres-quay +---- + +. Start the `Postgres` container, specifying the username, password, and database name and port, with the volume definition for database data: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm --name postgresql-quay \ + -e POSTGRESQL_USER=quayuser \ + -e POSTGRESQL_PASSWORD=quaypass \ + -e POSTGRESQL_DATABASE=quay \ + -e POSTGRESQL_ADMIN_PASSWORD=adminpass \ + -p 5432:5432 \ + -v $QUAY/postgres-quay:/var/lib/pgsql/data:Z \ + {postgresimage} +---- + +. 
Ensure that the Postgres `pg_trgm` module is installed by running the following command: ++ +[source,terminal] ++ +---- +$ sudo podman exec -it postgresql-quay /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm" | psql -d quay -U postgres' +---- ++ +[NOTE] +==== +The `pg_trgm` module is required for the `Quay` container. +==== \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_dns.adoc b/modules/proc_deploy_quay_poc_dns.adoc new file mode 100644 index 000000000..70211116e --- /dev/null +++ b/modules/proc_deploy_quay_poc_dns.adoc @@ -0,0 +1,117 @@ += Using DNS + +The "Getting Started" section used dynamic IP addressing for the sake of convenience. If you want your deployment to survive container restarts, which typically result in changed IP addresses, you can implement a naming service. In this example, the link:https://github.com/containers/dnsname[dnsname] plugin is used to allow containers to resolve each other by name. + + +== Introduction to dnsname + +The `dnsname` plugin configures `dnsmasq` on a given CNI network so that containers can resolve each other by name. When configured, the container name and its IP address are added to a network specific hosts file that `dnsmasq` reads in. Similarly, when a container is removed from the network, it will remove the entry from the hosts file. Each CNI network will have its own `dnsmasq` instance. + + +== Deploying dnsname + +* Install the prerequisites for building `dnsname`: ++ +.... +$ sudo dnf install -y git make go dnsmasq +.... + +* Clone and build the repository ++ +.... +$ git clone https://github.com/containers/dnsname.git +$ cd dnsname/ +$ sudo make binaries install  PREFIX=/usr +.... + +This will install the `dnsname` plugin into `/usr/libexec/cni` where your CNI plugins should already exist. 
+ + +== Configuring the default CNI network + +Edit the file `/etc/cni/net.d/87-podman-bridge.conflist` and add a stanza for the `dnsname` plugin, specifying your domain name, in this case 'dns.podman`: + +.... +{ +  "cniVersion": "0.4.0", +  "name": "podman", +  "plugins": [ + ... + { +      "type": "tuning" +    }, +    { +      "type": "dnsname", +      "domainName": "dns.podman", +      "capabilities": { +        "aliases": true +      } +    } +  ] +} +.... + +== Testing the naming service + +Ensure that the naming service is working by running a test to access one container from another, using the fully qualified name: + +.... +$ sudo podman run -dt --name web quay.io/libpod/alpine_nginx:latest +$ sudo podman run -it --name client quay.io/libpod/alpine_nginx:latest curl http://web.dns.podman/ +.... + +The `web` container should respond to the `client` request with the message `podman rulez`. + + +== Using names in the configuration tool + +The same basic commands are used to deploy the database and Redis for {productname}, but in the configuration tool you can now use names rather than IP addresses. + +* Stop the `Quay` container if it is running and start it up in config mode, specifying the config volume if it already exists: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config -p 8080:8080 \ + -v $QUAY/config:/conf/stack:Z \ + {productrepo}/{quayimage}:{productminv} config secret +---- + +* Update the database and Redis configuration to use the container name rather than the IP address: +** **Database Type:** Postgres +** **Database Server:** **postgresql** +** **Username:** user +** **Password:** pass +** **Database Name:** quay +** ... +** **Redis Hostname:** **redis** +** **Redis port:** 6379 (default) +** **Redis password:** strongpassword + +* Validate the updated configuration and then download it. Stop the `Quay` container and extract the newly downloaded configuration bundle, overriding any existing files. 
+ +== Redeploying using the naming service + +Restart the `Quay` container, specifying the appropriate volumes for your configuration data and local storage for image data: + +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -p 8080:8080 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- + +Confirm that the redeployment has been successful after the switch to using the naming service. Log in to quay with the user you created earlier, either using the UI at `quay-server:8080` or via the command line using `sudo podman login --tls-verify=false quay-server:8080`. + +== Configuring a new network + +Instead of using the default network, you can create a new network with the command `sudo podman create network `. To configure the new network to use dnsname, edit the file `/etc/cni/net.d/.conflist` and add the dnsname stanza as in the default example. When running the `podman run` command, specify the new network using the `--network` flag, for example: + +.... +$ sudo podman run -dt --name web --network \ + quay.io/libpod/alpine_nginx:latest + +$ sudo podman run -it --name client --network \ + quay.io/libpod/alpine_nginx:latest curl http://web.dns.podman/ +.... \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_next.adoc b/modules/proc_deploy_quay_poc_next.adoc new file mode 100644 index 000000000..e4b38835c --- /dev/null +++ b/modules/proc_deploy_quay_poc_next.adoc @@ -0,0 +1,25 @@ +:_content-type: CONCEPT + +[id="poc-next-steps"] += Next steps + +The following sections might be useful after deploying a proof of concept version of {productname}. Many of these procedures can be used on a proof of concept deployment, offering insights to {productname}'s features. + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/index[Using {productname}]. 
The content in this guide explains the following concepts: +** Adding users and repositories +** Using image tags +** Building Dockerfiles with build workers +** Setting up build triggers +** Adding notifications for repository events +** and more + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/index[Managing {productname}]. The content in this guide explains the following concepts: + +** Using SSL/TLS +** Configuring action log storage +** Configuring Clair security scanner +** Repository mirroring +** IPv6 and dual-stack deployments +** Configuring OIDC for {productname} +** Geo-replication +** and more \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_redis.adoc b/modules/proc_deploy_quay_poc_redis.adoc new file mode 100644 index 000000000..b8d78d2de --- /dev/null +++ b/modules/proc_deploy_quay_poc_redis.adoc @@ -0,0 +1,19 @@ +:_content-type: PROCEDURE +[id="poc-configuring-redis"] += Configuring Redis + +Redis is a key-value store that is used by {productname} for live builder logs. + +Use the following procedure to deploy the `Redis` container for the {productname} proof of concept. 
+ +.Procedure + +* Start the `Redis` container, specifying the port and password, by entering the following command: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm --name redis \ + -p 6379:6379 \ + -e REDIS_PASSWORD=strongpassword \ + {redisimage} +---- \ No newline at end of file diff --git a/modules/proc_deploy_quay_poc_restart.adoc b/modules/proc_deploy_quay_poc_restart.adoc new file mode 100644 index 000000000..57f97cb4f --- /dev/null +++ b/modules/proc_deploy_quay_poc_restart.adoc @@ -0,0 +1,159 @@ += Restarting containers + +Because the `--restart` option is not fully supported by podman, you can configure `podman` as a systemd service, as described +in +link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#porting-containers-to-systemd-using-podman_building-running-and-managing-containers[Porting containers to systemd using Podman] + + + +== Using systemd unit files with Podman + +By default, Podman generates a unit file for existing containers or pods. You can generate more portable systemd unit files using the `podman generate systemd --new` command. The `--new` flag instructs Podman to generate unit files that create, start and remove containers. + +. Create the systemd unit files from a running {productname} registry as follows: ++ +.... +$ sudo podman generate systemd --new --files --name redis +$ sudo podman generate systemd --new --files --name postgresql-quay +$ sudo podman generate systemd --new --files --name quay +$ sudo podman generate systemd --new --files --name postgresql-clairv4 +$ sudo podman generate systemd --new --files --name clairv4 +.... + +. Copy the unit files to `/usr/lib/systemd/system` for installing them as a root user: ++ +.... 
+$ sudo cp -Z container-redis.service /usr/lib/systemd/system +$ sudo cp -Z container-postgresql-quay.service /usr/lib/systemd/system +$ sudo cp -Z container-quay.service /usr/lib/systemd/system +$ sudo cp -Z container-postgresql-clairv4.service /usr/lib/systemd/system +$ sudo cp -Z container-clairv4.service /usr/lib/systemd/system +.... + + +. Reload systemd manager configuration: ++ +.... +$ sudo systemctl daemon-reload +.... + +. Enable the services and start them at boot time: ++ +.... +$ sudo systemctl enable --now container-redis.service +$ sudo systemctl enable --now container-postgresql-quay.service +$ sudo systemctl enable --now container-quay.service +$ sudo systemctl enable --now container-postgresql-clairv4.service +$ sudo systemctl enable --now container-clairv4.service +.... + + +== Starting, stopping and checking the status of services + +. Check the status of the Quay components: ++ +.... +$ sudo systemctl status container-redis.service +$ sudo systemctl status container-postgresql-quay.service +$ sudo systemctl status container-quay.service +$ sudo systemctl status container-postgresql-clairv4.service +$ sudo systemctl status container-clairv4.service +.... + + +. To stop the Quay component services: ++ +.... +$ sudo systemctl stop container-redis.service +$ sudo systemctl stop container-postgresql-quay.service +$ sudo systemctl stop container-quay.service +$ sudo systemctl stop container-postgresql-clairv4.service +$ sudo systemctl stop container-clairv4.service +.... + +. To start the Quay component services: ++ +.... +$ sudo systemctl start container-redis.service +$ sudo systemctl start container-postgresql-quay.service +$ sudo systemctl start container-quay.service +$ sudo systemctl start container-postgresql-clairv4.service +$ sudo systemctl start container-clairv4.service +.... + +== Testing restart after reboot + +Once you have the services configured and enabled, reboot the system. 
When the system has re-started, use `podman ps` to check that all the containers for the Quay components have been restarted: + +.... +$ sudo podman ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 19 seconds ago Up 18 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110) run-redis 19 seconds ago Up 18 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 18 seconds ago Up 18 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 18 seconds ago Up 17 seconds ago 0.0.0.0:8081->8080/tcp clairv4 +.... + +In this instance, the `Quay` container itself has failed to start up. This is due to the fact that, when security scanning is enabled in Quay, it tries to connect to Clair on startup. However, Clair has not finished initializing and cannot accept connections and, as a result, Quay terminates immediately. To overcome this issue, you need to configure the Quay service to have a dependency on the Clair service, as shown in the following section. + +== Configuring Quay's dependency on Clair + +In the `systemd` service file for Quay, set up a dependency on the Clair service in the `[Unit]` section by setting `After=container-clairv4.service`. To give the Clair container time to initialize, add a delay in the `[Service]` section, for example `RestartSec=30`. Here is an example of the modified Quay file, after configuring the dependency on Clair: + + +./usr/lib/systemd/system/container-quay.service +.... 
+# container-quay.service +# autogenerated by Podman 2.0.5 +# Tue Feb 16 17:02:26 GMT 2021 + +[Unit] +Description=Podman container-quay.service +Documentation=man:podman-generate-systemd(1) +Wants=network.target +After=container-clairv4.service + +[Service] +Environment=PODMAN_SYSTEMD_UNIT=%n +Restart=on-failure +RestartSec=30 +ExecStartPre=/bin/rm -f %t/container-quay.pid %t/container-quay.ctr-id +ExecStart=/usr/bin/podman run --conmon-pidfile %t/container-quay.pid --cidfile %t/container-quay.ctr-id --cgroups=no-conmon -d --rm -p 8080:8080 --name=quay -v /home/user1/quay/config:/conf/stack:Z -v /home/user1/quay/storage:/datastorage:Z registry.redhat.io/quay/quay-rhel8:v3.4.0 +ExecStop=/usr/bin/podman stop --ignore --cidfile %t/container-quay.ctr-id -t 10 +ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/container-quay.ctr-id +PIDFile=%t/container-quay.pid +KillMode=none +Type=forking + +[Install] +WantedBy=multi-user.target default.target +.... + + + +Once you have updated the Quay service configuration, reboot the server and immediately run `podman ps`: + +.... +$ sudo podman ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 29 seconds ago Up 28 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110) run-redis 29 seconds ago Up 28 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 28 seconds ago Up 28 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 28 seconds ago Up 27 seconds ago 0.0.0.0:8081->8080/tcp clairv4 +.... + +Initially, the `Quay` container will not be available, but once the `RestartSec` delay has expired, it should start up: + +.... 
+$ sudo podman ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4e87c7889246 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 35 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +ab9f0e6ad7c3 registry.redhat.io/quay/quay-rhel8:v3.4.0 registry 3 seconds ago Up 2 seconds ago 0.0.0.0:8080->8080/tcp quay +b8fbac1920d4 registry.redhat.io/rhel8/redis-6:1-110) run-redis 35 seconds ago Up 34 seconds ago 0.0.0.0:6379->6379/tcp redis +d959d5bf7a24 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 34 seconds ago Up 34 seconds ago 0.0.0.0:5433->5432/tcp postgresql-clairv4 +e75ff8651dbd registry.redhat.io/quay/clair-rhel8:v3.4.0 34 seconds ago Up 33 seconds ago 0.0.0.0:8081->8080/tcp clairv4 +.... + +The `CREATED` field for the `Quay` container shows the 30 second difference in creation time, as configured in the service definition. + +Log in to the {productname} registry at `quay-server.example.com` and ensure that everything has restarted correctly. diff --git a/modules/proc_deploy_quay_poc_rhel.adoc b/modules/proc_deploy_quay_poc_rhel.adoc new file mode 100644 index 000000000..a12687380 --- /dev/null +++ b/modules/proc_deploy_quay_poc_rhel.adoc @@ -0,0 +1,123 @@ +:_content-type: PROCEDURE +[id="poc-configuring-rhel-server"] += Preparing Red Hat Enterprise Linux for a {productname} proof of concept deployment + +Use the following procedures to configure {rhel} for a {productname} proof of concept deployment. + +[id="poc-install-register-rhel-server"] +== Install and register the RHEL server + +Use the following procedure to configure the {rhel} server for a {productname} proof of concept deployment. + +.Procedure + +. Install the latest {rhel-short} 9 server. You can do a minimal, shell-access only install, or Server plus GUI if you want a desktop. + +. 
Register and subscribe your {rhel-short} server system as described in link:https://access.redhat.com/solutions/253273[How to register and subscribe a RHEL system to the Red Hat Customer Portal using Red Hat Subscription-Manager] + +. Enter the following commands to register your system and list available subscriptions. Choose an available {rhel-short} server subscription, attach to its pool ID, and upgrade to the latest software: ++ +[source,terminal] +---- +# subscription-manager register --username=<username> --password=<password> +# subscription-manager refresh +# subscription-manager list --available +# subscription-manager attach --pool=<pool_id> +# yum update -y +---- + +[id="poc-registry-authentication"] +== Registry authentication + +Use the following procedure to authenticate your registry for a {productname} proof of concept. + +.Procedure + +. Set up authentication to `registry.redhat.io` by following the link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication] procedure. Setting up authentication allows you to pull the `Quay` container. ++ +[NOTE] +==== +This differs from earlier versions of {productname}, when the images were hosted on Quay.io. +==== + +. Enter the following command to log in to the registry: ++ +[source,terminal] +---- +$ sudo podman login registry.redhat.io +---- ++ +You are prompted to enter your `username` and `password`. + +[id="poc-firewall-configuration"] +== Firewall configuration + +If you have a firewall running on your system, you might have to add rules that allow access to {productname}. Use the following procedure to configure your firewall for a proof of concept deployment. 
+ +.Procedure + +* The commands required depend on the ports that you have mapped on your system, for example: ++ +[source,terminal] +---- +# firewall-cmd --permanent --add-port=80/tcp \ +&& firewall-cmd --permanent --add-port=443/tcp \ +&& firewall-cmd --permanent --add-port=5432/tcp \ +&& firewall-cmd --permanent --add-port=5433/tcp \ +&& firewall-cmd --permanent --add-port=6379/tcp \ +&& firewall-cmd --reload +---- + + +[id="poc-ip-naming"] +== IP addressing and naming services + +There are several ways to configure the component containers in {productname} so that they can communicate with each other, for example: + +//// +* **Using the IP addresses for the containers**. You can determine the IP address for containers with `podman inspect` and then use the values in the configuration tool when specifying the connection strings, for example: ++ +[source,terminal] +---- +$ sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay +---- ++ +This approach is susceptible to host restarts, as the IP addresses for the containers will change after a reboot. +//// + +* **Using a naming service**. If you want your deployment to survive container restarts, which typically result in changed IP addresses, you can implement a naming service. For example, the link:https://github.com/containers/dnsname[dnsname] plugin is used to allow containers to resolve each other by name. + +* **Using the host network**. You can use the `podman run` command with the `--net=host` option and then use container ports on the host when specifying the addresses in the configuration. This option is susceptible to port conflicts when two containers want to use the same port. This method is not recommended. + +* **Configuring port mapping**. You can use port mappings to expose ports on the host and then use these ports in combination with the host IP address or host name. + +This document uses port mapping and assumes a static IP address for your host system. 
+ +.Sample proof of concept port mapping +[%header, cols="2,1,1"] +|=== +|Component +|Port mapping +|Address + +|Quay +|`-p 80:8080` `-p 443:8443` +|\http://quay-server.example.com + +|Postgres for Quay +|`-p 5432:5432` +|quay-server.example.com:5432 + +|Redis +|`-p 6379:6379` +|quay-server.example.com:6379 + +|Postgres for Clair V4 +|`-p 5433:5432` +|quay-server.example.com:5433 + +|Clair V4 +|`-p 8081:8080` +|\http://quay-server.example.com:8081 + +|=== diff --git a/modules/proc_deploy_quay_poc_run.adoc b/modules/proc_deploy_quay_poc_run.adoc new file mode 100644 index 000000000..20b166640 --- /dev/null +++ b/modules/proc_deploy_quay_poc_run.adoc @@ -0,0 +1,131 @@ +:_content-type: PROCEDURE +[id="poc-deploying-quay"] += Deploying {productname} + +After you have configured your {productname} deployment, you can deploy it using the following procedures. + +.Prerequisites + +* The {productname} database is running. +* The Redis server is running. + +[id="preparing-configuration-file"] +== Creating the YAML configuration file + +Use the following procedure to deploy {productname} locally. + +.Procedure + +. Enter the following command to create a minimal `config.yaml` file that is used to deploy the {productname} container: ++ +[source,terminal] +---- +$ touch config.yaml +---- + +. 
Copy and paste the following YAML configuration into the `config.yaml` file: ++ +[source,yaml] +---- +BUILDLOGS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 +CREATE_NAMESPACE_ON_PUSH: true +DATABASE_SECRET_KEY: a8c2744b-7004-4af2-bcee-e417e7bdd235 +DB_URI: postgresql://quayuser:quaypass@quay-server.example.com:5432/quay +DISTRIBUTED_STORAGE_CONFIG: + default: + - LocalStorage + - storage_path: /datastorage/registry +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: [] +DISTRIBUTED_STORAGE_PREFERENCE: + - default +FEATURE_MAILING: false +SECRET_KEY: e9bd34f4-900c-436a-979e-7530e5d74ac8 +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +USER_EVENTS_REDIS: + host: quay-server.example.com + password: strongpassword + port: 6379 +---- + +. Create a directory to copy the {productname} configuration bundle to: ++ +[source,terminal] +---- +$ mkdir $QUAY/config +---- + +. Copy the {productname} configuration file to the directory: ++ +[source,terminal] +---- +$ cp -v config.yaml $QUAY/config +---- + +[id="configuring-superuser"] +=== Configuring a {productname} superuser + +You can optionally add a superuser by editing the `config.yaml` file to add the necessary configuration fields. The list of superuser accounts is stored as an array in the field `SUPER_USERS`. Superusers have the following capabilities: + +* User management +* Organization management +* Service key management +* Change log transparency +* Usage log management +* Globally-visible user message creation + +.Procedure + +. Add the `SUPER_USERS` array to the `config.yaml` file: ++ +[source,yaml] +---- +SERVER_HOSTNAME: quay-server.example.com +SETUP_COMPLETE: true +SUPER_USERS: + - quayadmin <1> +... +---- +<1> If following this guide, use `quayadmin`. + +[id="preparing-local-storage"] +== Prepare local storage for image data + +Use the following procedure to set your local file system to store registry images. + +.Procedure + +. 
Create a local directory that will store registry images by entering the following command: ++ +[source,terminal] +---- +$ mkdir $QUAY/storage +---- + +. Set the directory to store registry images: ++ +[source,terminal] +---- +$ setfacl -m u:1001:-wx $QUAY/storage +---- + +[id="deploy-quay-registry"] +== Deploy the {productname} registry + +Use the following procedure to deploy the `Quay` registry container. + +.Procedure + +* Enter the following command to start the `Quay` registry container, specifying the appropriate volumes for configuration data and local storage for image data: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- diff --git a/modules/proc_deploy_quay_poc_use.adoc b/modules/proc_deploy_quay_poc_use.adoc new file mode 100644 index 000000000..0d53208fe --- /dev/null +++ b/modules/proc_deploy_quay_poc_use.adoc @@ -0,0 +1,145 @@ +:_content-type: CONCEPT +[id="use-quay-poc"] += Using {productname} + +The following steps show you how to use the interface to create new organizations and repositories, and to search and browse existing repositories. Following step 3, you can use the command line interface to interact with the registry and to push and pull images. + +.Procedure + +. Use your browser to access the user interface for the {productname} registry at `\http://quay-server.example.com`, assuming you have configured `quay-server.example.com` as your hostname in your `/etc/hosts` file and in your `config.yaml` file. + +. Click `Create Account` and add a user, for example, `quayadmin` with a password `password`. + +. From the command line, log in to the registry: ++ +[source,terminal] +---- +$ sudo podman login --tls-verify=false quay-server.example.com +---- ++ +.Example output ++ +[source,terminal] +---- +Username: quayadmin +Password: password +Login Succeeded! 
+---- + +[id="pushing-pulling-images-poc"] +== Pushing and pulling images on {productname} + +Use the following procedure to push and pull images to your {productname} registry. + +.Procedure + +. To test pushing and pulling images from the {productname} registry, first pull a sample image from an external registry: ++ +[source,terminal] +---- +$ sudo podman pull busybox +---- ++ +.Example output ++ +[source,terminal] +---- +Trying to pull docker.io/library/busybox... +Getting image source signatures +Copying blob 4c892f00285e done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 +---- + +. Enter the following command to see the local copy of the image: ++ +[source,terminal] +---- +$ sudo podman images +---- ++ +.Example output ++ +[source,terminal] +---- +REPOSITORY TAG IMAGE ID CREATED SIZE +docker.io/library/busybox latest 22667f53682a 14 hours ago 1.45 MB +---- + +. Enter the following command to tag this image, which prepares the image for pushing it to the registry: ++ +[source,terminal] +---- +$ sudo podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your registry. Following this step, you can use your browser to see the tagged image in your repository. ++ +[source,terminal] +---- +$ sudo podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 6b245f040973 done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +---- + +. To test access to the image from the command line, first delete the local copy of the image: ++ +[source,terminal] +---- +$ sudo podman rmi quay-server.example.com/quayadmin/busybox:test +---- ++ +Example output ++ +[source,terminal] +---- +Untagged: quay-server.example.com/quayadmin/busybox:test +---- + +. 
Pull the image again, this time from your {productname} registry: ++ +[source,terminal] +---- +$ sudo podman pull --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- ++ +.Example output ++ +[source,terminal] +---- +Trying to pull quay-server.example.com/quayadmin/busybox:test... +Getting image source signatures +Copying blob 6ef22a7134ba [--------------------------------------] 0.0b / 0.0b +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 +---- + +[id="accessing-superuser-admin-panel"] +== Accessing the superuser admin panel + +If you added a superuser to your `config.yaml` file, you can access the *Superuser Admin Panel* on the {productname} UI by using the following procedure. + +.Prerequisites + +* You have configured a superuser. + +.Procedure + +. Access the *Superuser Admin Panel* on the {productname} UI by clicking on the current user's name or avatar in the navigation pane of the UI. Then, click *Superuser Admin Panel*. ++ +image:super-user-admin-panel.png[Super User Admin Panel] ++ +On this page, you can manage users, your organization, service keys, view change logs, view usage logs, and create global messages for your organization. \ No newline at end of file diff --git a/modules/proc_deploy_quay_single.adoc b/modules/proc_deploy_quay_single.adoc index e6307e333..4b6eaea35 100644 --- a/modules/proc_deploy_quay_single.adoc +++ b/modules/proc_deploy_quay_single.adoc @@ -1,7 +1,13 @@ -== Starting up the supporting services -Follow these steps to install {productname} on a single system (VM or bare metal). +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] + +[id="starting-up-supporting-service"] += Starting up the supporting services + +Follow these steps to install {productname} on a single system, either virtual machine (VM) or bare metal. . **Install Red Hat Enterprise Linux server**: Install the latest RHEL server. 
You can do a Minimal install (shell access only) or Server plus GUI (if you want a desktop). + . **Register the System**: Register and subscribe your RHEL server system to Red Hat. See link:https://access.redhat.com/solutions/253273[How to register and subscribe a system...] for details. The following commands register your system and list available subscriptions. Choose an available RHEL server subscription, attach to its poolid, enable rhel-7-server-rpms and rhel-7-server-extras-rpms repositories, and upgrade to the latest software: + [NOTE] @@ -9,25 +15,50 @@ Follow these steps to install {productname} on a single system (VM or bare metal This procedure was tested on RHEL 7. The `docker` command is not included in RHEL 8, so you would need to use the `podman` command instead. Because the `--restart` option is not supported by podman, instead of using `--restart`, -you could set up to use `podman` as a systemd service, as described +you could set up to use `podman` as a systemd service, as described in link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#starting_containers_with_systemd[Starting containers with systemd]. - ==== - + -.... +[source,terminal] +---- # subscription-manager register --username= --password= +---- ++ +[source,terminal] +---- # subscription-manager refresh +---- ++ +[source,terminal] +---- # subscription-manager list --available +---- ++ +[source,terminal] +---- # subscription-manager attach --pool= +---- ++ +[source,terminal] +---- # subscription-manager repos --disable="*" +---- ++ +[source,terminal] +---- # subscription-manager repos \ --enable="rhel-7-server-rpms" \ --enable="rhel-7-server-extras-rpms" +---- ++ +[source,terminal] +---- # yum update -y -.... +---- -. 
**Add Quay.io authentication**: Set up authentication to Quay.io, so you can pull the quay container, as described in link:https://access.redhat.com/solutions/3533201[Accessing {productname} without a CoreOS login]. +ifdef::downstream[] +. **Add registry.redhat.io authentication**: Set up authentication to registry.redhat.io, so you can pull the `Quay` container, as described in link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]. Note that this differs from earlier {productname} releases where the images were hosted on quay.io. +endif::downstream[] . **Setup Docker**: Install, enable, and start the docker service as shown here (see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_containers/index#getting_docker_in_rhel_7[Getting Docker in RHEL 7] for details): @@ -41,17 +72,17 @@ active .... . **Open ports in firewall**: If you have a firewall running on your system, -to access the Red Hat Quay config tool (port 8443) and application (ports 80 and 443) -outside of the local system, run the following commands: +to access the {productname} config tool (port 8443) and application (ports 80 and 443) +outside of the local system, run the following commands (add `--zone=` for each command to open ports on a particular zone): + .... -# firewall-cmd --permanent --zone=trusted --add-port=8443/tcp -# firewall-cmd --permanent --zone=trusted --add-port=80/tcp -# firewall-cmd --permanent --zone=trusted --add-port=443/tcp +# firewall-cmd --permanent --add-port=8443/tcp +# firewall-cmd --permanent --add-port=80/tcp +# firewall-cmd --permanent --add-port=443/tcp # firewall-cmd --reload .... -. **Install / Deploy a Database**: Choose either MySQL or PostgreSQL as a database. 
This example shows how to deploy the link:https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/mysql-57-rhel7[MySQL database container] (see the link:https://access.redhat.com/documentation/en-us/red_hat_software_collections/2/html-single/using_red_hat_software_collections_container_images/#mysql[MySQL] section of Using Red Hat Software Collections Container Images for details.) To configure the MySQL database, you can use the values shown here or change any of the following for storing MySQL data (/var/lib/mysql) and setting database values: +. **Install / Deploy a Database**: Choose either MySQL or PostgreSQL as a database. This example shows how to deploy the link:https://access.redhat.com/containers/#/registry.access.redhat.com/rhel8/mysql-80[MySQL database container]. To configure the MySQL database, you can use the values shown here or change any of the following for storing MySQL data (/var/lib/mysql) and setting database values: + .... # mkdir -p /var/lib/mysql @@ -73,7 +104,7 @@ outside of the local system, run the following commands: --privileged=true \ --publish 3306:3306 \ -v /var/lib/mysql:/var/lib/mysql/data:Z \ - registry.access.redhat.com/rhscl/mysql-57-rhel7 + registry.redhat.io/rhel8/mysql-80 .... + [NOTE] @@ -83,6 +114,12 @@ To generate passwords for MySQL user accounts, instead of setting them staticall # export MYSQL_PASSWORD=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | sed 1q) # export MYSQL_ROOT_PASSWORD=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | sed 1q) + +If using MySQL 8.0 or higher, you must explicitly create your Quay database using the 'latin1' characterset. To set the character set when creating the database do the following (e.g. for a database named 'quay'): + +mysql> create database quay character set latin1; +Query OK, 1 row affected (0.02 sec) + ==== . 
**(optional) Check database connectivity**: To check connectivity to the database, you can log in using the mysql command (from the mariadb package). Substitute the hostname (or IP address) of your MySQL service and your password. Type `status` to see information about your MySQL connection: @@ -110,7 +147,7 @@ MySQL [(none)]> \q .... -. **Install / Deploy link:https://access.redhat.com/containers/?tab=overview#/registry.access.redhat.com/rhscl/redis-32-rhel7)[Redis]**: Run Redis as a container: +. **Install / Deploy Redis **: Run Redis as a container: + .... @@ -119,7 +156,7 @@ MySQL [(none)]> \q # docker run -d --restart=always -p 6379:6379 \ --privileged=true \ -v /var/lib/redis:/var/lib/redis/data:Z \ - registry.access.redhat.com/rhscl/redis-32-rhel7 + registry.redhat.io/rhel8/redis-5 .... . **Check redis connectivity**: You can use the `telnet` command to test connectivity to the redis service. Type MONITOR (to begin monitoring the service) and QUIT to exit: diff --git a/modules/proc_generating-splunk-token.adoc b/modules/proc_generating-splunk-token.adoc new file mode 100644 index 000000000..24200e396 --- /dev/null +++ b/modules/proc_generating-splunk-token.adoc @@ -0,0 +1,66 @@ +:_content-type: PROCEDURE +[id="proc_generating-splunk-token"] += Generating a Splunk token + +Use one of the following procedures to create a bearer token for Splunk. + +[id="proc_generating-splunk-token-ui"] +== Generating a Splunk token using the Splunk UI + +Use the following procedure to create a bearer token for Splunk using the Splunk UI. + +.Prerequisites + +* You have installed Splunk and created a username. + +.Procedure + +. On the Splunk UI, navigate to *Settings* -> *Tokens*. + +. Click *Enable Token Authentication*. + +. Ensure that *Token Authentication* is enabled by clicking *Token Settings* and selecting *Token Authentication* if necessary. + +. Optional: Set the expiration time for your token. This defaults at 30 days. + +. Click *Save*. + +. Click *New Token*. 
+ +. Enter information for *User* and *Audience*. + +. Optional: Set the *Expiration* and *Not Before* information. + +. Click *Create*. Your token appears in the *Token* box. Copy the token immediately. ++ +[IMPORTANT] +==== +If you close out of the box before copying the token, you must create a new token. The token in its entirety is not available after closing the *New Token* window. +==== + +[id="proc_generating-splunk-token-cli"] +== Generating a Splunk token using the CLI + +Use the following procedure to create a bearer token for Splunk using the CLI. + +.Prerequisites + +* You have installed Splunk and created a username. + +.Procedure + +. In your CLI, enter the following `CURL` command to enable token authentication, passing in your Splunk username and password: ++ +[source,terminal] +---- +$ curl -k -u : -X POST ://:/services/admin/token-auth/tokens_auth -d disabled=false +---- + +. Create a token by entering the following `CURL` command, passing in your Splunk username and password. ++ +[source,terminal] +---- +$ curl -k -u : -X POST ://:/services/authorization/tokens?output_mode=json --data name= --data audience=Users --data-urlencode expires_on=+30d +---- + +. Save the generated bearer token. \ No newline at end of file diff --git a/modules/proc_github-app.adoc b/modules/proc_github-app.adoc index bdddc4bce..52548077d 100644 --- a/modules/proc_github-app.adoc +++ b/modules/proc_github-app.adoc @@ -1,26 +1,38 @@ -[[github-app]] +[id="github-app"] = Creating an OAuth application in GitHub -You can authorize your registry to access a GitHub account and its repositories by registering it as a GitHub OAuth application. +The following sections describe how to authorize {productname} to integrate with GitHub by creating an OAuth application. This allows {productname} to access GitHub repositories on behalf of a user. 
-[[github-app-create]] +OAuth integration with GitHub is primarily used to allow features like automated builds, where {productname} can be enabled to monitor specific GitHub repositories for changes like commits or pull requests, and trigger container image builds when those changes are made. + +[id="github-app-create"] == Create new GitHub application -. Log into GitHub (Enterprise) -. Visit the Applications page under your organization's settings. -. Click link:https://github.com/settings/applications/new[Register New Application]. The `Register a new OAuth application` configuration screen is displayed: - image:register-app.png[Register a new OAuth application] -. Set Homepage URL: Enter the Quay Enterprise URL as the `Homepage URL` +Use the following procedure to create an OAuth application in GitHub. + +.Procedure + +. Log into link:https://github.com/enterprise[GitHub Enterprise]. + +. In the navigation pane, select your username -> *Your organizations*. + +. In the navigation pane, select *Applications* -> *Developer Settings*. + +. In the navigation pane, click *OAuth Apps* -> *New OAuth App*. You are navigated to the following page: ++ +image:register-app.png[Register a new OAuth application] + +. Enter a name for the application in the *Application name* textbox. + +. In the *Homepage URL* textbox, enter your {productname} URL. + [NOTE] ==== -If using public GitHub, the Homepage URL entered must be accessible by your users. It can still be an internal URL. +If you are using public GitHub, the Homepage URL entered must be accessible by your users. It can still be an internal URL. ==== -. Set Authorization callback URL: Enter -https://{$RED_HAT_QUAY_URL}/oauth2/github/callback -as the Authorization callback URL. +. In the *Authorization callback URL*, enter *https:///oauth2/github/callback*. -. Save your settings by clicking the Register application button. The new new application's summary is shown: +. Click *Register application* to save your settings. -.
Record the Client ID and Client Secret shown for the new application. +. When the new application's summary is shown, record the Client ID and the Client Secret shown for the new application. \ No newline at end of file diff --git a/modules/proc_github-build-triggers.adoc b/modules/proc_github-build-triggers.adoc index 9c03f379b..16390d6c7 100644 --- a/modules/proc_github-build-triggers.adoc +++ b/modules/proc_github-build-triggers.adoc @@ -1,24 +1,76 @@ [[github-build-triggers]] -= Set up GitHub build triggers += Set up GitHub build trigger tags -{productname} supports using GitHub or GitHub Enterprise as a trigger to building images. +{productname} supports using GitHub or GitHub Enterprise as a trigger to building images. +If you have not yet done so, go ahead and link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#build-support[enable build support in {productname}]. -. Initial setup: If you have not yet done so, please link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#build-support[enable build support in {productname}]. +== Understanding tag naming for build triggers -. Create an OAuth application in GitHub: Following the instructions at link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#github-app[Create a GitHub Application]. +Prior to {productname} 3.3, how images created from build triggers were named was limited. +Images built by build triggers were named: + +* With the branch or tag whose change invoked the trigger +* With a `latest` tag for images that used the default branch + +As of {productname} 3.3 and later, you have more flexibility in how you set image tags. +The first thing you can do is enter custom tags, to have any string of characters assigned as a +tag for each built image. 
However, as an alternative, you could use the following tag templates +to tag images with information from each commit: + +* **${commit_info.short_sha}**: The commit's short SHA +* **${commit_info.date}**: The timestamp for the commit +* **${commit_info.author}**: The author from the commit +* **${commit_info.committer}**: The committer of the commit +* **${parsed_ref.branch}**: The branch name + +The following procedure describes how you set up tagging for build triggers. + +== Setting tag names for build triggers + +Follow these steps to configure custom tags for build triggers: + +. From the repository view, select the Builds icon from the left navigation. + +. Select the Create Build Trigger menu, and select the type of repository push +you want (GitHub, Bitbucket, GitLab, or Custom Git repository push). +For this example, _GitHub Repository Push_ is chosen, as illustrated in the +following figure. + -[NOTE] -==== -This application must be different from that used for GitHub Authentication. -==== +image:create-build-trigger.png[Choose the type of build trigger to use] + +. When the _Setup Build Trigger_ page appears, select the repository and namespace in which +you want the trigger set up. + +. Under Configure Trigger, select either _Trigger for all branches and +tags_ or _Trigger only on branches and tags matching a regular expression_. Then select Continue. +The Configure Tagging section appears, as shown in the following figure: ++ +image:configure-tagging.png[Set tagging with your own tags or using tag templates] + +. Scroll down to _Configure Tagging_ and select from the following options: ++ +* **Tag manifest with the branch or tag name**: Check this box to use the name of the branch or tag +in which the commit occurred as the tag used on the image. This is enabled by default. +* **Add latest tag if on default branch**: Check this box to use the `latest` tag +for the image if it is on the default branch for the repository. This is enabled by default.
+* **Add custom tagging templates**: Enter a custom tag or a template into the +_Enter a tag template_ box. There are multiple tag templates you can enter here, as +described earlier in this section. They include ways of using short SHA, timestamps, +author name, committer, and branch name from the commit as tags. + +. Select Continue. You are prompted to select the directory build context for the Docker build. +The build context directory identifies the location of the directory containing the Dockerfile, +along with other files needed when the build is triggered. +Enter "/" if the Dockerfile is in the root of the git repository. + +. Select Continue. You are prompted to add an optional Robot Account. +Do this if you want to pull a private base image during the build process. +The robot account would need access to the build. + +. Select Continue to complete the setup of the build trigger. -. Visit the management panel: Sign in to a superuser account and visit http://yourregister/superuser to view the management panel. +If you were to return to the Repository Builds page for the repository, the build +triggers you set up will be listed under the Build Triggers heading. -. Enable GitHub triggers: +image:view-tags-set.png[See the tagging options you set from the repository view] - * Click the configuration tab and scroll down to the section entitled GitHub (Enterprise) Build Triggers. 
- image:enable-trigger.png[Select Enable Github Triggers] - * Check the "Enable GitHub Triggers" box - * Fill in the credentials from the application created above - * Click "Save Configuration Changes" - * Restart the container (you will be prompted) diff --git a/modules/proc_installing-creating-username-splunk.adoc b/modules/proc_installing-creating-username-splunk.adoc new file mode 100644 index 000000000..4fed26b3e --- /dev/null +++ b/modules/proc_installing-creating-username-splunk.adoc @@ -0,0 +1,17 @@ +:_content-type: PROCEDURE +[id="proc_installing-creating-username-splunk"] += Installing and creating a username for Splunk + +Use the following procedure to install and create Splunk credentials. + +.Procedure + +. Create a Splunk account by navigating to link:https://www.splunk.com/en_us/sign-up.html[Splunk] and entering the required credentials. + +. Navigate to the link:https://www.splunk.com/en_us/download/splunk-enterprise.html[Splunk Enterprise] *Free Trial* page, select your platform and installation package, and then click *Download Now*. + +. Install the Splunk software on your machine. When prompted, create a username, for example, `splunk_admin`, and a password. + +. After creating a username and password, a localhost URL will be provided for your Splunk deployment, for example, `http://.remote.csb:8000/`. Open the URL in your preferred browser. + +. Log in with the username and password you created during installation. You are directed to the Splunk UI. \ No newline at end of file diff --git a/modules/proc_installing-qbo-on-ocp.adoc b/modules/proc_installing-qbo-on-ocp.adoc new file mode 100644 index 000000000..d8075bd5f --- /dev/null +++ b/modules/proc_installing-qbo-on-ocp.adoc @@ -0,0 +1,20 @@ +:_content-type: PROCEDURE +[id="installing-qbo-on-ocp"] += Installing the {qbo} on {ocp} + +In this procedure, you will install the {qbo} on {ocp}. + +.Prerequisites + +* You have set up {productname} and obtained an Access Token.
+* An {ocp} 4.6 or greater environment for which you have cluster administrator permissions. + +.Procedure + +. Open the *Administrator* perspective of the web console and navigate to *Operators* → *OperatorHub* on the navigation pane. + +. Search for `{qbo}`, click the *{qbo}* title, and then click *Install*. + +. Select the version to install, for example, *stable-3.7*, and then click *Install*. + +. Click *View Operator* when the installation finishes to go to the {qbo}'s *Details* page. Alternatively, you can click *Installed Operators* → *Red Hat Quay Bridge Operator* to go to the *Details* page. diff --git a/modules/proc_manage-advanced-config.adoc b/modules/proc_manage-advanced-config.adoc new file mode 100644 index 000000000..029490cab --- /dev/null +++ b/modules/proc_manage-advanced-config.adoc @@ -0,0 +1,353 @@ +:_content-type: CONCEPT +[id="advanced-quay-configuration"] += Advanced {productname} configuration + +You can configure your {productname} after initial deployment using one of the following methods: + +//// +* *Using the {productname} Config Tool*. With this tool, a web-based interface for configuring the {productname} cluster is provided when running the `Quay` container in `config` mode. This method is recommended for configuring the {productname} service. +//// + +* *Editing the `config.yaml` file*. The `config.yaml` file contains most configuration information for the {productname} cluster. Editing the `config.yaml` file directly is the primary method for advanced tuning and enabling specific features. + +* *Using the {productname} API*. Some {productname} features can be configured through the API. + +This content in this section describes how to use each of the aforementioned interfaces and how to configure your deployment with advanced features. 
+ +//// +[id="using-the-config-tool"] +== Using {productname} Config Tool to modify {productname} + +The {productname} Config Tool is made available by running a `Quay` container in `config` mode alongside the regular {productname} service. + +Use the following sections to run the Config Tool from the {productname} Operator, or to run the Config Tool on host systems from the command line interface (CLI). + +[id="running-config-tool-from-quay-operator"] +=== Running the Config Tool from the {productname} Operator + +When running the {productname} Operator on {ocp}, the Config Tool is readily available to use. Use the following procedure to access the {productname} Config Tool. + +.Prerequisites + +. You have deployed the {productname} Operator on {ocp}. + +.Procedure. + +. On the OpenShift console, select the {productname} project, for example, `quay-enterprise`. + +. In the navigation pane, select *Networking* -> *Routes*. You should see routes to both the {productname} application and Config Tool, as shown in the following image: ++ +image:configtoolroute.png[View the route to the {productname} Config Tool] + +. Select the route to the Config Tool, for example, `example-quayecosystem-quay-config`. The Config Tool UI should open in your browser. + +. Select *Modify configuration for this cluster* to bring up the Config Tool setup, for example: ++ +image:configtoolsetup.png[Modify {productname} cluster settings from the Config Tool] + +. Make the desired changes, and then select *Save Configuration Changes*. + +. Make any corrections needed by clicking *Continue Editing*, or, select *Next* to continue. + +. When prompted, select *Download Configuration*. This will download a tarball of your new `config.yaml`, as well as any certificates and keys used with your {productname} setup. The `config.yaml` can be used to make advanced changes to your configuration or use as a future reference. + +. 
Select *Go to deployment rollout* -> *Populate the configuration to deployments*. Wait for the {productname} pods to restart for the changes to take effect. + + +[id="running-config-tool-from-cli"] +=== Running the Config Tool from the command line + +If you are running {productname} from a host system, you can use the following procedure to make changes to your configuration after the initial deployment. + +. Prerequisites + +* You have installed either `podman` or `docker`. + +. Start {productname} in configuration mode. + +. On the first `Quay` node, enter the following command: ++ +[subs="verbatim,attributes"] +---- +$ podman run --rm -it --name quay_config -p 8080:8080 \ + -v path/to/config-bundle:/conf/stack \ + {productrepo}/{quayimage}:{productminv} config +---- ++ +[NOTE] +==== +To modify an existing config bundle, you can mount your configuration directory into the `Quay` container. +==== + +. When the {productname} configuration tool starts, open your browser and navigate to the URL and port used in your configuration file, for example, `quay-server.example.com:8080`. + +. Enter your username and password. + +. Modify your {productname} cluster as desired. + +[id="deploying-config-tool-using-tls"] +=== Deploying the config tool using TLS certificates + +You can deploy the config tool with secured SSL/TLS certificates by passing environment variables to the runtime variable. This ensures that sensitive data like credentials for the database and storage backend are protected. + +The public and private keys must contain valid Subject Alternative Names (SANs) for the route that you deploy the config tool on. + +The paths can be specified using `CONFIG_TOOL_PRIVATE_KEY` and `CONFIG_TOOL_PUBLIC_KEY`. + +If you are running your deployment from a container, the `CONFIG_TOOL_PRIVATE_KEY` and `CONFIG_TOOL_PUBLIC_KEY` values the locations of the certificates inside of the container. 
For example: + +[source,terminal] +---- +$ podman run --rm -it --name quay_config -p 7070:8080 \ + +-v ${PRIVATE_KEY_PATH}:/tls/localhost.key \ +-v ${PUBLIC_KEY_PATH}:/tls/localhost.crt \ +-e CONFIG_TOOL_PRIVATE_KEY=/tls/localhost.key \ +-e CONFIG_TOOL_PUBLIC_KEY=/tls/localhost.crt \ +-e DEBUGLOG=true \ +-ti config-app:dev +---- +//// + +[id="overview-advanced-config"] +== Using the API to modify {productname} + +See the +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API Guide] for information on how to access {productname} API. + +[id="editing-config-file-to-modify-quay"] +== Editing the config.yaml file to modify {productname} + +Advanced features can be implemented by editing the `config.yaml` file directly. All configuration fields for {productname} features and settings are available in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index[{productname} configuration guide]. + +The following example is one setting that you can change directly in the `config.yaml` file. Use this example as a reference when editing your `config.yaml` file for other features and settings. + +[id="add-name-and-company-to-quay-sign-in"] +=== Adding name and company to {productname} sign-in + +By setting the `FEATURE_USER_METADATA` field to `true`, users are prompted for their name and company when they first sign in. This is an optional field, but can provide you with extra data about your {productname} users. + +Use the following procedure to add a name and a company to the {productname} sign-in page. + +.Procedure + +. Add, or set, the `FEATURE_USER_METADATA` configuration field to `true` in your `config.yaml` file. For example: + +[source,yaml] +---- +# ... +FEATURE_USER_METADATA: true +# ... +---- + +. Redeploy {productname}. + +. 
Now, when prompted to log in, users are requested to enter the following information: ++ +image:metadata-request.png[Metadata request] + +//// +[id="disable-tls-protocols"] +=== Disable TLS Protocols + +You can change the `SSL_PROTOCOLS` setting to remove SSL protocols that you do not want to support in your {productname} instance. By default, {productname} is configured to support `TLSv1`, `TLSv1.1`, and `TLSv1.2`. +Use the following procedure to remove TLS v1 support from {productname}. + +.Procedure + + +To remove TLS v1 support from the default `SSL_PROTOCOLS:['TLSv1','TLSv1.1','TLSv1.2']`, change it to the following: + +[source,yaml] +---- +# ... +SSL_PROTOCOLS : ['TLSv1.1','TLSv1.2'] +# ... +---- + +[id="rate-limit-api-calls"] +=== Rate limit API calls + +Adding the `FEATURE_RATE_LIMITS` parameter to the `config.yaml` file causes `nginx` to limit certain API calls to 30-per-second. If `FEATURE_RATE_LIMITS` is not set, API calls are limited to 300-per-second, effectively making them unlimited. + +Rate limiting is important when you must ensure that the available resources are not overwhelmed with traffic. + +Some namespaces might require unlimited access, for example, if they are important to CI/CD and take priority. In that scenario, those namespaces might be placed in a list in the `config.yaml` file using the `NON_RATE_LIMITED_NAMESPACES` field. + +[id="adjust-database-connection-pool"] +=== Adjust database connection pooling + +{productname} is composed of many different processes which all run within +the same container. Many of these processes interact with the database. + +With the `DB_CONNECTION_POOLING` parameter, each process that interacts with the database will contain a connection pool. These per-process connection pools are configured to maintain a maximum of 20 connections. When under heavy load, it is possible to fill the connection pool for every process within a {productname} container. 
Under certain deployments and loads, this might require analysis to ensure that {productname} does not exceed the database's configured maximum connection count. + +Over time, the connection pools will release idle connections. To release all connections immediately, {productname} must be restarted. + +Database connection pooling can be toggled by setting the `DB_CONNECTION_POOLING` to `true` or `false`. For example: + +[source,yaml] +---- +--- +DB_CONNECTION_POOLING: true +--- +---- + +When `DB_CONNECTION_POOLING` is enabled, you can change the maximum size of the connection pool with the `DB_CONNECTION_ARGS` in your `config.yaml`. For example: + +[source,yaml] +---- +--- +DB_CONNECTION_ARGS: + max_connections: 10 +--- +---- + +[id="database-connection-arguments"] +==== Database connection arguments + +You can customize your {productname} database connection settings within the `config.yaml` file. These are dependent on your deployment's database driver, for example, `psycopg2` for Postgres and `pymysql` for MySQL. You can also pass in argument used by Peewee's connection pooling mechanism. For example: + +[source,yaml] +---- +--- +DB_CONNECTION_ARGS: + max_connections: n # Max Connection Pool size. (Connection Pooling only) + timeout: n # Time to hold on to connections. (Connection Pooling only) + stale_timeout: n # Number of seconds to block when the pool is full. (Connection Pooling only) +--- +---- + + +[id="database-ssl-configuration"] +==== Database SSL configuration + +Some key-value pairs defined under the `DB_CONNECTION_ARGS` field are generic, while others are specific to the database. In particular, SSL configuration depends on the database that you are deploying. 
+ +[id="postgres-ssl-connection-arguments"] +===== PostgreSQL SSL connection arguments + +The following YAML shows a sample PostgreSQL SSL configuration: + +[source,yaml] +---- +--- +DB_CONNECTION_ARGS: + sslmode: verify-ca + sslrootcert: /path/to/cacert +--- +---- + +The `sslmode` parameter determines whether, or with what priority, a secure SSL TCP/IP connection will be negotiated with the server. There are six modes for the `sslmode` parameter: + +* **disable**: Only try a non-SSL connection. +* **allow**: Try a non-SSL connection first. Upon failure, try an SSL connection. +* **prefer**: Default. Try an SSL connection first. Upon failure, try a non-SSL connection. +* **require**: Only try an SSL connection. If a root CA file is present, verify the connection in the same way as if `verify-ca` was specified. +* **verify-ca**: Only try an SSL connection, and verify that the server certificate is issued by a trusted certificate authority (CA). +* **verify-full**: Only try an SSL connection. Verify that the server certificate is issued by a trusted CA, and that the requested server host name matches that in the certificate. + +For more information about the valid arguments for PostgreSQL, see link:https://www.postgresql.org/docs/current/libpq-connect.html[Database Connection Control Functions]. + +[id="mysql-ssl-connection-arguments"] +===== MySQL SSL connection arguments + +The following YAML shows a sample MySQL SSL configuration: + +[source,yaml] +---- +--- +DB_CONNECTION_ARGS: + ssl: + ca: /path/to/cacert +--- +---- + +For more information about the valid connection arguments for MySQL, see link:https://dev.mysql.com/doc/refman/8.0/en/connecting-using-uri-or-key-value-pairs.html[Connecting to the Server Using URI-Like Strings or Key-Value Pairs]. + +[id="http-connection-counts"] +==== HTTP connection counts + +You can specify the quantity of simultaneous HTTP connections using environment variables. 
The environment variables can be specified as a whole, or for a specific component. The default for each is 50 parallel connections per process. See the following YAML for example environment variables; + +[source,yaml] +---- +--- +WORKER_CONNECTION_COUNT_REGISTRY=n +WORKER_CONNECTION_COUNT_WEB=n +WORKER_CONNECTION_COUNT_SECSCAN=n +WORKER_CONNECTION_COUNT=n +--- +---- + +[NOTE] +==== +Specifying a count for a specific component will override any value +set in the `WORKER_CONNECTION_COUNT` configuration field. +==== + +[id="dynamic-process-counts"] +==== Dynamic process counts + +To estimate the quantity of dynamically sized processes, the following +calculation is used by default. + +[NOTE] +==== +{productname} queries the available CPU count from the entire machine. Any limits +applied using kubernetes or other non-virtualized mechanisms will not affect +this behavior. {productname} makes its calculation based on the total number of processors on the Node. The default values listed are simply targets, but shall +not exceed the maximum or be lower than the minimum. +==== + +Each of the following process quantities can be overridden using the +environment variable specified below: + +- registry - Provides HTTP endpoints to handle registry action +* minimum: 8 +* maximum: 64 +* default: $CPU_COUNT x 4 +* environment variable: WORKER_COUNT_REGISTRY + +- web - Provides HTTP endpoints for the web-based interface +* minimum: 2 +* maximum: 32 +* default: $CPU_COUNT x 2 +* environment_variable: WORKER_COUNT_WEB + +- secscan - Interacts with Clair +* minimum: 2 +* maximum: 4 +* default: $CPU_COUNT x 2 +* environment variable: WORKER_COUNT_SECSCAN + +[id="environment-variables"] +==== Environment variables + +{productname} allows overriding default behavior using environment variables. +The following table lists and describes each variable and the values they can expect. 
+ +.Worker count environment variables +[cols="2a,2a,2a",options="header"] +|=== +| Variable | Description | Values +| *WORKER_COUNT_REGISTRY* | Specifies the number of processes to handle registry requests within the `Quay` container. | Integer between `8` and `64` +| *WORKER_COUNT_WEB* | Specifies the number of processes to handle UI/Web requests within the container. | Integer between `2` and `32` +| *WORKER_COUNT_SECSCAN* | Specifies the number of processes to handle Security Scanning (for example, Clair) integration within the container. | Integer. Because the Operator specifies 2 vCPUs for resource requests and limits, setting this value between `2` and `4` is safe. However, users can run more, for example, `16`, if warranted. +| *DB_CONNECTION_POOLING* | Toggle database connection pooling. | `true` or `false` +|=== + +[id="turning-off-connection-pooling"] +==== Turning off connection pooling + +{productname} deployments with a large amount of user activity can regularly +hit the 2k maximum database connection limit. In these cases, connection +pooling, which is enabled by default for {productname}, can cause database +connection count to rise exponentially and require you to turn off connection +pooling. + +If turning off connection pooling is not enough to prevent hitting the 2k +database connection limit, you need to take additional steps to deal with +the problem. If this happens, you might need to increase the maximum database +connections to better suit your workload. +//// \ No newline at end of file diff --git a/modules/proc_manage-bittorrent.adoc b/modules/proc_manage-bittorrent.adoc deleted file mode 100644 index 34b6e42ff..000000000 --- a/modules/proc_manage-bittorrent.adoc +++ /dev/null @@ -1,39 +0,0 @@ -[[bittorrent-based-distribution]] -= Distributing Images with BitTorrent - -{productname} supports BitTorrent-based distribution of its images to -clients via the https://github.com/coreos/quayctl[quayctl] tool. 
-BitTorrent-based distribution allows for machines to share image data -amongst themselves, resulting in faster downloads and shorter production -launch times. - -[id='visit-the-management-panel_{context}'] -== Visit the management panel - -Sign in to a superuser account from the {productname} login screen. For -example, if the host were reg.example.com, you would go to `http://reg.example.com/superuser` -to view the management panel: -image:superuser.png[Log in as superuser to set up BitTorrent downloads] - -[[enable-bittorrent-distribution]] -== Enable BitTorrent distribution - -* Click the configuration tab and scroll down to the section entitled -*BitTorrent-based download*. -image:enable-bittorrent.png[Select to enable BitTorrent downloads] - -* Check the "Enable BitTorrent downloads" box - -[[enter-an-announce-url]] -== Enter an announce URL - -In the "Announce URL" field, enter the HTTP endpoint of a JWT-capable -BitTorrent tracker's announce URL. -This will typically be a URL ending in -`/announce`. - -[id='save-configuration_{context}'] -== Save configuration - -* Click "Save Configuration Changes" -* Restart the container (you will be prompted) diff --git a/modules/proc_manage-clair-enable.adoc b/modules/proc_manage-clair-enable.adoc index 30e9894a2..01f1f464c 100644 --- a/modules/proc_manage-clair-enable.adoc +++ b/modules/proc_manage-clair-enable.adoc @@ -1,26 +1,26 @@ [[clair-initial-setup]] -= Setting Up Clair Security Scanning += Setting Up Clair V2 Security Scanning Once you have created the necessary key and pem files from the {productname} -config UI, you are ready to start up the Clair container and associated +config UI, you are ready to start up the Clair V2 container and associated database. Once that is done, you an restart your {productname} cluster to have those changes take effect. 
-Procedures for running the Clair container and associated database +Procedures for running the Clair V2 container and associated database are different on OpenShift than they are for running those containers directly on a host. -== Run Clair on a {productname} OpenShift deployment -To run the Clair image scanning container and its associated database on an OpenShift environment with your -{productname} cluster, see -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift/index#add_clair_scanner[Add Clair image scanning to {productname}]. +== Run Clair V2 on a {productname} OpenShift deployment +To run the Clair V2 image scanning container and its associated database on an OpenShift environment with your +{productname} cluster, see +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploy_red_hat_quay_on_openshift/index#add_clair_scanner[Add Clair image scanning to {productname}]. -== Run Clair on a {productname} Basic or HA deployment -To run Clair and its associated database on non-OpenShift environments (directly on a host), you need to: +== Run Clair V2 on a {productname} Basic or HA deployment +To run Clair V2 and its associated database on non-OpenShift environments (directly on a host), you need to: * Start up a database -* Configure and start Clair +* Configure and start Clair V2 [[clair-postgres-database]] === Get Postgres and Clair @@ -36,9 +36,9 @@ For testing purposes, a single PostgreSQL instance can be started locally: . 
To start Postgres locally, do the following: + ``` -# docker run --name postgres -p 5432:5432 -d postgres +# sudo podman run --name postgres -p 5432:5432 -d postgres # sleep 5 -# docker run --rm --link postgres:postgres postgres \ +# sudo podman run --rm --link postgres:postgres postgres \ sh -c 'echo "create database clairtest" | psql -h \ "$POSTGRES_PORT_5432_TCP_ADDR" -p \ "$POSTGRES_PORT_5432_TCP_PORT" -U postgres' @@ -51,11 +51,20 @@ postgresql://postgres@{DOCKER HOST GOES HERE}:5432/clairtest?sslmode=disable ``` . Pull the security-enabled Clair image: + +ifdef::upstream[] +You will need to build your own Clair container and pull it during this step. +Instructions for building the Clair container are not yet available. +endif::upstream[] + +ifdef::downstream[] + [subs="verbatim,attributes"] ``` -docker pull quay.io/redhat/clair-jwt:v{productmin} +sudo podman pull {productrepo}/clair-jwt:{productminv} ``` +endif::downstream[] + . Make a configuration directory for Clair + ``` @@ -64,15 +73,15 @@ docker pull quay.io/redhat/clair-jwt:v{productmin} ``` [[configure-clair]] -=== Configure Clair +=== Configure Clair V2 -Clair can run either as a single instance or in high-availability mode. +Clair V2 can run either as a single instance or in high-availability mode. It is recommended to run more than a single instance of Clair, ideally in an auto-scaling group with automatic healing. -. Create a `config.yaml` file to be used in the Clair config directory (`/clair/config`) from one of the two Clair configuration files shown here. +. Create a `config.yaml` file to be used in the Clair V2 config directory (`/clair/config`) from one of the two Clair configuration files shown here. . 
If you are doing a high-availability installation, go through the procedure in -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#authentication-for-high-availability-scanners[Authentication for high-availability scanners] to create a Key ID and Private Key (PEM). +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/#authentication-for-high-availability-scanners[Authentication for high-availability scanners] to create a Key ID and Private Key (PEM). . Save the Private Key (PEM) to a file (such as, $HOME/config/security_scanner.pem). . Replace the value of key_id (CLAIR_SERVICE_KEY_ID) with the Key ID you generated and the value of private_key_path with the location of the PEM file (for example, /config/security_scanner.pem). @@ -87,7 +96,7 @@ private_key_path: /clair/config/security_scanner.pem . Change other values in the configuration file as needed. [[clair-configuration-high-availability]] -==== Clair configuration: High availability +==== Clair V2 configuration: High availability ``` clair: @@ -113,14 +122,14 @@ clair: updater: # interval defines how often Clair will check for updates from its upstream vulnerability databases. interval: 6h - notifier: - attempts: 3 - renotifyinterval: 1h - http: - # QUAY_ENDPOINT defines the endpoint at which Quay is running. - # For example: https://myregistry.mycompany.com - endpoint: { QUAY_ENDPOINT }/secscan/notify - proxy: http://localhost:6063 + notifier: + attempts: 3 + renotifyinterval: 1h + http: + # QUAY_ENDPOINT defines the endpoint at which Quay is running. 
+ # For example: https://myregistry.mycompany.com + endpoint: { QUAY_ENDPOINT }/secscan/notify + proxy: http://localhost:6063 jwtproxy: signer_proxy: @@ -167,7 +176,7 @@ jwtproxy: ``` [[clair-configuration-single-instance]] -==== Clair configuration: Single instance +==== Clair V2 configuration: Single instance ``` clair: @@ -193,14 +202,14 @@ clair: updater: # interval defines how often Clair will check for updates from its upstream vulnerability databases. interval: 6h - notifier: - attempts: 3 - renotifyinterval: 1h - http: - # QUAY_ENDPOINT defines the endpoint at which Quay is running. - # For example: https://myregistry.mycompany.com - endpoint: { QUAY_ENDPOINT }/secscan/notify - proxy: http://localhost:6063 + notifier: + attempts: 3 + renotifyinterval: 1h + http: + # QUAY_ENDPOINT defines the endpoint at which Quay is running. + # For example: https://myregistry.mycompany.com + endpoint: { QUAY_ENDPOINT }/secscan/notify + proxy: http://localhost:6063 jwtproxy: signer_proxy: @@ -252,7 +261,7 @@ jwtproxy: ``` [[configuring-clair-for-tls]] -=== Configuring Clair for TLS +=== Configuring Clair V2 for TLS To configure Clair to run with TLS, a few additional steps are required. @@ -275,7 +284,7 @@ it below. ==== Configuring trust of self-signed SSL Similar to the process for setting up Docker to -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#configuring-docker-to-trust-a-certificate-authority[trust +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/#configuring-docker-to-trust-a-certificate-authority[trust your self-signed certificates], Clair must also be configured to trust your certificates. Using the same CA certificate bundle used to configure Docker, complete the following steps: @@ -284,26 +293,27 @@ configure Docker, complete the following steps: to `ca.crt` . 
Make sure the `ca.crt` file is mounted inside the Clair container under `/etc/pki/ca-trust/source/anchors/` as in the example below: -+ -[NOTE] -==== -Add `--loglevel=debug` to the `docker run` command line for -the clair container to enable debug level logging. -==== +ifdef::upstream[] +You will need to build your own Clair container and run it during this step. +Instructions for building the Clair container are not yet available. +endif::upstream[] + +ifdef::downstream[] + [subs="verbatim,attributes"] ``` -# docker run --restart=always -p 6060:6060 -p 6061:6061 \ +# sudo podman run --restart=always -p 6060:6060 -p 6061:6061 \ -v /path/to/clair/config/directory:/clair/config \ -v /path/to/quay/cert/ca.crt:/etc/pki/ca-trust/source/anchors/ca.crt \ - quay.io/redhat/clair-jwt:v{productmin} + {productrepo}/clair-jwt:{productminv} ``` +endif::downstream[] Now Clair will be able to trust the source of your TLS certificates and use them to secure communication between Clair and Quay. [[clair-sources]] -=== Using Clair data sources +=== Using Clair V2 data sources Before scanning container images, Clair tries to figure out the operating system on which the container was built. It does this by looking for specific filenames inside that image (see Table 1). Once Clair knows the operating system, it uses specific @@ -342,7 +352,7 @@ You must be sure that Clair has access to all listed data sources by whitelistin to each data source's location. You might need to add a wild-card character (*) at the end of some URLS that may not be fully complete because they are dynamically built by code. 
==== -.Clair data sources and data collected +.Clair V2 data sources and data collected [cols="2a,2a,2a,2a,2a",options="header"] |=== |Data source |Data collected |Whitelist links |Format |License @@ -389,20 +399,36 @@ https://cve.mitre.org/cgi-bin/cvename.cgi?name= https://nvd.nist.gov/feeds/xml/cve/2.0/nvdcve-2.0-%s.meta |N/A |link:https://nvd.nist.gov/faq[Public domain] + +|link:https://alas.aws.amazon.com/[Amazon Linux Security Advisories] +|Amazon Linux 2018.03, 2 namespaces +|link:http://repo.us-west-2.amazonaws.com/2018.03/updates/x86_64/mirror.list[Amazonaws.com mirror list] + +link:https://cdn.amazonlinux.com/2/core/latest/x86_64/mirror.list[Amazon.com mirror list] +|link:http://www.rpm.org/[rpm] +|link:https://spdx.org/licenses/MIT-0.html[MIT-0] + |=== [[run-clair]] -=== Run Clair +=== Run Clair V2 -Execute the following command to run Clair: +Execute the following command to run Clair V2: [subs="verbatim,attributes"] +ifdef::upstream[] +You will need to build your own Clair container and run it during this step. +Instructions for building the Clair container are not yet available. 
+endif::upstream[] + +ifdef::downstream[] ``` -# docker run --restart=always -p 6060:6060 -p 6061:6061 \ +# sudo podman run --restart=always -p 6060:6060 -p 6061:6061 \ -v /path/to/clair/config/directory:/clair/config \ - quay.io/redhat/clair-jwt:v{productmin} + {productrepo}/clair-jwt:{productminv} ``` +endif::downstream[] Output similar to the following will be seen on success: @@ -433,7 +459,7 @@ time="2016-05-04T20:01:08Z" level=info msg="Starting forward proxy (Listening on 2016-05-04 20:01:09,543 INFO success: jwtproxy entered RUNNING state, process has stayed up for > than 1 seconds (startsecs) ``` -To verify Clair is running, execute the following command: +To verify Clair V2 is running, execute the following command: ``` curl -X GET -I http://path/to/clair/here:6061/health @@ -449,4 +475,4 @@ Content-Length: 0 Content-Type: text/plain; charset=utf-8 ``` -Once Clair and its associated database are running, you man need to restart your quay application for the changes to take effect. +Once Clair V2 and its associated database are running, you man need to restart your quay application for the changes to take effect. diff --git a/modules/proc_manage-insert-custom-cert.adoc b/modules/proc_manage-insert-custom-cert.adoc deleted file mode 100644 index b058551a8..000000000 --- a/modules/proc_manage-insert-custom-cert.adoc +++ /dev/null @@ -1,96 +0,0 @@ -[[adding-tls-certificates-to-the-quay-enterprise-container]] -= Adding TLS Certificates to the {productname} Container - -To add custom TLS certificates to {productname}, create a new -directory named `extra_ca_certs/` beneath the {productname} config -directory. Copy any required site-specific TLS certificates to this new -directory. - -[[add-certificates-to-quay-container]] -== Add TLS certificates to {productname} -. View certificate to be added to the container -+ -``` -$ cat storage.crt ------BEGIN CERTIFICATE----- -MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV -[...] 
------END CERTIFICATE----- -``` - -. Create certs directory and copy certificate there -+ -``` -$ mkdir -p quay/config/extra_ca_certs -$ cp storage.crt quay/config/extra_ca_certs/ -$ tree quay/config/ -├── config.yaml -├── extra_ca_certs -│   ├── storage.crt -``` - -. Obtain the quay container's `CONTAINER ID` with `docker ps`: -+ -[subs="verbatim,attributes"] -``` -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS -5a3e82c4a75f quay.io/redhat/quay:v{productmin} "/sbin/my_init" 24 hours ago Up 18 hours 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp, 443/tcp grave_keller -``` - -. Restart the container with that ID: -+ -``` -$ docker restart 5a3e82c4a75f -``` - -. Examine the certificate copied into the container namespace: -+ -``` -$ docker exec -it 5a3e82c4a75f cat /etc/ssl/certs/storage.pem ------BEGIN CERTIFICATE----- -MIIDTTCCAjWgAwIBAgIJAMVr9ngjJhzbMA0GCSqGSIb3DQEBCwUAMD0xCzAJBgNV -``` - -[[add-certs-when-deployed-on-kubernetes]] -== Add certs when deployed on Kubernetes - -When deployed on Kubernetes, {productname} mounts in a secret as a volume to store -config assets. Unfortunately, this currently breaks the upload -certificate function of the superuser panel. - -To get around this error, a base64 encoded certificate can be added to -the secret _after_ {productname} has been deployed. Here's how: - -. Begin by base64 encoding the contents of the certificate: -+ -``` -$ cat ca.crt ------BEGIN CERTIFICATE----- -MIIDljCCAn6gAwIBAgIBATANBgkqhkiG9w0BAQsFADA5MRcwFQYDVQQKDA5MQUIu -TElCQ09SRS5TTzEeMBwGA1UEAwwVQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE2 -MDExMjA2NTkxMFoXDTM2MDExMjA2NTkxMFowOTEXMBUGA1UECgwOTEFCLkxJQkNP -UkUuU08xHjAcBgNVBAMMFUNlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZI -[...] ------END CERTIFICATE----- - -$ cat ca.crt | base64 -w 0 -[...] -c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= -``` -. Use the `kubectl` tool to edit the quay-enterprise-config-secret. 
-+ -``` -$ kubectl --namespace quay-enterprise edit secret/quay-enterprise-config-secret -``` -. Add an entry for the cert and paste the full base64 encoded string under -the entry: -+ -``` - custom-cert.crt: -c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= -``` - -. Finally, recycle all {productname} pods. Use `kubectl delete` to remove all {productname} -pods. The {productname} Deployment will automatically schedule replacement pods -with the new certificate data. diff --git a/modules/proc_manage-ipv6-dual-stack.adoc b/modules/proc_manage-ipv6-dual-stack.adoc new file mode 100644 index 000000000..8106db0b2 --- /dev/null +++ b/modules/proc_manage-ipv6-dual-stack.adoc @@ -0,0 +1,106 @@ +:_content-type: CONCEPT +[id="proc_manage-ipv6-dual-stack"] += IPv6 and dual-stack deployments + +Your standalone {productname} deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. Support is also offered for dual-stack networking so your {productname} deployment can listen on IPv4 and IPv6 simultaneously. + +For a list of known limitations, see xref:proc_manage-ipv6-limitations-38[IPv6 limitations] + +[id="proc-manage-enabling-ipv6"] +== Enabling the IPv6 protocol family + +Use the following procedure to enable IPv6 support on your standalone {productname} deployment. + +.Prerequisites + +* You have updated {productname} to 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `IPv6`, for example: ++ +[source,yaml] +---- +--- +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: IPv6 +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +--- +---- + +. Start, or restart, your {productname} deployment. + +. 
Check that your deployment is listening to IPv6 by entering the following command: ++ +[source,terminal] +---- +$ curl /health/instance +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +After enabling IPv6 in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured to use IPv6 and is not hindered by the ipv6-limitations[current limitations]. + +[WARNING] +==== +If your environment is configured to IPv4, but the `FEATURE_LISTEN_IP_VERSION` configuration field is set to `IPv6`, {productname} will fail to deploy. +==== + +[id="proc-manageenabling-dual-stack"] +== Enabling the dual-stack protocol family + +Use the following procedure to enable dual-stack (IPv4 and IPv6) support on your standalone {productname} deployment. + + +.Prerequisites + +* You have updated {productname} to 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `dual-stack`, for example: ++ +[source,yaml] +---- +--- +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: dual-stack +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +--- +---- + +. Start, or restart, your {productname} deployment. + +. Check that your deployment is listening to both channels by entering the following command: +.. For IPv4, enter the following command: ++ +[source,terminal] +---- +$ curl --ipv4 +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- +.. 
For IPv6, enter the following command: ++ +[source,terminal] +---- +$ curl --ipv6 +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +After enabling dual-stack in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured for dual-stack. + +[id="proc_manage-ipv6-limitations-38"] +== IPv6 and dual-stack limitations + +* Currently, attempting to configure your {productname} deployment with the common Azure Blob Storage configuration will not work on IPv6 single stack environments. Because the endpoint of Azure Blob Storage does not support IPv6, there is no workaround in place for this issue. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4433[PROJQUAY-4433]. + +* Currently, attempting to configure your {productname} deployment with Amazon S3 CloudFront will not work on IPv6 single stack environments. Because the endpoint of Amazon S3 CloudFront does not support IPv6, there is no workaround in place for this issue. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4470[PROJQUAY-4470]. \ No newline at end of file diff --git a/modules/proc_manage-ldap-setup.adoc b/modules/proc_manage-ldap-setup.adoc index c7e678421..01e33b519 100644 --- a/modules/proc_manage-ldap-setup.adoc +++ b/modules/proc_manage-ldap-setup.adoc @@ -1,89 +1,271 @@ -[[ldap-authentication-setup-for-quay-enterprise]] +:_content-type: CONCEPT +[id="ldap-authentication-setup-for-quay-enterprise"] = LDAP Authentication Setup for {productname} -The Lightweight Directory Access Protocol (LDAP) is an open, -vendor-neutral, industry standard application protocol for accessing and -maintaining distributed directory information services over an Internet -Protocol (IP) network. {productname} supports using LDAP as an -identity provider. 
+Lightweight Directory Access Protocol (LDAP) is an open, vendor-neutral, industry standard application protocol for accessing and maintaining distributed directory information services over an Internet Protocol (IP) network. {productname} supports using LDAP as an identity provider. -[[quay-ldap-prerequisites]] -== Prerequisites +[id="ldap-considerations"] +== Considerations when enabling LDAP -The {productname} LDAP setup workflow requires that the user -configuring the LDAP Setup already exist in the LDAP directory. Before attempting -the setup, make sure that you are logged in as a superuser that matches -user crendentials in LDAP. In order to do so, Navigate to the SuperUser -panel (ex: http(s)://quay.enterprise/superuser) and click on the “Create -User” button to create a new User. Make sure to create a user that -matches the username/email syntax in LDAP. +Prior to enabling LDAP for your {productname} deployment, you should consider the following. -Once the user is created, click on the Settings icon next to the user -and choose “Make Superuser” option. For ease of troubleshooting, set the -User password to the LDAP password. +[discrete] +[id="existing-quay-deployments"] +=== Existing {productname} deployments -image:superuser-user-settings.png[Change the user password to match LDAP password] +Conflicts between usernames can arise when you enable LDAP for an existing {productname} deployment that already has users configured. For example, one user, `alice`, was manually created in {productname} prior to enabling LDAP. If the username `alice` also exists in the LDAP directory, {productname} automatically creates a new user, `alice-1`, when `alice` logs in for the first time using LDAP. {productname} then automatically maps the LDAP credentials to the `alice` account. For consistency reasons, this might be erroneous for your {productname} deployment. 
It is recommended that you remove any potentially conflicting local account names from {productname} prior to enabling LDAP. +[discrete] +[id="considerations-for-manual-user-creation"] +=== Manual User Creation and LDAP authentication -You will be prompted to restart the container once the new user is -created. Restart the {productname} container and log in to the Superuser -panel *_as the user that was just created._* +When {productname} is configured for LDAP, LDAP-authenticated users are automatically created in {productname}'s database on first log in, if the configuration option `FEATURE_USER_CREATION` is set to `true`. If this option is set to `false`, the automatic user creation for LDAP users fails, and the user is not allowed to log in. In this scenario, the superuser needs to create the desired user account first. Conversely, if `FEATURE_USER_CREATION` is set to `true`, this also means that a user can still create an account from the {productname} login screen, even if there is an equivalent user in LDAP. -[[setup-ldap-configuration]] -== Setup LDAP Configuration +[id="setup-ldap-configuration"] +== Configuring LDAP for {productname} -Navigate to the Superuser panel and navigate to settings section. Locate -the Authentication section and select “LDAP” from the drop-down menu. +You can configure LDAP for {productname} by updating your `config.yaml` file directly and restarting your deployment. Use the following procedure as a reference when configuring LDAP for {productname}. -image:authentication-ldap.png[Select LDAP from the Authentication section] +//// +.Procedure -Enter LDAP configuration fields as required. +. You can use the {productname} config tool to configure LDAP. -image:authentication-ldap-details.png[Fill in LDAP information] +.. Using the {productname} config tool, locate the *Authentication* section. Select *LDAP* from the dropdown menu, and update the LDAP configuration fields as required. 
++ +image:authentication-ldap.png[LDAP configuration fields] -[[few-tips-for-ldap-configuration]] -== Tips for LDAP configuration: +.. Optional. In the *Team synchronization* box, click *Enable Team Synchronization Support*. With team synchronization enabled, {productname} administrators who are also superusers can set teams to have their membership synchronized with a backing group in LDAP. ++ +image:authentication-ldap-team-sync-1.png[Team synchronization] -* LDAP URI must be in ldap:// or ldaps:// syntax. Typing a URI with -ldaps:// prefix will surface the option to provide custom SSL -certificate for TLS setup -* User Relative DN is relative to BaseDN (ex: ou=NYC not -ou=NYC,dc=example,dc=org) -* Logged in Username must exist in User Relative DN -* You can enter multiple “Secondary User Relative DNs” if there are -multiple Organizational Units where User objects are located at. (ex: -ou=Users,ou=NYC and ou=Users,ou=SFO). Simply type in the Organizational -Units and click on Add button to add multiple RDNs -* sAMAccountName is the UID attribute for against Microsoft Active -Directory setups -* {productname} searches "User Relative DN" with subtree scope. For -example, if your Organization has Organizational Units NYC and SFO under -the Users OU (`ou=SFO,ou=Users and ou=NYC,ou=Users`), {productname} -can authenticate users from both the NYC and SFO Organizational Units if -the User Relative DN is set to Users (ou=Users) +.. For *Resynchronization duration* enter *60m*. This option sets the resynchronization duration at which a team must be re-synchronized. This field must be set similar to the following examples: `30m`, `1h`, `1d`. -Once the configuration is completed, click on “Save Configuration -Changes” button to validate the configuration. +.. Optional. 
For *Self-service team syncing setup*, you can click *Allow non-superusers to enable and manage team syncing* to allow superusers the ability to enable and manage team syncing under the organizations that they are administrators for. ++ +image:authentication-ldap-team-sync-2.png[Team synchronization] -image:authentication-ldap-success.png[Fill in LDAP information] +.. Locate the *LDAP URI* box and provide a full LDAP URI, including the _ldap://_ or _ldaps://_ prefix, for example, `ldap://117.17.8.101`. ++ +image:authentication-ldap-uri.png[LDAP server URI] -You will be prompted to login with *_LDAP -credentials_*. +.. Under *Base DN*, provide a name which forms the base path for looking up all LDAP records, for example, `o=`,`dc=`,`dc=com`. ++ +image:authentication-ldap-basedn.png[Distinguished Names] -[[common-issues]] -== Common Issues +.. Under *User Relative DN*, provide a list of Distinguished Name path(s), which form the secondary base path(s) for looking up all user LDAP records relative to the *Base DN* defined above. For example, `uid=`,`ou=Users`,`o=`,`dc=`,`dc=com`. This path, or these paths, is tried if the user is not found through the primary relative DN. ++ +image:user-relative-dn.png[User Relative DN] ++ +[NOTE] +==== +*User Relative DN* is relative to *Base DN*, for example, `ou=Users` and not `ou=Users,dc=,dc=com`. +==== -*_Invalid credentials_* +.. Optional. Provide *Secondary User Relative DNs* if there are multiple Organizational Units where user objects are located. You can type in the Organizational Units and click *Add* to add multiple RDNs. For example, `ou=Users,ou=NYC and ou=Users,ou=SFO`. ++ +The *User Relative DN* searches with subtree scope. For example, if your organization has Organization Units `NYC` and `SFO` under the Users OU (that is, `ou=SFO,ou=Users` and `ou=NYC,ou=Users`), {productname} can authenticate users from both the `NYC` and `SFO` Organizational Units if the *User Relative DN* is set to `Users` (`ou=Users`). 
-Administrator DN or Administrator DN Password values are incorrect +.. Optional. Fill in the *Additional User Filter Expression* field for all user lookup queries if desired. Distinguished Names used in the filter must be full based. The *Base DN* is not automatically added to this field, and you must wrap the text in parentheses, for example, `(memberOf=cn=developers,ou=groups,dc=,dc=com)`. ++ +image:authentication-ldap-user-filter.png[Additional User Filter] -*_Verification of superuser %USERNAME% failed: Username not found The -user either does not exist in the remote authentication system OR LDAP -auth is misconfigured._* +.. Fill in the *Administrator DN* field for the {productname} administrator account. This account must be able to log in and view the records for all user accounts. For example: `uid=,ou=Users,o=,dc=,dc=com`. ++ +image:authentication-ldap-admin-dn.png[Administrator DN] -{productname} can connect to the LDAP server via Username/Password specified in -the Administrator DN fields however cannot find the current logged in -user with the UID Attribute or Mail Attribute fields in the User -Relative DN Path. Either current logged in user does not exist in User -Relative DN Path, or Administrator DN user do not have rights to -search/read this LDAP path. +.. Fill in the *Administrator DN Password* field. This is the password for the administrator distinguished name. ++ +[IMPORTANT] +==== +The password for this field is stored in plaintext inside of the `config.yaml` file. Setting up a dedicated account or using a password hash is highly recommended. +==== + +.. Optional. Fill in the *UID Attribute* field. This is the name of the property field in the LDAP user records that stores your user's username. Most commonly, *uid* is entered for this field. This field can be used to log into your {productname} deployment. ++ +image:uid-attribute-ldap.png[UID Attribute] + +.. Optional. Fill in the *Mail Attribute* field. 
This is the name of the property field in your LDAP user records that stores your user's e-mail addresses. Most commonly, *mail* is entered for this field. This field can be used to log into your {productname} deployment. ++ +image:mail-attribute-ldap.png[Mail Attribute] ++ +[NOTE] +==== +* The username to log in must exist in the *User Relative DN*. +* If you are using Microsoft Active Directory to set up your LDAP deployment, you must use `sAMAccountName` for your UID attribute. +==== + +.. Optional. You can add a custom SSL/TLS certificate by clicking *Choose File* under the *Custom TLS Certificate* option. Additionally, you can enable fallbacks to insecure, non-TLS connections by checking the *Allow fallback to non-TLS connections* box. ++ +image:authentication-ldap-ssl.png[LDAP server SSL] ++ +If you upload an SSL/TLS certificate, you must provide an _ldaps://_ prefix, for example, `LDAP_URI: ldaps://ldap_provider.example.org`. + +//// + +. Update your `config.yaml` file directly to include the following relevant information: ++ +[source,yaml] +---- +# ... +AUTHENTICATION_TYPE: LDAP <1> +# ... +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com <2> +LDAP_ADMIN_PASSWD: ABC123 <3> +LDAP_ALLOW_INSECURE_FALLBACK: false <4> +LDAP_BASE_DN: <5> + - dc=example + - dc=com +LDAP_EMAIL_ATTR: mail <6> +LDAP_UID_ATTR: uid <7> +LDAP_URI: ldap://.com <8> +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,dc=,dc=com) <9> +LDAP_USER_RDN: <10> + - ou=people +LDAP_SECONDARY_USER_RDNS: <11> + - ou= + - ou= + - ou= + - ou= +# ... +---- +<1> Required. Must be set to `LDAP`. +<2> Required. The admin DN for LDAP authentication. +<3> Required. The admin password for LDAP authentication. +<4> Required. Whether to allow SSL/TLS insecure fallback for LDAP authentication. +<5> Required. The base DN for LDAP authentication. +<6> Required. The email attribute for LDAP authentication. +<7> Required. The UID attribute for LDAP authentication. +<8> Required. The LDAP URI. +<9> Required. 
The user filter for LDAP authentication. +<10> Required. The user RDN for LDAP authentication. +<11> Optional. Secondary User Relative DNs if there are multiple Organizational Units where user objects are located. + +. After you have added all required LDAP fields, save the changes and restart your {productname} deployment. + +[id="ldap-restricted-users-enabling"] +== Enabling the LDAP_RESTRICTED_USER_FILTER configuration field + +The `LDAP_RESTRICTED_USER_FILTER` configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, this option allows {productname} administrators the ability to configure LDAP users as restricted users when {productname} uses LDAP as its authentication provider. + +Use the following procedure to enable LDAP restricted users on your {productname} deployment. + +.Prerequisites + +* Your {productname} deployment uses LDAP as its authentication provider. +* You have configured the `LDAP_USER_FILTER` field in your `config.yaml` file. + +.Procedure + +. In your deployment's `config.yaml` file, add the `LDAP_RESTRICTED_USER_FILTER` parameter and specify the group of restricted users, for example, `members`: ++ +[source,yaml] +---- +# ... +AUTHENTICATION_TYPE: LDAP +# ... +FEATURE_RESTRICTED_USERS: true <1> +# ... +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_RESTRICTED_USER_FILTER: (=) <2> +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +# ... +---- +<1> Must be set to `true` when configuring an LDAP restricted user. +<2> Configures specified users as restricted users. + +. Start, or restart, your {productname} deployment. 
+ +After enabling the `LDAP_RESTRICTED_USER_FILTER` feature, your LDAP {productname} users are restricted from reading and writing content, and creating organizations. + +[id="ldap-super-users-enabling"] +== Enabling the LDAP_SUPERUSER_FILTER configuration field + +With the `LDAP_SUPERUSER_FILTER` field configured, {productname} administrators can configure Lightweight Directory Access Protocol (LDAP) users as superusers if {productname} uses LDAP as its authentication provider. + +Use the following procedure to enable LDAP superusers on your {productname} deployment. + +.Prerequisites + +* Your {productname} deployment uses LDAP as its authentication provider. +* You have configured the `LDAP_USER_FILTER` field in your `config.yaml` file. + +.Procedure + +. In your deployment's `config.yaml` file, add the `LDAP_SUPERUSER_FILTER` parameter and add the group of users you want configured as superusers, for example, `root`: ++ +[source,yaml] +---- +# ... +AUTHENTICATION_TYPE: LDAP +# ... +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_SUPERUSER_FILTER: (=) <1> +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +# ... +---- +<1> Configures specified users as superusers. + +. Start, or restart, your {productname} deployment. + +After enabling the `LDAP_SUPERUSER_FILTER` feature, your LDAP {productname} users have superuser privileges. The following options are available to superusers: + +* Manage users +* Manage organizations +* Manage service keys +* View the change log +* Query the usage logs +* Create globally visible user messages + +[id="common-ldap-configuration-issues"] +== Common LDAP configuration issues + +The following errors might be returned with an invalid configuration. + +* **Invalid credentials**. 
If you receive this error, the Administrator DN or Administrator DN password values are incorrect. Ensure that you are providing accurate Administrator DN and password values. + +* **Verification of superuser %USERNAME% failed**. This error is returned for the following reasons: + +** The username has not been found. +** The user does not exist in the remote authentication system. +** LDAP authorization is configured improperly. + +* **Cannot find the current logged in user**. When configuring LDAP for {productname}, there may be situations where the LDAP connection is established successfully using the username and password provided in the *Administrator DN* fields. However, if the current logged-in user cannot be found within the specified *User Relative DN* path using the *UID Attribute* or *Mail Attribute* fields, there are typically two potential reasons for this: + +** The current logged in user does not exist in the *User Relative DN* path. +** The *Administrator DN* does not have rights to search or read the specified LDAP path. ++ +To fix this issue, ensure that the logged in user is included in the *User Relative DN* path, or provide the correct permissions to the *Administrator DN* account. 
+ +[id="ldap-configuration-fields-link"] +== LDAP configuration fields + +For a full list of LDAP configuration fields, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-ldap[LDAP configuration fields] \ No newline at end of file diff --git a/modules/proc_manage-log-storage-elasticsearch.adoc b/modules/proc_manage-log-storage-elasticsearch.adoc new file mode 100644 index 000000000..6976121da --- /dev/null +++ b/modules/proc_manage-log-storage-elasticsearch.adoc @@ -0,0 +1,67 @@ +[id="proc_manage-log-storage-elasticsearch"] += Configuring action log storage for Elasticsearch + +[NOTE] +==== +To configure action log storage for Elasticsearch, you must provide your own Elasticsearch stack; it is not included with {productname} as a customizable component. +==== + +Enabling Elasticsearch logging can be done during {productname} deployment or post-deployment by updating your `config.yaml` file. When configured, usage log access continues to be provided through the web UI for repositories and organizations. + +Use the following procedure to configure action log storage for Elasticsearch: + +.Procedure + +. Obtain an Elasticsearch account. + +. Update your {productname} `config.yaml` file to include the following information: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: elasticsearch <1> +LOGS_MODEL_CONFIG: + producer: elasticsearch <2> + elasticsearch_config: + host: http://: <3> + port: 9200 <4> + access_key: <5> + secret_key: <6> + use_ssl: True <7> + index_prefix: <8> + aws_region: <9> +# ... +---- +<1> The method for handling log data. +<2> Choose either Elasticsearch or Kinesis to direct logs to +an intermediate Kinesis stream on AWS. You need to set up your own pipeline to +send logs from Kinesis to Elasticsearch, for example, Logstash. +<3> The hostname or IP address of the system providing +the Elasticsearch service. 
+<4> The port number providing the Elasticsearch service on the host +you just entered. Note that the port must be accessible from all systems +running the {productname} registry. The default is TCP port `9200`. +<5> The access key needed to gain access to the Elasticsearch +service, if required. +<6> The secret key needed to gain access to the Elasticsearch +service, if required. +<7> Whether to use SSL/TLS for Elasticsearch. Defaults to `True`. +<8> Choose a prefix to attach to log entries. +<9> If you are running on AWS, set the AWS region (otherwise, leave it blank). + +. Optional. If you are using Kinesis as your logs producer, you must include the following fields in your `config.yaml` file: ++ +[source,yaml] +---- + kinesis_stream_config: + stream_name: <1> + access_key: <2> + secret_key: <3> + aws_region: <4> +---- +<1> The name of the Kinesis stream. +<2> The name of the AWS access key needed to gain access to the Kinesis stream, if required. +<3> The name of the AWS secret key needed to gain access to the Kinesis stream, if required. +<4> The Amazon Web Services (AWS) region. + +. Save your `config.yaml` file and restart your {productname} deployment. diff --git a/modules/proc_manage-log-storage-splunk.adoc b/modules/proc_manage-log-storage-splunk.adoc new file mode 100644 index 000000000..41031634d --- /dev/null +++ b/modules/proc_manage-log-storage-splunk.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="proc_manage-log-storage-splunk"] += Configuring action log storage for Splunk + +link:https://www.splunk.com/[Splunk] is an alternative to Elasticsearch that can provide log analyses for your {productname} data. + +Enabling Splunk logging can be done during {productname} deployment or post-deployment using the configuration tool. Configuration includes both the option to forward action logs directly to Splunk or to the Splunk HTTP Event Collector (HEC). + +Use the following procedures to enable Splunk for your {productname} deployment. 
\ No newline at end of file diff --git a/modules/proc_manage-log-storage.adoc b/modules/proc_manage-log-storage.adoc new file mode 100644 index 000000000..9cb37c456 --- /dev/null +++ b/modules/proc_manage-log-storage.adoc @@ -0,0 +1,4 @@ +[id="proc_manage-log-storage"] += Configuring action log storage for Elasticsearch and Splunk + +By default, usage logs are stored in the {productname} database and exposed through the web UI on organization and repository levels. Appropriate administrative privileges are required to see log entries. For deployments with a large amount of logged operations, you can store the usage logs in Elasticsearch and Splunk instead of the {productname} database backend. \ No newline at end of file diff --git a/modules/proc_manage-quay-geo-replication.adoc b/modules/proc_manage-quay-geo-replication.adoc deleted file mode 100644 index 5cad84aa0..000000000 --- a/modules/proc_manage-quay-geo-replication.adoc +++ /dev/null @@ -1,89 +0,0 @@ -[[georeplication-of-storage-in-quay]] -= Georeplication of storage in {productname} - -Georeplication allows for a single globally-distributed {productname} -to serve container images from localized storage. - -When georeplication is configured, container image pushes will be -written to the preferred storage engine for that {productname} instance. After the -initial push, image data will be replicated in the background to other -storage engines. The list of replication locations is configurable. An -image pull will always use the closest available storage engine, to -maximize pull performance. - -[[prerequisites]] -== Prerequisites - -Georeplication requires that there be a high availability storage engine -(S3, GCS, RADOS, Swift) in each geographic region. Further, each region -must be able to access *every* storage engine due to replication -requirements. - -[NOTE] -==== -Local disk storage is not compatible with georeplication at this -time. 
-==== - -[id='visit-the-management-panel_{context}'] -== Visit the Management Panel - -Sign in to a superuser account from the {productname} login screen. For -example, if the host were reg.example.com, you would go to `http://reg.example.com/superuser` to view the management panel: -image:superuser.png[Log in as superuser to set up Georeplication] - -[[enable-storage-replication]] -== Enable storage replication - -. Click the configuration tab and scroll down to the section -entitled `Registry Storage`. -. Click `Enable Storage Replication`. -. Add each of the storage engines to which data will be replicated. -All storage engines to be used must be listed. -. If complete replication of all images to all storage engines is -required, under each storage engine configuration click `Replicate to -storage engine by default`. This will ensure that all images are -replicated to that storage engine. To instead enable per-namespace -replication, please contact support. -. Click Save to validate. - -. After adding storage and enabling “Replicate to storage engine by default” for Georeplications, you need to sync existing image data across all storage. -To do this, you need to `oc exec` (or docker/kubectl exec) into the container -and run: -+ -``` -# scl enable python27 bash -# python -m util.backfillreplication -``` -+ -This is a one time operation to sync content after adding new storage. - -[[run-quay-with-storage-preferences]] -== Run {productname} with storage preferences - -. Copy the config.yaml to all machines running {productname} - -. For each machine in each region, add a -`QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable with the -preferred storage engine for the region in which the machine is running. 
-+ -For example, for a machine running in Europe with the config -directory on the host available from /mnt/quay/config: -+ -[source,yaml,subs="verbatim,attributes"] -``` -# docker login quay.io -Username: yourquayuser -Password: ***** -# docker run -d -p 443:8443 -p 8080:8080 -v /mnt/quay/config:/conf/stack:Z \ - -e QUAY_DISTRIBUTED_STORAGE_PREFERENCE=europestorage \ - quay.io/redhat/quay:{productmin} -``` -+ -[NOTE] -==== -The value of the environment variable specified must match the -name of a Location ID as defined in the config panel. -==== - -. Restart all {productname} containers diff --git a/modules/proc_manage-quay-prometheus.adoc b/modules/proc_manage-quay-prometheus.adoc index c8d62a2ef..76f5fa3eb 100644 --- a/modules/proc_manage-quay-prometheus.adoc +++ b/modules/proc_manage-quay-prometheus.adoc @@ -8,11 +8,63 @@ endpoint on each instance to allow for easy monitoring and alerting. [[exposing-the-prometheus-endpoint]] == Exposing the Prometheus endpoint -The Prometheus- and -Grafana-compatible endpoint on the {productname} instance can -be found at port `9092`. See https://access.redhat.com/solutions/3750281[Monitoring Quay with Prometheus and Grafana] for details on configuring Prometheus +=== Standalone {productname} + +When using `podman run` to start the `Quay` container, expose the metrics port `9091`: + +[subs="verbatim,attributes"] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 -p 9091:9091\ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- + +The metrics will now be available: + +[source,terminal] +---- +$ curl quay.example.com:9091/metrics +---- + + +See https://access.redhat.com/solutions/3750281[Monitoring Quay with Prometheus and Grafana] for details on configuring Prometheus and Grafana to monitor Quay repository counts. 
+=== {productname} Operator + +Determine the cluster IP for the `quay-metrics` service: + +[source,terminal] +---- +$ oc get services -n quay-enterprise +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +example-registry-clair-app ClusterIP 172.30.61.161 80/TCP,8089/TCP 18h +example-registry-clair-postgres ClusterIP 172.30.122.136 5432/TCP 18h +example-registry-quay-app ClusterIP 172.30.72.79 443/TCP,80/TCP,8081/TCP,55443/TCP 18h +example-registry-quay-config-editor ClusterIP 172.30.185.61 80/TCP 18h +example-registry-quay-database ClusterIP 172.30.114.192 5432/TCP 18h +example-registry-quay-metrics ClusterIP 172.30.37.76 9091/TCP 18h +example-registry-quay-redis ClusterIP 172.30.157.248 6379/TCP 18h +---- + +Connect to your cluster and access the metrics using the cluster IP and port for the `quay-metrics` service: + +[source,terminal] +---- +$ oc debug node/master-0 + +sh-4.4# curl 172.30.37.76:9091/metrics + +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 4.0447e-05 +go_gc_duration_seconds{quantile="0.25"} 6.2203e-05 +... +---- + + [[setting-up-prometheus-to-consume-metrics]] === Setting up Prometheus to consume metrics @@ -26,11 +78,11 @@ Prometheus. A simple link:http://kubernetes.io/docs/user-guide/services/[Kubernetes service] can be configured to provide the DNS entry for Prometheus. -Details on running Prometheus under Kubernetes can be found at -https://coreos.com/blog/prometheus-and-kubernetes-up-and-running.html[Prometheus -and Kubernetes] and -https://coreos.com/blog/monitoring-kubernetes-with-prometheus.html[Monitoring -Kubernetes with Prometheus]. +//Details on running Prometheus under Kubernetes can be found at +//https://coreos.com/blog/prometheus-and-kubernetes-up-and-running.html[Prometheus +//and Kubernetes] and +//https://coreos.com/blog/monitoring-kubernetes-with-prometheus.html[Monitoring +//Kubernetes with Prometheus]. 
[[dns-configuration-for-a-manual-cluster]] === DNS configuration for a manual cluster diff --git a/modules/proc_manage-quay-ssl.adoc b/modules/proc_manage-quay-ssl.adoc index 98012bfd9..c70ac213b 100644 --- a/modules/proc_manage-quay-ssl.adoc +++ b/modules/proc_manage-quay-ssl.adoc @@ -1,11 +1,11 @@ [[using-ssl-to-protect-quay]] = Using SSL to protect connections to {productname} -This document assumes you have deployed {productname} in a link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/getting_started_with_red_hat_quay/[single-node] or link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_-_high_availability[highly available] deployment. +This document assumes you have deployed {productname} in a link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/getting_started_with_red_hat_quay/[single-node] or link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploy_red_hat_quay_-_high_availability[highly available] deployment. To configure {productname} with a https://en.wikipedia.org/wiki/Self-signed_certificate[self-signed -certificate], you need to create a Certificate Authority (CA), then generate the required key and certificate files. You then enter those files using the {productname} superuser GUI or command line. +certificate], you need to create a Certificate Authority (CA), then generate the required key and certificate files. You then enter those files using the {productname} Config Tool or command line. [[create-a-ca-and-sign-a-certificate]] == Create a CA and sign a certificate @@ -88,19 +88,18 @@ The next step can be accomplished either in the {productname} screen or from the [[configure-with-superuser-gui-in-quay]] === Configure SSL from the {productname} Setup screen -Start the quay container in config mode, as described +Start the `Quay` container in config mode, as described in each deployment guide. 
In the server Configuration section, enable SSL as follows: . Set the `Server Hostname` to the appropriate value and check the `Enable SSL` box, then upload the `ssl.key` and `ssl.cert` files (in our example, named `device.key` and `device.crt`, respectively): -image:server-config.png[Enable SSL] +image:ssl-config.png[Enable SSL] . Save the configuration. {productname} will automatically validate the SSL certificate: image:save-configuration.png[validating SSL] . Restart the container -image:restart-container.png[restart the container] [[to-configure-with-the-command-line]] === Configure with the command line @@ -138,12 +137,12 @@ PREFERRED_URL_SCHEME: https . Restart the {productname} container: + ``` -$ docker ps +$ sudo podman ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -eaf45a4aa12d ...redhat.com/rhscl/redis "/usr/bin/redis-serve" 22 hours ago Up 22 hours 0.0.0.0:6379->6379/tcp dreamy... +eaf45a4aa12d ...redhat.io/rhel8/redis-5 "/usr/bin/redis-serve" 22 hours ago Up 22 hours 0.0.0.0:6379->6379/tcp dreamy... cbe7b0fa39d8 quay.io/redhat/quay "/sbin/my_init" 22 hours ago Up one hour 80/tcp,443/tcp,443/tcp ferv... 
-705fe7311940 mysql:5.7 "/entrypoint.sh mysql" 23 hours ago Up 22 hours 0.0.0.0:3306->3306/tcp mysql -$ docker restart cbe7b0fa39d8 +705fe7311940 rhel8/mysql-80 "/entrypoint.sh mysql" 23 hours ago Up 22 hours 0.0.0.0:3306->3306/tcp mysql +$ sudo podman restart cbe7b0fa39d8 ``` [[test-the-secure-connection]] diff --git a/modules/proc_manage-release-notifications.adoc b/modules/proc_manage-release-notifications.adoc index 48344727f..f70ccf4c4 100644 --- a/modules/proc_manage-release-notifications.adoc +++ b/modules/proc_manage-release-notifications.adoc @@ -1,7 +1,7 @@ [[release-notifications]] = Getting {productname} release notifications To keep up with the latest {productname} releases and other changes related -to Red Hat Quay, you can sign up for update notifications on the +to {productname}, you can sign up for update notifications on the link:https://access.redhat.com[Red Hat Customer Portal]. After signing up for notifications, you will receive notifications letting you know when there is new a {productname} version, updated documentation, or other {productname} @@ -15,7 +15,7 @@ image:notification-profile.png[View account and portal selections] . Select the Notifications tab. . Select Manage Notifications. . Select Follow, then choose Products from the drop-down box. -. From the drop-down box next to the Products, search for and select Red Hat Quay: +. From the drop-down box next to the Products, search for and select {productname}: image:notification-follow.png[Select Products from notifications box] . Select the SAVE NOTIFICATION button. Going forward, you will receive notifications -when there are changes to the Red Hat Quay product, such as a new release. +when there are changes to the {productname} product, such as a new release. 
diff --git a/modules/proc_manage-repomirror.adoc b/modules/proc_manage-repomirror.adoc deleted file mode 100644 index 3d138b63f..000000000 --- a/modules/proc_manage-repomirror.adoc +++ /dev/null @@ -1,214 +0,0 @@ -[[repo-mirroring-in-red-hat-quay]] -= Repository Mirroring in {productname} - -{productname} repository mirroring lets you mirror images from external container registries -(or the local registry) into your local {productname} cluster. -Using repository mirroring you can synchronize images to {productname} based on repository names and tags. - -[[overview-repository-mirroring]] -== Overview of repository mirroring - -From your {productname} cluster with repository mirroring enabled, you can: - -* Choose a repository from an external registry to mirror -* Add credentials to access the external registry -* Set intervals at which a repository is synced -* Identify specific container image repository names and tags to sync -* Check the current state of synchronization - -With repository mirroring, you mirror a specific subset of content, -between two or more distinct registries, to -selected datacenters, clusters, or regions. By contrast, Georeplication -provides a single, globally distributed {productname} to serve container -images from localized storage. The two approaches to sharing content -differ in the following ways: - -.{productname} Repository Mirroring vs. Georeplication -[width="100%",cols=options="header"] -|================================================================== -| Feature / Capability | Georeplication | Repository Mirroring -| What is the feature designed to do? | A shared, global registry | Distinct, different registries -| What happens if replication or mirroring hasn’t been completed yet? | The remote copy is used (slower) | No image is served -| Is access to all storage backends in both regions required? | Yes (all {productname} nodes) | No (distinct storage) -| Can users push images from both sites to the same repository? 
| Yes | No -| Is all registry content and configuration identical across all regions (shared database) | Yes | No -| Can users select individual namespaces or repositories to be mirrored? | No,by default | Yes -| Can users apply filters to synchronization rules? | No | Yes -|================================================================== - -Here are a few tips for using {productname} repository mirroring: - -* With repository mirroring, you can mirror an entire repository or selectively -limit which images are synced based on a comma-separated list of tags, a -range of tags, or other means of identifying tags through -regular expressions and globs. - -* Once set as a mirrored repository, you cannot manually add other images to that repository. - -* Because the mirrored repository is based on the repository and tags you set, -it will hold only the content represented by the repo/tag pair. In other words, if you change -the tag so that some images in the repository don't match any more, those images will be deleted. - -* Only the designated robot can push images to a mirrored repository, -superseding any role-based access control permissions set on the repository. - -* With a mirrored repository, a user can pull images (given read permission) -from the repository but not push images to the repository. - -* Changing setting on your mirrored repository is done from a Mirrors tab -on the Repositories page for the mirrored repository you create. - -* Images are synced at set intervals, but can also be synced on demand. - -[[prerequisites-mirrored-repo]] -== Prerequisites - -Before you can use repository mirroring, you must enable repository mirroring from the {productname} -configuration screen and start the repository mirroring worker. 
Ways of starting up this service are described -in the {productname} deployment guides: - -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_-_basic/index[Deploy {productname} - Basic] -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_-_high_availability/index[Deploy {productname} - High Availability] -* link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift/index[Deploy {productname} on OpenShift] - -The steps shown in the following section assumes you already have the repository mirroring service running and -that you have enabled repository mirroring on your {productname} cluster. - -[[create-mirrored-repo]] -== Create a mirrored repository - -To mirror an external repository from an external container registry, do the following: - -. Log into your {productname} registry. - -. Create a robot account to pull images for the mirrored repository: -+ -* Select Account Settings from the drop-down in the upper right corner. -+ -* Select the Robot Accounts button in the left column. -+ -* Select Create Robot Account. -+ -* Add the name and description of the robot account and select Create robot account. -+ -* Select Close, since the mirrored repository you are adding does not exist yet. -+ -* Select the ROBOT ACCOUNT NAME from the listing. -+ -* When prompted, add the credentials needed by the robot to -access the external registry of the repository you want to mirror, then close -the Credentials window. - -. Select REPOSITORIES. - -. Select Create New Repository and give it a name. - -. Fill in a repository name, select Public or Private, and select Create Repository. - -. Select the Settings button and change the repository state to MIRROR. - -. Open the new repository and select the Mirroring button in the left column. - -. 
Fill in the fields to identify the repository you are mirroring in your new repository: - -+ -* **Registry URL**: Location of the container registry you want to mirror from. -+ -* **User or Organization**: Typically, the account name associated with the -content you are mirroring. -For example, with the image registry.example.com/jsmith/myimage:latest, jsmith would be entered here. -+ -* **Repository Name**: The name identifying the name of the set of images. -For example, with the image registry.example.com/jsmith/myimage:latest, myimage would be entered here. -+ -* **Sync Interval**: Defaults to syncing every 24 hours. You can change that based on hours or days. -+ -* **Robot User**: Select the robot account you created earlier to do the mirroring. -+ -* **Username**: The username for logging into the external registry holding the repository you are mirroring. -+ -* **Password**: The password associated with the Username. Note that the password -cannot include characters that require an escape character (\). -+ -* **Start Date**: The date on which mirroring begins. The current date and time used by default. -+ -* **Verify TLS**: Check this box if you want to verify the authenticity of the external registry. -Uncheck this box if, for example, you set up {productname} for testing with a self-signed certificate -or no certificate. -+ -* **HTTP Proxy**: Identify the proxy server needed to access the remote site, if one is required. -+ -* **Tags**: This field is required. You can enter individual tags or indicate which -tags to match using globbing or regular expressions. For example, you can match specific -instances of an image (such as latest), enter a comma-separated list of tags, or use -wildcards (such as mytag*) to match multiple images. For example, -for a repository that features daily image builds named abc-2019-08-12, abc-2019-08-13, -and so on, you could mirror all the August, 2019 images using abc-2019-08* as the tag. 
-+ -[NOTE] -==== -At least one Tag must be entered. -==== -+ -Here is an example of a completed Repository Mirroring screen: -+ -image:repo_mirror_create.png[Create a new {productname} repo mirror] -+ -. Select the Enable Mirror button. Here's the resulting Repository Mirroring page: -+ -image:repo_mirror_view.png[Viewing a new {productname} repo mirror] -+ -You can return to this page later to change any of those settings. - -[[working-with-mirrored-repo]] -== Working with mirrored repositories - -Once you have created a mirrored repository, there are several -ways you can work with that repository. -Select your mirrored repository from the Repositories page -and do any of the following: - -* **Enable/disable the repository**: Select the Mirroring button in the left column, -then toggle the Enabled check box to enable or disable the repository temporarily. - -* **Check mirror logs**: To make sure the mirrored repository is working properly, -you can check the mirror logs. To do that, select the Usage Logs -button in the left column. Here's an example: -+ -image:repo_mirror_logs.png[View logs for your {productname} repo mirror] - -* **Sync mirror now**: To immediately sync the images in your repository, -select the Sync Now button. - -* **Change credentials**: To change the username and password, select DELETE from the Credentials line. -Then select None and add the username and password needed to log into the external registry when prompted. - -* **Cancel mirroring**: To stop mirroring, which keeps the current images available but stops -new ones from being synced, select the CANCEL button. - -* **Set robot permissions**: {productname} robot accounts are named tokens that hold credentials -for accessing external repositories. By assigning credentials to a robot, that robot can be used -across multiple mirrored repositories that need to access the same external registry. 
-+ -You can assign an existing robot to a repository by going to Account Settings, then selecting -the Robot Accounts icon in the left column. For the robot account, choose the -link under the REPOSITORIES column. From the pop-up window, you can: - -** Check which repositories are assigned to that robot. - -** Assign read, write or Admin privileges to that robot from the PERMISSION field shown in this figure: -image:repo_mirror_robot_assign.png[Assign a robot to mirrored repo] - -* **Change robot credentials**: Robots can hold credentials such as -Kubernetes secrets, Docker login information, and Mesos bundles. -To change robot credentials, select -the Options gear on the robot's account line on the Robot Accounts window and choose View Credentials. -Add the appropriate credentials for the external repository the robot needs to access. -+ -image:repo_mirror_robot_perm.png[Assign permission to a robot] - -* **Check and change general setting**: Select the Settings button (gear icon) from the left -column on the mirrored repository page. -On the resulting page, you can change settings associated with -the mirrored repository. In particular, you can change User and Robot Permissions, -to specify exaclty which users and robots can read from or write to the repo. diff --git a/modules/proc_manage-running-chihaya.adoc b/modules/proc_manage-running-chihaya.adoc deleted file mode 100644 index 0b1f6caa0..000000000 --- a/modules/proc_manage-running-chihaya.adoc +++ /dev/null @@ -1,73 +0,0 @@ -[[chihaya-setup]] -= Setting up BitTorrent Distribution with Chihaya - -The Chihaya project is an open source BitTorrent tracker that supports -JWT-based authorization. It is the preferred tracker for making use of -the secure link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#bittorrent-based-distribution[BitTorrent-based distribution] -feature in {productname}. 
- -[[basic-chihaya-configuration]] -== Basic BitTorrent Chihaya configuration - -Copy the following file as `chihaya.yaml`, replacing `{QE LOCATION}` and -`{TRACKER LOCATION}` with the reachable endpoint for the Quay -instance and the tracker itself, respectively. - -``` -chihaya: - announce_interval: 15m - prometheus_addr: 0.0.0.0:6880 - - http: - addr: 0.0.0.0:6881 - allow_ip_spoofing: true - real_ip_header: X-Forwarded-For - read_timeout: 5s - write_timeout: 5s - request_timeout: 5s - - storage: - gc_interval: 14m - peer_lifetime: 15m - shards: 16 - max_numwant: 50 - - prehooks: - - name: jwt - config: - issuer: '{QE LOCATION}' - audience: '{TRACKER LOCATION}/announce' - jwk_set_url: '{QE LOCATION}/keys/services/quay/keys' - jwk_set_update_interval: 5m -``` - -[[running]] -== Running the Chihaya service - -Run the following commands to start Chihaya under a Docker container -with the specified configuration mounted, making sure to point the -`chihaya.yaml` to the file created above. - -``` -$ docker pull quay.io/jzelinskie/chihaya:v2.0.0-rc.1 -$ docker run -p 6880-6882:6880-6882 \ - -v $PWD/chihaya.yaml:/etc/chihaya.yaml:ro \ - quay.io/jzelinskie/chihaya:v2.0.0-rc.1 -``` - -[[security]] -== Securing Chihaya - -It is recommended to place the tracker behind an SSL-terminating proxy -or load balancer of some kind, especially if publicly facing. If setup -this way, make sure to update the `jwtAudience` value in the -configuration to have `https` as its prefix, and to refer to the load -balancer. - -[[high-availability]] -== Making Chihaya Highly Available - -High Availability of the tracker can be handled by running 2 or more -instances of the tracker, with one setup as primary and another as -secondary, configured with automatic failover. A simple HTTP check can -be used to ensure the health of each instance. 
diff --git a/modules/proc_manage-security-scanning.adoc b/modules/proc_manage-security-scanning.adoc index dec7a140d..22c3dbce1 100644 --- a/modules/proc_manage-security-scanning.adoc +++ b/modules/proc_manage-security-scanning.adoc @@ -1,12 +1,17 @@ [[quay-security-scanner]] -= {productname} Security Scanning with Clair += {productname} Security Scanning with Clair V2 {productname} supports scanning container images for known vulnerabilities with a scanning engine such as link:https://github.com/coreos/clair/[Clair]. This document explains how to configure Clair with {productname}. -== Set up Clair in the {productname} config tool -Enabling Clair in {productname} consists of: +[NOTE] +==== +With the release of {productname} 3.4, the default version of Clair is V4. This new version V4 is no longer being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview] and is supported for production use. Customers are strongly encouraged to use Clair V4 with {productname} 3.4. It is possible to run both Clair V4 and Clair V2 simultaneously if so desired. In future versions of {productname}, Clair V2 will eventually be removed. +==== + +== Set up Clair V2 in the {productname} config tool +Enabling Clair V2 in {productname} consists of: * Starting the {productname} config tool. See the {productname} deployment guide for the type of deployment you are doing (OpenShift, Basic, or HA) for how to @@ -17,13 +22,13 @@ start the config tool for that environment. The procedure varies, based on whether you are running {productname} on OpenShift or directly on a host. -=== Enabling Clair on a {productname} OpenShift deployment -To set up Clair on {productname} in OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift/index#add-clair-scanner[Add Clair image scanning to {productname}]. 
+=== Enabling Clair V2 on a {productname} OpenShift deployment +To set up Clair V2 on {productname} in OpenShift, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploy_red_hat_quay_on_openshift/index#add-clair-scanner[Add Clair image scanning to {productname}]. -=== Enabling Clair on a {productname} Basic or HA deployment -To set up Clair on a {productname} deployment where the container is running directly on the host system, do the following: +=== Enabling Clair V2 on a {productname} Basic or HA deployment +To set up Clair V2 on a {productname} deployment where the container is running directly on the host system, do the following: -. **Restart the {productname} config tool**: Run the quay container again in config mode, +. **Restart the {productname} config tool**: Run the `Quay` container again in config mode, open the configuration UI in a browser, then select `Modify an existing configuration`. When prompted, upload the `quay-config.tar.gz` file that was originally created for the deployment. @@ -57,4 +62,4 @@ config.yaml ssl.cert ssl.key $ cp config.yaml ssl* /mnt/quay/config .... -Next, start the Clair container and associated database, as described in the following sections. +Next, start the Clair V2 container and associated database, as described in the following sections. diff --git a/modules/proc_manage-upgrade-quay-guide.adoc b/modules/proc_manage-upgrade-quay-guide.adoc index 697df7a0c..e36ba1411 100644 --- a/modules/proc_manage-upgrade-quay-guide.adoc +++ b/modules/proc_manage-upgrade-quay-guide.adoc @@ -1,7 +1,7 @@ [[quay-upgrade-guide]] = {productname} upgrade guide -This document describes how to upgrade one or more Quay containers. +This document describes how to upgrade one or more `Quay` containers. [[backup-the-quay-enterprise-database]] == Backup the Quay database @@ -9,10 +9,10 @@ This document describes how to upgrade one or more Quay containers. 
The database is the "source of truth" for Quay, and some version upgrades will trigger a schema update and data migration. Such versions are clearly documented in the -https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/[Red Hat Quay Release Notes]. +https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[{productname} Release Notes]. Backup the database before upgrading Quay. Once the backup -completes, use the procedure in this document to stop the running Quay container, start the new container, and check the health of +completes, use the procedure in this document to stop the running `Quay` container, start the new container, and check the health of the upgraded Quay service. [[provide-quay-credentials-to-the-docker-client]] @@ -25,7 +25,7 @@ the upgraded Quay service. [[pull-the-latest-quay-enterprise-release-from-the-repository]] == Pull the latest Quay release from the repository. -Check the https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes/[list of Red Hat Quay releases] for the latest version. +Check the https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes/[list of {productname} releases] for the latest version. ``` # docker pull quay.io/coreos/registry:RELEASE_VERSION @@ -34,7 +34,7 @@ Check the https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-si Replace `RELEASE VERSION` with the desired version of Quay. [[find-the-running-quay-container-id]] -== Find the running Quay container ID +== Find the running `Quay` container ID ``` # docker ps -a @@ -43,14 +43,14 @@ Replace `RELEASE VERSION` with the desired version of Quay. The Quay image will be labeled `quay.io/coreos/registry`. 
[[stop-the-existing-quay-container]] -== Stop the existing Quay container +== Stop the existing `Quay` container ``` # docker stop QUAY_CONTAINER_ID ``` [[start-the-new-quay-enterprise-container]] -== Start the new Quay container +== Start the new `Quay` container ``` # docker run --restart=always -p 443:443 -p 80:80 \ @@ -73,11 +73,11 @@ should be consulted before each {productname} upgrade. [[check-the-health-of-the-upgraded-container]] == Check the health of the upgraded container -Visit the /health/endtoend endpoint on the registry hostname and verify +Visit the `/health/endtoend` endpoint on the registry hostname and verify that the code is 200 and `is_testing` is false. [[upgrade-all-containers-in-the-cluster]] == Upgrade the rest of the containers in the cluster. If the upgraded container is healthy, repeat this process for all -remaining Quay containers. +remaining `Quay` containers. diff --git a/modules/proc_manage-upgrade-quay.adoc b/modules/proc_manage-upgrade-quay.adoc index d91008262..c64fa5ad5 100644 --- a/modules/proc_manage-upgrade-quay.adoc +++ b/modules/proc_manage-upgrade-quay.adoc @@ -2,7 +2,7 @@ = Upgrading Quay The full list of Quay versions can be found on the -https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/[Red Hat Quay Release Notes] +https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[{productname} Release Notes] page. [[special-note-upgrading-from-quay-enterprise-2.0.0-to-2.0.0]] @@ -11,7 +11,7 @@ page. ==== If you are upgrading from a version of Quay older than 2.0.0, you *must* upgrade to Quay 2.0.0 *first*. 
Please follow the -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/#upgrade-to-quay-2.0.0[Upgrade to Quay 2.0.0 instructions] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/#upgrade-to-quay-2.0.0[Upgrade to Quay 2.0.0 instructions] to upgrade to Quay 2.0.0, and then follow the instructions below to upgrade from 2.0.0 to the latest version you'd like. ==== @@ -28,7 +28,7 @@ release. [[the-upgrade-process]] == The upgrade process -. Visit the https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/[Red Hat Quay Release Notes] page and note the latest version of Quay. +. Visit the https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/[{productname} Release Notes] page and note the latest version of Quay. . Shutdown the Quay cluster: Remove *all* containers from service. . On a *single* node, run the newer version of Quay. diff --git a/modules/proc_setting-up-quay-for-qbo.adoc b/modules/proc_setting-up-quay-for-qbo.adoc new file mode 100644 index 000000000..d67688471 --- /dev/null +++ b/modules/proc_setting-up-quay-for-qbo.adoc @@ -0,0 +1,40 @@ +:_content-type: PROCEDURE +[id="setting-up-quay-for-qbo"] += Setting up {productname} for the {qbo} + +In this procedure, you will create a dedicated {productname} organization, and from a new application created within that organization you will generate an OAuth token to be used with the {qbo} in {ocp}. + +.Procedure + +. Log in to {productname} through the web UI. + +. Select the organization for which the external application will be configured. + +. On the navigation pane, select *Applications*. + +. Select *Create New Application* and enter a name for the new application, for example, `openshift`. + +. On the *OAuth Applications* page, select your application, for example, `openshift`. + +. 
On the navigation pane, select *Generate Token*. + +. Select the following fields: ++ +* *Administer Organization* +* *Administer Repositories* +* *Create Repositories* +* *View all visible repositories* +* *Read/Write to any accessible repositories* +* *Administer User* +* *Read User Information* + +. Review the assigned permissions. + +. Select *Authorize Application* and then confirm the authorization by selecting *Authorize Application*. + +. Save the generated access token. ++ +[IMPORTANT] +==== +{productname} does not offer token management. You cannot list tokens, delete tokens, or modify tokens. The generated access token is only shown once and cannot be re-obtained after closing the page. +==== diff --git a/modules/proc_splunk-action-log.adoc b/modules/proc_splunk-action-log.adoc new file mode 100644 index 000000000..3e6c6e655 --- /dev/null +++ b/modules/proc_splunk-action-log.adoc @@ -0,0 +1,57 @@ +:_content-type: PROCEDURE +[id="proc_splunk-action-log"] += Creating an action log + +Use the following procedure to create a user account that can forward action logs to Splunk. + +[IMPORTANT] +==== +You must use the Splunk UI to view {productname} action logs. At this time, viewing Splunk action logs on the {productname} *Usage Logs* page is unsupported, and returns the following message: `Method not implemented. Splunk does not support log lookups`. +==== + +.Prerequisites + +* You have installed Splunk and created a username. +* You have generated a Splunk bearer token. +* You have configured your {productname} `config.yaml` file to enable Splunk. + +.Procedure + +. Log in to your {productname} deployment. + +. Click on the name of the organization that you will use to create an action log for Splunk. + +. In the navigation pane, click *Robot Accounts* -> *Create Robot Account*. + +. When prompted, enter a name for the robot account, for example `splunkrobotaccount`, then click *Create robot account*. + +. On your browser, open the Splunk UI. + +. 
Click *Search and Reporting*. + +. In the search bar, enter the name of your index, for example, `` and press *Enter*. ++ +The search results populate on the Splunk UI. Logs are forwarded in JSON format. A response might look similar to the following: ++ +[source,json] +---- +{ + "log_data": { + "kind": "authentication", <1> + "account": "quayuser123", <2> + "performer": "John Doe", <3> + "repository": "projectQuay", <4> + "ip": "192.168.1.100", <5> + "metadata_json": {...}, <6> + "datetime": "2024-02-06T12:30:45Z" <7> + } +} + +---- +<1> Specifies the type of log event. In this example, `authentication` indicates that the log entry relates to an authentication event. +<2> The user account involved in the event. +<3> The individual who performed the action. +<4> The repository associated with the event. +<5> The IP address from which the action was performed. +<6> Might contain additional metadata related to the event. +<7> The timestamp of when the event occurred. \ No newline at end of file diff --git a/modules/proc_splunk-config.adoc b/modules/proc_splunk-config.adoc new file mode 100644 index 000000000..d6a90d44e --- /dev/null +++ b/modules/proc_splunk-config.adoc @@ -0,0 +1,100 @@ +:_content-type: PROCEDURE +[id="proc_splunk-config"] += Configuring {productname} to use Splunk + +Use the following procedure to configure {productname} to use Splunk or the Splunk HTTP Event Collector (HEC). + +.Prerequisites + +* You have installed Splunk and created a username. +* You have generated a Splunk bearer token. + +.Procedure + +. Configure {productname} to use Splunk or the Splunk HTTP Event Collector (HEC). + +.. If opting to use Splunk, open your {productname} `config.yaml` file and add the following configuration fields: ++ +[source,yaml] +---- +# ... 
+LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: http://.remote.csb <1> + port: 8089 <2> + bearer_token: <3> + url_scheme: <4> + verify_ssl: False <5> + index_prefix: <6> + ssl_ca_path: <7> +# ... +---- +<1> String. The Splunk cluster endpoint. +<2> Integer. The Splunk management cluster endpoint port. Differs from the Splunk GUI hosted port. Can be found on the Splunk UI under *Settings* -> *Server Settings* -> *General Settings*. +<3> String. The generated bearer token for Splunk. +<4> String. The URL scheme for accessing the Splunk service. If Splunk is configured to use TLS/SSL, this must be `https`. +<5> Boolean. Whether to enable TLS/SSL. Defaults to `true`. +<6> String. The Splunk index prefix. Can be a new, or used, index. Can be created from the Splunk UI. +<7> String. The relative container path to a single `.pem` file containing a certificate authority (CA) for TLS/SSL validation. + +.. If opting to use Splunk HEC, open your {productname} `config.yaml` file and add the following configuration fields: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk_hec <1> + splunk_hec_config: <2> + host: prd-p-aaaaaq.splunkcloud.com <3> + port: 8088 <4> + hec_token: 12345678-1234-1234-1234-1234567890ab <5> + url_scheme: https <6> + verify_ssl: False <7> + index: quay <8> + splunk_host: quay-dev <9> + splunk_sourcetype: quay_logs <10> +# ... +---- +<1> Specify `splunk_hec` when configuring Splunk HEC. +<2> Logs model configuration for Splunk HTTP event collector action logs. +<3> The Splunk cluster endpoint. +<4> Splunk management cluster endpoint port. +<5> HEC token for Splunk. +<6> The URL scheme for accessing the Splunk service. If Splunk is behind SSL/TLS, must be `https`. +<7> Boolean. Enable (true) or disable (false) SSL/TLS verification for HTTPS connections. +<8> The Splunk index to use. +<9> The host name to log this event. +<10> The name of the Splunk `sourcetype` to use. + +. 
If you are configuring `ssl_ca_path`, you must configure the SSL/TLS certificate so that {productname} will trust it. + +.. If you are using a standalone deployment of {productname}, SSL/TLS certificates can be provided by placing the certificate file inside of the `extra_ca_certs` directory, or inside of the relative container path and specified by `ssl_ca_path`. + +.. If you are using the {productname} Operator, create a config bundle secret, including the certificate authority (CA) of the Splunk server. For example: ++ +[source,terminal] +---- +$ oc create secret generic --from-file config.yaml=./config_390.yaml --from-file extra_ca_cert_splunkserver.crt=./splunkserver.crt config-bundle-secret +---- ++ +Specify the `conf/stack/extra_ca_certs/splunkserver.crt` file in your `config.yaml`. For example: ++ +[source,yaml] +---- +# ... +LOGS_MODEL: splunk +LOGS_MODEL_CONFIG: + producer: splunk + splunk_config: + host: ec2-12-345-67-891.us-east-2.compute.amazonaws.com + port: 8089 + bearer_token: eyJra + url_scheme: https + verify_ssl: true + index_prefix: quay123456 + ssl_ca_path: conf/stack/splunkserver.crt +# ... +---- \ No newline at end of file diff --git a/modules/proc_upgrade_standalone.adoc b/modules/proc_upgrade_standalone.adoc new file mode 100644 index 000000000..b1b1a12cb --- /dev/null +++ b/modules/proc_upgrade_standalone.adoc @@ -0,0 +1,732 @@ +:_content-type: PROCEDURE +[id="standalone-upgrade"] += Standalone upgrade + +In general, {productname} supports single-step upgrades from prior (N-2, N-3) minor versions. This helps simplify the upgrade procedure for customers on older releases. The following upgrade paths are supported for {productname} {productmin}: + +* 3.11.z -> {productmin} +* 3.12.z -> {productmin} +* 3.13.z -> {productmin} + +Upgrading to {productmin} from releases older than those listed above is unsupported. This helps ensure that any necessary database migrations are done correctly and in the right order during the upgrade. 
+ +For users wanting to upgrade the {productname} Operator, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading_quay_by_upgrading_the_quay_operator[Upgrading the {productname} Operator Overview]. + +This document describes the steps needed to perform each individual upgrade. Determine your current version and then follow the steps in sequential order, starting with your current version and working up to your desired target version. + +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_14_z_from_3_13_z[Upgrade to 3.14.z from 3.13.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_14_z_from_3_12_z[Upgrade to 3.14.z from 3.12.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_14_z_from_3_11_z[Upgrade to 3.14.z from 3.11.z] + +//// +//3.13 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_13_z_from_3_12_z[Upgrade to 3.13.z from 3.12.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_13_z_from_3_11_z[Upgrade to 3.13.z from 3.11.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_13_z_from_3_10_z[Upgrade to 3.13.z from 3.10.z] + + +//3.12 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_12_z_from_3_11_z[Upgrade to 3.12.z from 3.11.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_12_z_from_3_10_z[Upgrade to 3.12.z from 3.10.z] + + +//3.11 +* 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_11_z_from_3_10_z[Upgrade to 3.11.z from 3.10.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_11_z_from_3_9_z[Upgrade to 3.11.z from 3.9.z] + + +//3.10 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_10_z_from_3_9_z[Upgrade to 3.10.z from 3.9.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_10_z_from_3_8_z[Upgrade to 3.10.z from 3.8.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_10_z_from_3_7_z[Upgrade to 3.10.z from 3.7.z] + +//3.9 +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_9_z_from_3_8_z[Upgrade to 3.9.z from 3.8.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_9_z_from_3_7_z[Upgrade to 3.9.z from 3.7.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_8_z_from_3_7_z[Upgrade to 3.8.z from 3.7.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_6_z[Upgrade to 3.7.z from 3.6.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_5_z[Upgrade to 3.7.z from 3.5.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_4_z[Upgrade to 3.7.z from 3.4.z] +* 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_7_z_from_3_3_z[Upgrade to 3.7.z from 3.3.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_5_z[Upgrade to 3.6.z from 3.5.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_4_z[Upgrade to 3.6.z from 3.4.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_3_z[Upgrade to 3.6.z from 3.3.z] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_5_7_from_3_4_z[Upgrade to 3.5.z from 3.4.z] +ifdef::downstream[] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_4_6_from_3_3_z[Upgrade to 3.4.z from 3.3.4] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_3_4_from_3_2_z[Upgrade to 3.3.4 from 3.2.2] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_2_2_from_3_1_z[Upgrade to 3.2.2 from 3.1.3] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_1_3_from_3_0_z[Upgrade to 3.1.3 from 3.0.5] +* link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_0_5_from_2_9_5[Upgrade to 3.0.5 from 2.9.5] +endif::downstream[] +//// + +See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes/index[{productname} Release Notes] for information on features for individual releases. 
+ +The general procedure for a manual upgrade consists of the following steps: + +. Stop the `Quay` and `Clair` containers. +. Back up the database and image storage (optional but recommended). +. Start Clair using the new version of the image. +. Wait until Clair is ready to accept connections before starting the new version of {productname}. + +[id="accessing-images"] +== Accessing images + +{productname} images from version 3.4.0 and later are available from link:https://registry.redhat.io[registry.redhat.io] and +link:https://registry.access.redhat.com[registry.access.redhat.com], with authentication set up as described in link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]. + +//// +[id="upgrading-clair-postgresql-database"] +== Upgrading the Clair PostgreSQL database + +If you are upgrading {productname} to version {productmin}, you must migrate your Clair PostgreSQL database version from PostgreSQL version 13 -> version 15. This requires bringing down your Clair PostgreSQL 13 database and running a migration script to initiate the process. + +Use the following procedure to upgrade your Clair PostgreSQL database from version 13 -> version 15. + +[IMPORTANT] +==== +Clair security scans might become temporarily disrupted after the migration procedure has succeeded. +==== + +.Procedure + +. Stop the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Stop the Clair container by running the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
Run the following Podman process from SCLOrg's link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration] procedure, which allows for data migration from a remote PostgreSQL server: ++ +[source,terminal] +---- +$ sudo podman run -d --name <1> + -e POSTGRESQL_MIGRATION_REMOTE_HOST= \ <2> + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + -v \ <3> + [ OPTIONAL_CONFIGURATION_VARIABLES ] + registry.redhat.io/rhel8/postgresql-15 +---- ++ +<1> Insert a name for your Clair PostgreSQL 15 migration database. +<2> Your current Clair PostgreSQL 13 database container IP address. Can be obtained by running the following command: `sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay`. +<3> You must specify a different volume mount point than the one from your initial Clair PostgreSQL 13 deployment, and modify the access control lists for said directory. For example: ++ +[source,terminal] +---- +$ mkdir -p /host/data/clair-postgresql15-directory +---- ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /host/data/clair-postgresql15-directory +---- ++ +This prevents data from being overwritten by the new container. + +. Stop the Clair PostgreSQL 13 container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. After completing the PostgreSQL migration, run the Clair PostgreSQL 15 container, using the new data volume mount from Step 3, for example, ``: ++ +[source,terminal] +---- +$ sudo podman run -d --rm --name \ + -e POSTGRESQL_USER= \ + -e POSTGRESQL_PASSWORD= \ + -e POSTGRESQL_DATABASE= \ + -e POSTGRESQL_ADMIN_PASSWORD= \ + -p 5433:5432 \ + -v \ + registry.redhat.io/rhel8/postgresql-15 +---- + +. Start the {productname} container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 --name=quay \ +-v /home//quay-poc/config:/conf/stack:Z \ +-v /home//quay-poc/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + +. 
Start the Clair container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +registry.redhat.io/quay/clair-rhel8:{productminv} +---- + +For more information, see link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration]. +//// + +== Upgrade to 3.14.z from 3.13.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PostgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.14.z from 3.12.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PostgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.14.z from 3.11.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PostgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +//// +== Upgrade to 3.13.z from 3.12.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} 
+endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.13.z from 3.11.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + +== Upgrade to 3.13.z from 3.10.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} +* **Clair-PosgreSQL:** registry.redhat.io/rhel8/postgresql-15 + + +== Upgrade to 3.12.z from 3.11.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.12.z from 3.10.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:{productminv} +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:{productminv} +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + + +== Upgrade to 3.11.z from 3.10.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.11.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.11.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** 
{productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.11.z from 3.9.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.11.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}::v3.11.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + + +== Upgrade to 3.10.z from 3.9.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.10.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.10.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.10.z from 3.8.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.10.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.10.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.10.z from 3.7.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.10.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.10.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + + +== Upgrade to 3.9.z from 3.8.z + +If you are upgrading your standalone {productname} deployment from 3.8.z -> 3.9, it is highly recommended that you upgrade PostgreSQL from version 10 -> 13. To upgrade PostgreSQL from 10 -> 13, you must bring down your PostgreSQL 10 database and run a migration script to initiate the process. + +Use the following procedure to upgrade PostgreSQL from 10 -> 13 on a standalone {productname} deployment. 
+ +.Procedure + +. Enter the following command to scale down the {productname} container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Optional. If you are using Clair, enter the following command to stop the Clair container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. Run the Podman process from SCLOrg's link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration] procedure, which allows for data migration from a remote PostgreSQL server: ++ +[source,terminal] +---- +$ sudo podman run -d --name <1> + -e POSTGRESQL_MIGRATION_REMOTE_HOST=172.17.0.2 \ <2> + -e POSTGRESQL_MIGRATION_ADMIN_PASSWORD=remoteAdminP@ssword \ + -v <3> + [ OPTIONAL_CONFIGURATION_VARIABLES ] + rhel8/postgresql-13 +---- ++ +<1> The name of your PostgreSQL 13 migration database. +<2> Your current {productname} PostgreSQL 10 database container IP address. Can be obtained by running the following command: `sudo podman inspect -f "{{.NetworkSettings.IPAddress}}" postgresql-quay`. +<3> You must specify a different volume mount point than the one from your initial PostgreSQL 10 deployment, and modify the access control lists for said directory. For example: ++ +[source,terminal] +---- +$ mkdir -p /host/data/directory +---- ++ +[source,terminal] +---- +$ setfacl -m u:26:-wx /host/data/directory +---- ++ +This prevents data from being overwritten by the new container. + +. Optional. If you are using Clair, repeat the previous step for the Clair PostgreSQL database container. + +. Stop the PostgreSQL 10 container: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
After completing the PostgreSQL migration, run the PostgreSQL 13 container, using the new data volume mount from Step 3, for example, ``: ++ +[source,terminal] +---- +$ sudo podman run -d --rm --name postgresql-quay \ + -e POSTGRESQL_USER= \ + -e POSTGRESQL_PASSWORD= \ + -e POSTGRESQL_DATABASE= \ + -e POSTGRESQL_ADMIN_PASSWORD= \ + -p 5432:5432 \ + -v \ + registry.redhat.io/rhel8/postgresql-13:1-109 +---- + +. Optional. If you are using Clair, repeat the previous step for the Clair PostgreSQL database container. + +. Start the {productname} container: ++ +[source,terminal] +---- +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 --name=quay \ +-v /home//quay-poc/config:/conf/stack:Z \ +-v /home//quay-poc/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- + +. Optional. Restart the Clair container, for example: ++ +[source,terminal] +---- +$ sudo podman run -d --name clairv4 \ +-p 8081:8081 -p 8088:8088 \ +-e CLAIR_CONF=/clair/config.yaml \ +-e CLAIR_MODE=combo \ +registry.redhat.io/quay/clair-rhel8:v3.9.0 +---- + +For more information, see link:https://github.com/sclorg/postgresql-container/tree/master/13#data-migration[Data Migration]. + +//updating target images is where you left + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.9.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.9.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.9.z from 3.7.z + +If you are upgrading your standalone {productname} deployment from 3.7.z -> 3.9, it is highly recommended that you upgrade PostgreSQL from version 10 -> 13. 
To upgrade PostgreSQL from 10 -> 13, you must bring down your PostgreSQL 10 database and run a migration script to initiate the process: + +[NOTE] +==== +* When upgrading from {productname} 3.7 to 3.9, you might receive the following error: `pg_dumpall: error: query failed: ERROR: xlog flush request 1/B446CCD8 is not satisfied --- flushed only to 1/B0013858`. As a workaround to this issue, you can delete the `quayregistry-clair-postgres-upgrade` job on your {ocp} deployment, which should resolve the issue. +==== + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.9.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.9.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.8.z from 3.7.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.8.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.8.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.7.z from 3.6.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.7.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.7.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.7.z from 3.5.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.7.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.7.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.7.z from 3.4.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.7.0 
+ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.7.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.7.z from 3.3.z + +Upgrading to {productname} 3.7 from 3.3. is unsupported. Users must first upgrade to 3.6 from 3.3, and then upgrade to 3.7. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_to_3_6_z_from_3_3_z[Upgrade to 3.6.z from 3.3.z]. + +== Upgrade to 3.6.z from 3.5.z + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.6.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.6.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.6.z from 3.4.z + +[NOTE] +==== +{productname} 3.6 supports direct, single-step upgrade from 3.4.z. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. +==== + +Upgrading to {productname} 3.6 from 3.4.z requires a database migration which does not support downgrading back to a prior version of {productname}. Please back up your database before performing this migration. + +Users will also need to configure a completely new Clair v4 instance to replace the old Clair v2 when upgrading from 3.4.z. For instructions on configuring Clair v4, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-standalone[Setting up Clair on a non-OpenShift {productname} deployment]. 
+ +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.6.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.6.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +== Upgrade to 3.6.z from 3.3.z + +[NOTE] +==== +{productname} 3.6 supports direct, single-step upgrade from 3.3.z. This exception to the normal, prior minor version-only, upgrade simplifies the upgrade procedure for customers on older releases. +==== + +Upgrading to {productname} 3.6.z from 3.3.z requires a database migration which does not support downgrading back to a prior version of {productname}. Please back up your database before performing this migration. + +Users will also need to configure a completely new Clair v4 instance to replace the old Clair v2 when upgrading from 3.3.z. For instructions on configuring Clair v4, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#clair-standalone[Setting up Clair on a non-OpenShift {productname} deployment]. + +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.6.0 +ifdef::downstream[] +* **Clair:** {productrepo}/{clairimage}:v3.6.0 +endif::downstream[] +ifdef::upstream[] +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +endif::upstream[] +* **PostgreSQL:** {postgresimage} +* **Redis:** {redisimage} + +=== Swift configuration when upgrading from 3.3.z to 3.6 + +When upgrading from {productname} 3.3.z to 3.6.z, some users might receive the following error: `Switch auth v3 requires tenant_id (string) in os_options`. 
As a workaround, you can manually update your `DISTRIBUTED_STORAGE_CONFIG` to add the `os_options` and `tenant_id` parameters: + +[source,yaml] +---- + DISTRIBUTED_STORAGE_CONFIG: + brscale: + - SwiftStorage + - auth_url: http://****/v3 + auth_version: "3" + os_options: + tenant_id: **** + project_name: ocp-base + user_domain_name: Default + storage_path: /datastorage/registry + swift_container: ocp-svc-quay-ha + swift_password: ***** + swift_user: ***** +---- + +== Upgrade to 3.5.7 from 3.4.z + +=== Target images +ifdef::downstream[] +* **Quay:** {productrepo}/{quayimage}:v3.5.7 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110) +endif::downstream[] + +ifdef::upstream[] +* **Quay:** {productrepo}/{quayimage}:v3.5.1 +* **Clair:** {productrepo}/{clairimage}:{clairproductminv} +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110) +endif::upstream[] + + +ifdef::downstream[] +== Upgrade to 3.4.6 from 3.3.z + +Upgrading to Quay 3.4 requires a database migration which does not support downgrading back to a prior version of Quay. Please back up your database before performing this migration. 
+ +=== Target images +* **Quay:** {productrepo}/{quayimage}:v3.4.6 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** registry.redhat.io/rhel8/postgresql-13:1-109 +* **Redis:** registry.redhat.io/rhel8/redis-6:1-110) + + +== Upgrade to 3.3.4 from 3.2.z + +=== Target images +* **Quay:** quay.io/redhat/quay:v3.3.4 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** rhscl/postgresql-96-rhel7 +* **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 + + + +== Upgrade to 3.2.2 from 3.1.z + +Once your cluster is running any {productname} 3.1.z version, +to upgrade your cluster to 3.2.2 you must bring down your entire cluster and make a small change to the configuration before bringing it back up with the 3.2.2 version. + + +[WARNING] +==== +Once you set the value of DATABASE_SECRET_KEY in this procedure, do not ever +change it. If you do so, then existing robot accounts, API tokens, etc. cannot be used anymore. You would have to create a new robot account and API tokens to use with Quay. +==== + +. Take all hosts in the {productname} cluster out of service. +. Generate some random data to use as a database secret key. For example: ++ +``` +$ openssl rand -hex 48 +2d023adb9c477305348490aa0fd9c +``` +. Add a new DATABASE_SECRET_KEY field to your `config.yaml` file. For example: ++ +``` +DATABASE_SECRET_KEY: "2d023adb9c477305348490aa0fd9c" +``` ++ +[NOTE] +==== +For an OpenShift installation, the `config.yaml` file is stored as a secret. +==== + +. Bring up one `Quay` container to complete the migration to 3.2.2. +. Once the migration is done, make sure the same `config.yaml` is available +on all nodes and bring up the new quay 3.2.2 service on those nodes. +. Start 3.0.z versions of quay-builder and Clair to replace any instances of those containers you want to return to your cluster. 
+ + + +=== Target images + +* **Quay:** quay.io/redhat/quay:v3.2.2 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** rhscl/postgresql-96-rhel7 +* **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 + + + + +== Upgrade to 3.1.3 from 3.0.z + +=== Target images +* **Quay:** quay.io/redhat/quay:v3.1.3 +* **Clair:** {productrepo}/{clairimage}:{productminv} +* **PostgreSQL:** rhscl/postgresql-96-rhel7 +* **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 + + +== Upgrade to 3.0.5 from 2.9.5 + +For the 2.9.5 to 3.0.5 upgrade, you can either do the whole upgrade with {productname} down (synchronous upgrade) or only bring down {productname} for a few minutes and have the bulk of the upgrade continue with {productname} running (background upgrade). + +A background upgrade could take longer to run the upgrade depending on how many tags need to be processed. However, there is less total downtime. The downside of a background upgrade is that you will not have access to the latest features until the upgrade completes. The cluster runs from the Quay v3 container in v2 compatibility mode until the upgrade is complete. + +include::con_upgrade_v3.adoc[leveloffset=+2] + +include::proc_upgrade_v3.adoc[leveloffset=+2] +endif::downstream[] +//// \ No newline at end of file diff --git a/modules/proc_upgrade_v3.adoc b/modules/proc_upgrade_v3.adoc index 33cd46690..a38249c6a 100644 --- a/modules/proc_upgrade_v3.adoc +++ b/modules/proc_upgrade_v3.adoc @@ -1,5 +1,5 @@ [[upgrade-v3-proc]] -= Running the upgrade to {productname} v3.0.z += Choosing upgrade type Choose between a synchronous upgrade (complete the upgrade in downtime) and a background upgrade (complete the upgrade while {productname} is still running). @@ -7,19 +7,19 @@ Both of these major-release upgrades require that the {productname} cluster be d for at least a short period of time. 
Regardless of which upgrade type you choose, during the time that the {productname} -cluster is down, you are using builder and clair images, you need to also upgrade to +cluster is down, if you are using builder and Clair images, you need to also upgrade to those new images: -* The builder image (quay.io/redhat/quay-builder:v3.0.5) -* The clair image (quay.io/redhat/clair-jwt:v3.0.5) +* *Builder*: quay.io/redhat/quay-builder:v3.0.5 +* *Clair*: quay.io/redhat/clair-jwt:v3.0.5 -Both of those images are available from the new quay.io/redhat repository. +Both of those images are available from the {productrepo} repository. [[sync-upgrade-v3]] -== Run a synchronous upgrade += Running a synchronous upgrade To run a synchronous upgrade, where your whole cluster is down for the entire upgrade, do the following: -. Take down your entire {productname} cluster. +. Take down your entire {productname} cluster, including any quay-builder and Clair containers. . Add the following setting to the `config.yaml` file on all nodes: + @@ -27,21 +27,18 @@ To run a synchronous upgrade, where your whole cluster is down for the entire up V3_UPGRADE_MODE: complete ==== -. Pull and start up the v3 container on a single node and wait for however long it takes to do the upgrade -(it should take just a few minutes). Use the following container or later: +. Pull and start up the v3 container on a single node and wait for however long it takes to do the upgrade (it will take a few minutes). Use the following container or later: + -==== -quay.io/redhat/quay:v3.0.5 -==== +* *Quay*: quay.io/redhat/quay:v3.0.5 + -Note that the quay container comes up on ports 8080 and 8443 for v3, instead -of 80 and 443, as they did for v2. Therefore, we recommend remapping 8080 +Note that the `Quay` container comes up on ports 8080 and 8443 for {productname} 3, instead +of 80 and 443, as they did for {productname} 2. 
Therefore, we recommend remapping 8080 and 8443 into 80 and 443, respectively, as shown in this example: + [subs="verbatim,attributes"] ``` -# docker run --restart=always -p 443:8443 -p 80:8080 \ +# docker run --restart=always -p 80:8080 -p 443:8443 \ --sysctl net.core.somaxconn=4096 \ --privileged=true \ -v /mnt/quay/config:/conf/stack:Z \ @@ -49,29 +46,29 @@ and 8443 into 80 and 443, respectively, as shown in this example: -d quay.io/redhat/quay:v3.0.5 ``` -. After the upgrade completes, bring the {productname} v3 container up on all other nodes. +. After the upgrade completes, bring the {productname} 3 container up on all other nodes. + +. Start 3.0.z versions of quay-builder and Clair to replace any +instances of those containers you want to return to your cluster. . Verify that {productname} is working, including pushes and pulls of containers compatible with Docker version 2, schema 2. This can include windows container images and images of different computer architectures (arm, ppc, etc.). [[background-upgrade-v3]] -== Run a background upgrade -To run a background upgrade, you need only bring down your cluster for a short period of time on two occasions. When you -bring the cluster back up after the first downtime, the quay v3 container runs in v2 compatibility mode as it backfills the database. -This background process can take hours or even days to complete. Background upgrades are recommended for += Running a background upgrade +To run a background upgrade, you need only bring down your cluster for a short period of time on two occasions. When you bring the cluster back up after the first downtime, the quay v3 container runs in v2 compatibility mode as it backfills the database. This background process can take hours or even days to complete. Background upgrades are recommended for large installations where downtime of more than a few hours would be a problem. 
-For this type of upgrade, you put {productname} into a compatibility mode, where you have a v3 quay container -running, but it is running on the old data model while the upgrade completes. Here's what you do: +For this type of upgrade, you put {productname} into a compatibility mode, where you have a `Quay` 3 container running, but it is running on the old data model while the upgrade completes. Here's what you do: -. Pull the {productname} v3 container to all the nodes. Use the following container or later: +. Pull the {productname} 3 container to all the nodes. Use the following container or later: + [subs="verbatim,attributes"] ==== quay.io/redhat/quay:v3.0.5 ==== -. Take down {productname} on all nodes. +. Take down your entire {productname} cluster, including any quay-builder and Clair containers. . Edit the `config.yaml` file on each node and set the upgrade mode to background as follows: + @@ -79,18 +76,18 @@ quay.io/redhat/quay:v3.0.5 V3_UPGRADE_MODE: background ==== -. Bring the {productname} v3 container up on a single node and wait for the migrations to +. Bring the {productname} 3 container up on a single node and wait for the migrations to complete (should take a few minutes maximum). Here is an example of that command: + -Note that the quay container comes up on ports 8080 and 8443 for v3, instead -of 80 and 443, as they did for v2. Therefore, we recommend remapping 8080 +Note that the `Quay` container comes up on ports 8080 and 8443 for {productname} 3, instead +of 80 and 443, as they did for {productname} 2. 
Therefore, we recommend remapping 8080 and 8443 into 80 and 443, respectively, as shown in this example: + [subs="verbatim,attributes"] ``` -# docker run --restart=always -p 443:8443 -p 80:8080 \ +# docker run --restart=always -p 80:8080 -p 443:8443 \ --sysctl net.core.somaxconn=4096 \ --privileged=true \ -v /mnt/quay/config:/conf/stack:Z \ @@ -98,33 +95,36 @@ and 8443 into 80 and 443, respectively, as shown in this example: -d quay.io/redhat/quay:v3.0.5 ``` -. Bring the {productname} v3 container up on all the other nodes. +. Bring the {productname} 3 container up on all the other nodes. -. Monitor the `/upgradeprogress` API endpoint until it reports done (the status reaches 100%). +. Monitor the `/upgradeprogress` API endpoint until it reports done +enough to move to the next step (the status reaches 99%). For example, view `https://myquay.example.com/upgradeprogress` or use some other tool to query the API. -. Once the background process is done, you have to schedule another maintenance window. +. Once the background process is far enough along you have to schedule another maintenance window. . During your scheduled maintenance, take the entire {productname} cluster down. . Edit the `config.yaml` file on each node and set the upgrade mode to `complete` as follows: + -==== +[source,yaml] +---- V3_UPGRADE_MODE: complete -==== +---- . Bring {productname} back up on one node to have it do a final check. . Once the final check is done, bring {productname} v3 back up on all the other nodes. -. Verify Quay is working, including pushes and pulls of containers compatible with Docker version 2, schema 2. -This can include windows container images and images of different computer architectures (arm, ppc, etc.). +. Start 3.0.z versions of quay-builder and Clair to replace any instances of those containers you want to return to your cluster. + +. Verify Quay is working, including pushes and pulls of containers compatible with Docker version 2, schema 2. 
This can include windows container images and images of different computer architectures (arm, ppc, etc.). -[[upgrade-v31-proc]] -= Running the upgrade to {productname} v{productmin} -As noted earlier, once your cluster is running any {productname} 3.0.z version, you can -upgrade your cluster to v{productmin} by simply doing a rolling upgrade. -To do that, just add new v{productmin} containers to the cluster one at a time, check that they -are working, then decommission the older v3.0.z containers. += Target images +* **Quay:** quay.io/redhat/quay:v3.0.5 +* **Clair:** quay.io/redhat/clair-jwt:v3.0.5 +* **Redis:** registry.access.redhat.com/rhscl/redis-32-rhel7 +* **PostgreSQL:** rhscl/postgresql-96-rhel7 +* **Builder:** quay.io/redhat/quay-builder:v3.0.5 diff --git a/modules/proc_use-api.adoc b/modules/proc_use-api.adoc new file mode 100644 index 000000000..7d3ecd9a8 --- /dev/null +++ b/modules/proc_use-api.adoc @@ -0,0 +1,452 @@ +:_content-type: CONCEPT +[id="using-the-api"] += Using the {productname} API + +[id="accessing-quay-io-api"] +== Accessing the Quay API from Quay.io +If you don't have your own {productname} cluster running yet, +you can explore the {productname} API available from Quay.io +from your web browser: + +``` +https://docs.quay.io/api/swagger/ +``` +The API Explorer that appears shows Quay.io API endpoints. +You will not see superuser API endpoints or endpoints for {productname} features +that are not enabled on Quay.io (such as Repository Mirroring). + +From API Explorer, you can get, and sometimes change, information on: + +* Billing, subscriptions, and plans +* Repository builds and build triggers +* Error messages and global messages +* Repository images, manifests, permissions, notifications, vulnerabilities, and +image signing +* Usage logs +* Organizations, members and OAuth applications +* User and robot accounts +* and more... + +Select to open an endpoint to view the Model Schema for each part +of the endpoint. 
Open an endpoint, enter any required parameters +(such as a repository name or image), then select the `Try it out!` +button to query or change settings associated with a Quay.io endpoint. + + + + +== Accessing your Quay API from a web browser + +By enabling Swagger, you can access the API for your own {productname} instance through a web browser. +This URL exposes the {productname} API explorer via the Swagger UI and this URL: + +``` +https:///api/v1/discovery. +``` + +That way of accessing the API does not include superuser endpoints that are available on +{productname} installations. Here is an example of accessing a {productname} API interface +running on the local system by running the swagger-ui container image: + +``` +# export SERVER_HOSTNAME= +# sudo podman run -p 8888:8080 -e API_URL=https://$SERVER_HOSTNAME:8443/api/v1/discovery docker.io/swaggerapi/swagger-ui +``` +With the swagger-ui container running, open your web browser to localhost port 8888 to view +API endpoints via the swagger-ui container. 
+ +To avoid errors in the log such as "API calls must be invoked with an X-Requested-With header if called from a browser," +add the following line to the `config.yaml` on all nodes in the cluster and restart {productname}: + +``` +BROWSER_API_CALLS_XHR_ONLY: false +``` + +//// + +=== Get superuser information + +``` +$ curl -X GET -H "Authorization: Bearer " \ + "https:///api/v1/superuser/users/" +``` + +For example: + +[source,json] +---- +$ curl -X GET -H "Authorization: Bearer mFCdgS7SAIoMcnTsHCGx23vcNsTgziAa4CmmHIsg" http://quay-server:8080/api/v1/superuser/users/ | jq + +{ + "users": [ + { + "kind": "user", + "name": "quayadmin", + "username": "quayadmin", + "email": "quayadmin@example.com", + "verified": true, + "avatar": { + "name": "quayadmin", + "hash": "357a20e8c56e69d6f9734d23ef9517e8", + "color": "#5254a3", + "kind": "user" + }, + "super_user": true, + "enabled": true + } + ] +} +---- + +=== Creating a superuser using the API + +* Configure a superuser name, as described in the Deploy Quay book: + +** Use the configuration editor UI or +** Edit the `config.yaml` file directly, with the option of using the configuration API to validate (and download) the updated configuration bundle + + +* Create the user account for the superuser name: + +** Obtain an authorization token as detailed above, and use `curl` to create the user: ++ +``` +$ curl -H "Content-Type: application/json" -H "Authorization: Bearer Fava2kV9C92p1eXnMawBZx9vTqVnksvwNm0ckFKZ" -X POST --data '{ + "username": "quaysuper", + "email": "quaysuper@example.com" +}' http://quay-server:8080/api/v1/superuser/users/ | jq +``` + +** The returned content includes a generated password for the new user account: ++ +[source,json] +---- +{ + "username": "quaysuper", + "email": "quaysuper@example.com", + "password": "EH67NB3Y6PTBED8H0HC6UVHGGGA3ODSE", + "encrypted_password": "fn37AZAUQH0PTsU+vlO9lS0QxPW9A/boXL4ovZjIFtlUPrBz9i4j9UDOqMjuxQ/0HTfy38goKEpG8zYXVeQh3lOFzuOjSvKic2Vq7xdtQsU=" +} +---- + + +Now, 
when you request the list of users , it will show `quaysuper` as a superuser: + + +[source,json] +---- +$ curl -X GET -H "Authorization: Bearer mFCdgS7SAIoMcnTsHCGx23vcNsTgziAa4CmmHIsg" http://quay-server:8080/api/v1/superuser/users/ | jq + +{ + "users": [ + { + "kind": "user", + "name": "quayadmin", + "username": "quayadmin", + "email": "quayadmin@example.com", + "verified": true, + "avatar": { + "name": "quayadmin", + "hash": "357a20e8c56e69d6f9734d23ef9517e8", + "color": "#5254a3", + "kind": "user" + }, + "super_user": true, + "enabled": true + }, + { + "kind": "user", + "name": "quaysuper", + "username": "quaysuper", + "email": "quaysuper@example.com", + "verified": true, + "avatar": { + "name": "quaysuper", + "hash": "c0e0f155afcef68e58a42243b153df08", + "color": "#969696", + "kind": "user" + }, + "super_user": true, + "enabled": true + } + ] +} +---- + + +=== List usage logs + +An intrnal API, `/api/v1/superuser/logs`, is available to list the usage logs for the current system. The results are paginated, so in the following example, more than 20 repos were created to show how to use multiple invocations to access the entire result set. 
+ +==== Example for pagination + +.First invocation +[source,terminal] +---- +$ curl -X GET -k -H "Authorization: Bearer qz9NZ2Np1f55CSZ3RVOvxjeUdkzYuCp0pKggABCD" https://example-registry-quay-quay-enterprise.apps.example.com/api/v1/superuser/logs | jq +---- + +.Initial output +[source,json] +---- +{ + "start_time": "Sun, 12 Dec 2021 11:41:55 -0000", + "end_time": "Tue, 14 Dec 2021 11:41:55 -0000", + "logs": [ + { + "kind": "create_repo", + "metadata": { + "repo": "t21", + "namespace": "namespace1" + }, + "ip": "10.131.0.13", + "datetime": "Mon, 13 Dec 2021 11:41:16 -0000", + "performer": { + "kind": "user", + "name": "user1", + "is_robot": false, + "avatar": { + "name": "user1", + "hash": "5d40b245471708144de9760f2f18113d75aa2488ec82e12435b9de34a6565f73", + "color": "#ad494a", + "kind": "user" + } + }, + "namespace": { + "kind": "org", + "name": "namespace1", + "avatar": { + "name": "namespace1", + "hash": "6cf18b5c19217bfc6df0e7d788746ff7e8201a68cba333fca0437e42379b984f", + "color": "#e377c2", + "kind": "org" + } + } + }, + { + "kind": "create_repo", + "metadata": { + "repo": "t20", + "namespace": "namespace1" + }, + "ip": "10.131.0.13", + "datetime": "Mon, 13 Dec 2021 11:41:05 -0000", + "performer": { + "kind": "user", + "name": "user1", + "is_robot": false, + "avatar": { + "name": "user1", + "hash": "5d40b245471708144de9760f2f18113d75aa2488ec82e12435b9de34a6565f73", + "color": "#ad494a", + "kind": "user" + } + }, + "namespace": { + "kind": "org", + "name": "namespace1", + "avatar": { + "name": "namespace1", + "hash": "6cf18b5c19217bfc6df0e7d788746ff7e8201a68cba333fca0437e42379b984f", + "color": "#e377c2", + "kind": "org" + } + } + }, +... 
+ + { + "kind": "create_repo", + "metadata": { + "repo": "t2", + "namespace": "namespace1" + }, + "ip": "10.131.0.13", + "datetime": "Mon, 13 Dec 2021 11:25:17 -0000", + "performer": { + "kind": "user", + "name": "user1", + "is_robot": false, + "avatar": { + "name": "user1", + "hash": "5d40b245471708144de9760f2f18113d75aa2488ec82e12435b9de34a6565f73", + "color": "#ad494a", + "kind": "user" + } + }, + "namespace": { + "kind": "org", + "name": "namespace1", + "avatar": { + "name": "namespace1", + "hash": "6cf18b5c19217bfc6df0e7d788746ff7e8201a68cba333fca0437e42379b984f", + "color": "#e377c2", + "kind": "org" + } + } + } + ], + "next_page": "gAAAAABhtzGDsH38x7pjWhD8MJq1_2FAgqUw2X9S2LoCLNPH65QJqB4XAU2qAxYb6QqtlcWj9eI6DUiMN_q3e3I0agCvB2VPQ8rY75WeaiUzM3rQlMc4i6ElR78t8oUxVfNp1RMPIRQYYZyXP9h6E8LZZhqTMs0S-SedaQJ3kVFtkxZqJwHVjgt23Ts2DonVoYwtKgI3bCC5" +} + +---- + + + +.Second invocation using next_page +[source,terminal] +---- +$ curl -X GET -k -H "Authorization: Bearer qz9NZ2Np1f55CSZ3RVOvxjeUdkzYuCp0pKggABCD" https://example-registry-quay-quay-enterprise.apps.example.com/api/v1/superuser/logs?next_page=gAAAAABhtzGDsH38x7pjWhD8MJq1_2FAgqUw2X9S2LoCLNPH65QJqB4XAU2qAxYb6QqtlcWj9eI6DUiMN_q3e3I0agCvB2VPQ8rY75WeaiUzM3rQlMc4i6ElR78t8oUxVfNp1RMPIRQYYZyXP9h6E8LZZhqTMs0S-SedaQJ3kVFtkxZqJwHVjgt23Ts2DonVoYwtKgI3bCC5 | jq +---- + +.Output from second invocation +[source,json] +---- +{ + "start_time": "Sun, 12 Dec 2021 11:42:46 -0000", + "end_time": "Tue, 14 Dec 2021 11:42:46 -0000", + "logs": [ + { + "kind": "create_repo", + "metadata": { + "repo": "t1", + "namespace": "namespace1" + }, + "ip": "10.131.0.13", + "datetime": "Mon, 13 Dec 2021 11:25:07 -0000", + "performer": { + "kind": "user", + "name": "user1", + "is_robot": false, + "avatar": { + "name": "user1", + "hash": "5d40b245471708144de9760f2f18113d75aa2488ec82e12435b9de34a6565f73", + "color": "#ad494a", + "kind": "user" + } + }, + "namespace": { + "kind": "org", + "name": "namespace1", + "avatar": { + "name": "namespace1", + 
"hash": "6cf18b5c19217bfc6df0e7d788746ff7e8201a68cba333fca0437e42379b984f", + "color": "#e377c2", + "kind": "org" + } + } + }, + ... + ] +} +---- + + + + + + + + +=== Directory synchronization + +To enable directory synchronization for the team `newteam` in organization `testadminorg`, where the corresponding group name in LDAP is `ldapgroup`: + +``` +$ curl -X POST -H "Authorization: Bearer 9rJYBR3v3pXcj5XqIA2XX6Thkwk4gld4TCYLLWDF" \ + -H "Content-type: application/json" \ + -d '{"group_dn": "cn=ldapgroup,ou=Users"}' \ + http://quay1-server:8080/api/v1/organization/testadminorg/team/newteam/syncing + + +``` + + +To disable synchronization for the same team: + +``` +$ curl -X DELETE -H "Authorization: Bearer 9rJYBR3v3pXcj5XqIA2XX6Thkwk4gld4TCYLLWDF" \ + http://quay1-server:8080/api/v1/organization/testadminorg/team/newteam/syncing +``` + +=== Create a repository build via API + +In order to build a repository from the specified input and tag the build with custom tags, users can use requestRepoBuild endpoint. It takes the following data: + +``` +{ +"docker_tags": [ + "string" +], +"pull_robot": "string", +"subdirectory": "string", +"archive_url": "string" +} +``` + +The `archive_url` parameter should point to a `tar` or `zip` archive that includes the Dockerfile +and other required files for the build. +The `file_id` parameter was apart of our older build system. +It cannot be used anymore. If Dockerfile is in a sub-directory it needs to be specified as well. + +The archive should be publicly accessible. OAuth app should have "Administer Organization" scope +because only organization admins have access to the robots' account tokens. +Otherwise, someone could get robot permissions by simply granting a build access to a robot +(without having access themselves), and use it to grab the image contents. +In case of errors, check the json block returned and ensure the archive location, pull robot, +and other parameters are being passed correctly. 
Click "Download logs" on the top-right of the +individual build's page to check the logs for more verbose messaging. + +=== Create an org robot + +``` +$ curl -X PUT https://quay.io/api/v1/organization/{orgname}/robots/{robot shortname} \ + -H 'Authorization: Bearer '' +``` + +=== Trigger a build + +``` +$ curl -X POST https://quay.io/api/v1/repository/YOURORGNAME/YOURREPONAME/build/ \ + -H 'Authorization: Bearer ' +``` + +Python with requests + +``` +import requests +r = requests.post('https://quay.io/api/v1/repository/example/example/image', headers={'content-type': 'application/json', 'Authorization': 'Bearer '}, data={[}) +print(r.text) +``` + +=== Create a private repository + +``` +$ curl -X POST https://quay.io/api/v1/repository \ + -H 'Authorization: Bearer {token}' \ + -H 'Content-Type: application/json' \ + -d '{"namespace":"yournamespace", "repository":"yourreponame", + "description":"descriptionofyourrepo", "visibility": "private"}' | jq +``` + +[id="api-create-mirrored-repo"] +=== Create a mirrored repository + +.Minimal configuration +[source,terminal] +---- +curl -X POST + -H "Authorization: Bearer ${bearer_token}" + -H "Content-Type: application/json" + --data '{"external_reference": "quay.io/minio/mc", "external_registry_username": "", "sync_interval": 600, "sync_start_date": "2021-08-06T11:11:39Z", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": [ "latest" ]}, "robot_username": "orga+robot"}' https://${quay_registry}/api/v1/repository/${orga}/${repo}/mirror | jq +---- + +.Extended configuration +[source,terminal] +---- +$ curl -X POST + -H "Authorization: Bearer ${bearer_token}" + -H "Content-Type: application/json" + --data '{"is_enabled": true, "external_reference": "quay.io/minio/mc", "external_registry_username": "username", "external_registry_password": "password", "external_registry_config": {"unsigned_images":true, "verify_tls": false, "proxy": {"http_proxy": "http://proxy.tld", "https_proxy": "https://proxy.tld", "no_proxy": 
"domain"}}, "sync_interval": 600, "sync_start_date": "2021-08-06T11:11:39Z", "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": [ "*" ]}, "robot_username": "orga+robot"}' https://${quay_registry}/api/v1/repository/${orga}/${repo}/mirror | jq +---- +//// \ No newline at end of file diff --git a/modules/proc_use-quay-build-dockerfiles.adoc b/modules/proc_use-quay-build-dockerfiles.adoc index b69dccb5b..1185af712 100644 --- a/modules/proc_use-quay-build-dockerfiles.adoc +++ b/modules/proc_use-quay-build-dockerfiles.adoc @@ -1,85 +1,87 @@ -= Building Dockerfiles +:_content-type: CONCEPT +[id="building-dockerfiles"] += Building container images -Quay.io supports the ability to build -http://docs.docker.com/reference/builder/[Dockerfiles] on our build -fleet and push the resulting image to the repository. +Building container images involves creating a blueprint for a containerized application. Blueprints rely on base images from other public repositories that define how the application should be installed and configured. -[[viewing-and-managing-builds]] -== Viewing and managing builds - -Repository Builds can be viewed and managed by clicking the Builds tab -in the `Repository View`. - -[[manually-starting-a-build]] -== Manually starting a build - -To manually start a repository build, click the `+` icon in the top -right of the header on any repository page and choose `New Dockerfile -Build`. An uploaded `Dockerfile`, `.tar.gz`, or an HTTP URL to either -can be used for the build. +ifeval::["{context}" == "quay-io"] [NOTE] ==== -You will not be able to specify the -Docker build context when manually starting a build. +Because blueprints rely on images from other public repositories, they might be subject to rate limiting. Consequently, your build _could_ fail. 
==== +endif::[] + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +ifeval::["{context}" == "quay-builders-image-automation"] +{productname} +endif::[] +supports the ability to build Docker and Podman container images. This functionality is valuable for developers and organizations who rely on container and container orchestration. + +ifeval::["{context}" == "quay-io"] +On {quayio}, this feature works the same across both free, and paid, tier plans. -[[build-triggers]] -== Build Triggers - -Repository builds can also be automatically triggered by events such as -a push to an SCM (GitHub, BitBucket or GitLab) or via -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/#webhook[a call to a webhook]. - -[[creating-a-new-build-trigger]] -=== Creating a new build trigger - -To setup a build trigger, click the `Create Build Trigger` button on the -Builds view page and follow the instructions of the dialog. You will -need to grant Quay.io access to your repositories in order to setup the -trigger and your account _requires admin access on the SCM repository_. - -[[manually-triggering-a-build-trigger]] -=== Manually triggering a build trigger - -To trigger a build trigger manually, click the icon next to the build -trigger and choose `Run Now`. - -[[build-contexts]] -=== Build Contexts - -When building an image with Docker, a directory is specified to become -the build context. This holds true for both manual builds and build -triggers because the builds conducted by Quay.io are no different from -running `docker build` on your own machine. - -Quay.io build contexts are -always the specified _subdirectory_ from the build setup and fallback to -the root of the build source if none is specified. When a build is -triggered, Quay.io build workers clone the git repository to the worker -machine and enter the build context before conducting a build. 
- -For builds based on tar archives, build workers extract the archive and -enter the build context. For example: - -``` +[NOTE] +==== +{quayio} limits the number of simultaneous builds that a single user can submit at one time. +==== +endif::[] + +[id="build-contexts"] +== Build contexts + +When building an image with Docker or Podman, a directory is specified to become the _build context_. This is true for both manual Builds and Build triggers, because the Build that is created by +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +is not different than running `docker build` or `podman build` on your local machine. + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +Build contexts are always specified in the _subdirectory_ from the Build setup, and fallback to the root of the Build source if a directory is not specified. + +When a build is triggered, +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +Build workers clone the Git repository to the worker machine, and then enter the Build context before conducting a Build. + +For Builds based on `.tar` archives, Build workers extract the archive and enter the Build context. For example: + +.Extracted Build archive +[source,terminal] +---- example ├── .git ├── Dockerfile ├── file └── subdir └── Dockerfile -``` +---- -Imagine the example above is the directory structure for a GitHub -repository called "example". If no subdirectory is specified in the -build trigger setup or while manually starting a build, the build will -operate in the example directory. +Imagine that the _Extracted Build archive_ is the directory structure got a Github repository called *example.* If no subdirectory is specified in the Build trigger setup, or when manually starting the Build, the Build operates in the example directory. 
-If `subdir` is specified to be the -subdirectory in the build trigger setup, only the Dockerfile within it -is visible to the build. This means that you cannot use the `ADD` -command in the Dockerfile to add `file`, because it is outside of the -build context. +If a subdirectory is specified in the Build trigger setup, for example, `subdir`, only the Dockerfile within it is visible to the Build. This means that you cannot use the `ADD` command in the Dockerfile to add `file`, because it is outside of the Build context. -Unlike the Docker Hub, the Dockerfile is part of the build context on -Quay. Thus, it must not appear in the `.dockerignore` file. +Unlike Docker Hub, the Dockerfile is part of the Build context on +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +As a result, it must not appear in the `.dockerignore` file. \ No newline at end of file diff --git a/modules/proc_use-quay-build-workers-dockerfiles.adoc b/modules/proc_use-quay-build-workers-dockerfiles.adoc index f852ba794..f593f298e 100644 --- a/modules/proc_use-quay-build-workers-dockerfiles.adoc +++ b/modules/proc_use-quay-build-workers-dockerfiles.adoc @@ -1,91 +1,11 @@ -[[build-support]] -= Automatially build Dockerfiles with build workers +:_content-type: CONCEPT +[id="bare-metal-builds"] += Bare metal builds with {productname-ocp} -{productname} supports building Dockerfiles using a set of worker nodes. Build triggers, -such as GitHub webhooks -(link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/use_red_hat_quay/index#github-build-triggers[Setup Instructions]), -can be configured to automatically build -new versions of your repositories when new code is committed. This document will walk -you through enabling the feature flag and setting up multiple build workers to enable -this feature. 
+ifeval::["{context}" == "use-quay"] +Documentation for the _builds_ feature has been moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/builders_and_image_automation/index[Builders and image automation]. This chapter will be removed in a future version of {productname}. +endif::[] -[[enable-building-dockerfile]] -== Enable building - -. Visit the management panel: Sign in to a superuser account and visit -`http://yourregister/superuser` to view the management panel: - -. Enable Dockerfile Build Support: - - * Click the configuration tab and scroll down to the section entitled Dockerfile Build Support. - image:enable-build.png[Select Enable Dockerfile Build] - - * Check the "Enable Dockerfile Build" box - * Click "Save Configuration Changes" - * Restart the container (you will be prompted) - -[[set-up-the-build-workers]] -== Set up the build workers - -image:workers.png[Set up build workers] - -One or more build workers will communicate with {productname} to build new -containers when triggered. The machines must have Docker installed and must -not be used for any other work. The following procedure needs to be done every -time a new worker needs to be added, but it can be automated fairly easily. - -[[pull-the-build-worker-image]] -=== Pull the build worker image - -Pull down the latest copy of the image. Make sure to pull the version tagged matching your {productname} version. - -[subs="verbatim,attributes"] -==== -# docker pull quay.io/redhat/quay-builder:v{productmin} -==== - -[[run-the-build-worker-image]] -=== Run the build worker image -Run this container on each build worker. Since the worker will be -orchestrating docker builds, we need to mount in the docker socket. This -orchestration will use a large amount of CPU and need to manipulate the docker -images on disk — we recommend that dedicated machines be used for this task. 
- -Use the environment variable SERVER to tell the worker the hostname at which {productname} is accessible: -[cols="2a,8a",options="header"] -|=== -|Security |Websocket Address - -|Using SSL -|wss://your.quayenterprise.dnsname - -|Without SSL -|ws://your.quayenterprise.dnsname -|=== - -Here's what the full command looks like: - -[subs="verbatim,attributes"] -.... -# docker run --restart on-failure \ - -e SERVER=ws://myquayenterprise \ - -v /mnt/docker.sock:/mnt/docker.sock \ - quay.io/redhat/quay-builder:v{productmin} -.... - -When the container starts, each build worker will auto-register and start building containers once a job is triggered and it is assigned to a worker. - -If {productname} is setup to use a SSL certificate that is not globally trusted, for example a self-signed certificate, {productname}'s public SSL certificates must be mounted onto the quay-builder container's SSL trust store. An example command to mount a certificate found at the host's /path/to/ssl/rootCA.pem looks like: - -[subs="verbatim,attributes"] -.... -# docker run --restart on-failure \ - -e SERVER=wss://myquayenterprise \ - -v /path/to/ssl/rootCA.pem:/etc/pki/ca-trust/source/anchors/rootCA.pem \ - -v /mnt/docker.sock:/mnt/docker.sock \ - quay.io/redhat/quay-builder:v{productmin} -.... -[[set-up-github-build]] -== Set up GitHub build (optional) -If your organization plans to have builds be conducted via pushes to GitHub -(or GitHub Enterprise), please continue with the Setting up GitHub Build. +ifeval::["{context}" == "quay-builders-image-automation"] +The procedures in this section explain how to create an environment for _bare metal builds_ for {productname-ocp}. 
+endif::[] diff --git a/modules/proc_use-quay-create-repo.adoc b/modules/proc_use-quay-create-repo.adoc index d5154d488..ff28066bb 100644 --- a/modules/proc_use-quay-create-repo.adoc +++ b/modules/proc_use-quay-create-repo.adoc @@ -1,44 +1,53 @@ -= Creating a repository - -There are two ways to create a repository in Quay: via a -`docker push` and via the Quay UI. -These are essentially the same, whether you are using Quay.io or your own -instance of Red Hat Quay. - -[[creating-an-image-repository-via-the-ui]] -== Creating an image repository via the UI - -To create a repository in the Quay UI, click the `+` icon in the top -right of the header on any Quay page and choose `New Repository`. -Select `Container Image Repository` on the next page, choose a namespace -(only applies to organizations), enter a repository name and then click -the `Create Repository` button. The repository will start out empty -unless a `Dockerfile` is uploaded as well. - -[[creating-an-image-repository-via-docker]] -== Creating an image repository via docker - -First, tag the repository. -Here are examples for pushing images to Quay.io or your own -Red Hat Quay setup (for example, reg.example.com). - -``` -# docker tag 0u123imageid quay.io/namespace/repo_name -# docker tag 0u123imageid reg.example.com/namespace/repo_name -``` - -Then push to the appropriate Quay registry. For example: - -``` -# docker push quay.io/namespace/repo_name -# docker push reg.example.com/namespace/repo_name -``` - -[[creating-an-application-repository-via-the-ui]] -== Creating an application repository via the UI - -To create a repository in the Quay UI, click the `+` icon in the top -right of the header on any Quay page and choose `New Repository`. -Select `Application Repository` on the next page, choose a namespace -(only applies to organizations), enter a repository name and then click -the `Create Repository` button. The repository will start out empty. 
+ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-create-repo"] += {productname} repository overview + +A repository provides a central location for storing a related set of container images. These images can be used to build applications along with their dependencies in a standardized format. + +Repositories are organized by namespaces. Each namespace can have multiple repositories. For example, you might have a namespace for your personal projects, one for your company, or one for a specific team within your organization. + +ifeval::["{context}" == "quay-io"] +With a paid plan, {quayio} provides users with access controls for their repositories. Users can make a repository public, meaning that anyone can pull, or download, the images from it, or users can make it private, restricting access to authorized users or teams. +endif::[] + +ifeval::["{context}" == "use-quay"] +{productname} provides users with access controls for their repositories. Users can make a repository public, meaning that anyone can pull, or download, the images from it, or users can make it private, restricting access to authorized users or teams. +endif::[] + +ifeval::["{context}" == "quay-security"] +Private repositories provide control over the users that have access to your images by allowing you to define users or teams who can push to, or pull from, the repository, thereby enhancing the security of your registry. +endif::[] + +ifeval::["{context}" == "quay-io"] +[NOTE] +==== +The free tier of {quayio} does not allow for private repositories. You must upgrade to a paid tier of {quayio} to create a private repository. For more information, see "Information about {quayio} pricing". +==== +endif::[] + +ifeval::["{context}" == "quay-io"] +There are two ways to create a repository in {quayio}: by pushing an image with the relevant `podman` command, or by using the {quayio} UI. 
You can also use the UI to delete a repository. +endif::[] +ifeval::["{context}" == "use-quay"] +There are multiple ways to create a repository in {productname}. The following options are available depending on your use case: + +* You can push an image with the relevant `podman` or `docker` command. +* You can use the {productname} UI. +* You can use the {productname} API. +* For OCI artifacts, for example, a large-language model (LLM) or machine learning application, you can use `skopeo` or `oras` to copy the artifact to your repository. +endif::[] + +ifeval::["{context}" == "quay-io"] +If you push an image through the command-line interface (CLI) without first creating a repository on the UI, the created repository is set to *Private*, regardless of the plan you have. + +[NOTE] +==== +It is recommended that you create a repository on the {quayio} UI before pushing an image. {quayio} checks the plan status and does not allow creation of a private repository if a plan is not active. +==== +endif::[] \ No newline at end of file diff --git a/modules/proc_use-quay-git-trigger.adoc b/modules/proc_use-quay-git-trigger.adoc index a3d0e9c96..9db54dba6 100644 --- a/modules/proc_use-quay-git-trigger.adoc +++ b/modules/proc_use-quay-git-trigger.adoc @@ -1,55 +1,70 @@ -= Setting up a Custom Git Trigger +[id="setting-up-custom-git-trigger"] += Setting up a custom Git trigger -A Custom Git Trigger is a generic way for any git server to act as a -build trigger. It relies solely on SSH keys and webhook endpoints; everything else -is left to the user to implement. +After you have created a _custom Git trigger_, two additional steps are required: -[[creating-a-custom-git-trigger]] -== Creating a Trigger +. You must provide read access to the SSH public key that is generated when creating the trigger. -Creating a Custom Git Trigger is similar to the creation of any other -trigger with a few subtle differences: +ifeval::["{context}" == "quay-io"] +. 
You must set up a webhook that POSTs to the {quayio} endpoint to trigger the build. +endif::[] +ifeval::["{context}" == "quay-builders-image-automation"] +. You must set up a webhook that POSTs to the {productname} endpoint to trigger the build. +endif::[] -* It is not possible for Quay to automatically detect the proper -robot account to use with the trigger. This must be done manually in the -creation process. -* There are extra steps after the creation of the trigger that must be -done in order to use the trigger. These steps are detailed below. +These steps are only required if you are using a _custom Git trigger_. -[[post-git-trigger-creation-setup]] -== Post trigger-creation setup +[id="obtaining-build-trigger-credentials"] +== Obtaining build trigger credentials -Once a trigger has been created, *there are 2 additional steps required* -before the trigger can be used: +The SSH public key and Webhook Endpoint URL are available on the {productname} UI. -* Provide read access to the _SSH public key_ generated when creating -the trigger. -* Setup a _webhook_ that POSTs to the Quay endpoint to trigger a -build. +.Prerequisites -The key and the URL are both available at all times by selecting `View -Credentials` from the gear located in the trigger listing. -image:view-credentials.png[View and modify tags from your repository] +* You have created a _custom Git trigger_. -[[ssh-public-key-access]] +.Procedure + +. On the *Builds* page of your repository, click the menu kebab for your _custom Git trigger_. + +. Click *View Credentials*. + +. Save the SSH Public Key and Webhook Endpoint URL. + +The key and the URL are available by selecting *View Credentials* from the *Settings*, or _gear_ icon.
+ +.View and modify tags from your repository +image:view-credentials.png[Trigger Credentials] + +[id="ssh-public-key-access"] === SSH public key access -Depending on the Git server setup, there are various ways to install the -SSH public key that Quay generates for a custom git trigger. For -example, -https://git-scm.herokuapp.com/book/en/v2/Git-on-the-Server-Getting-Git-on-a-Server[Git documentation] describes a small server setup in which simply adding -the key to `$HOME/.ssh/authorize_keys` would provide access for builders -to clone the repository. For any git repository management software that -isn't officially supported, there is usually a location to input the key -often labeled as `Deploy Keys`. +Depending on the Git server configuration, there are multiple ways to install the SSH public key that +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +generates for a custom Git trigger. + +For example, documentation for link:https://git-scm.com/book/en/v2/Git-on-the-Server-Getting-Git-on-a-Server[Getting Git on a Server] describes how to set up a Git server on a Linux-based machine with a focus on managing repositories and access control through SSH. In this procedure, a small server is set up to add the keys to the `$HOME/.ssh/authorized_keys` file, which provides access for _builders_ to clone the repository. + +For any Git repository management software that is not officially supported, there is usually a location to input the key that is often labeled as *Deploy Keys*. -[[webhook]] +[id="webhook"] === Webhook +To automatically trigger a build, you must `POST` a `.json` payload to the webhook URL using the following format: -In order to automatically trigger a build, one must POST a JSON payload -to the webhook URL with the following format: +[NOTE] +==== +This request requires a `Content-Type` header containing +`application/json` in order to be valid.
+==== -``` +.Example webhook +[source,terminal] +---- { "commit": "1c002dd", // required "ref": "refs/heads/master", // required @@ -70,15 +85,6 @@ to the webhook URL with the following format: } } } -``` - -[NOTE] -==== -This request requires a `Content-Type` header containing -`application/json` in order to be valid. -==== +---- -Once again, this can be accomplished in various ways depending on the -server setup, but for most cases can be done via a -https://git-scm.herokuapp.com/book/en/v2/Customizing-Git-Git-Hooks#idp26374144[post-receive -git hook]. +This can typically be accomplished with a link:https://git-scm.com/docs/githooks#post-receive[`post-receive` Git hook], however it does depend on your server setup. \ No newline at end of file diff --git a/modules/proc_use-quay-manage-repo.adoc b/modules/proc_use-quay-manage-repo.adoc new file mode 100644 index 000000000..67fad9aff --- /dev/null +++ b/modules/proc_use-quay-manage-repo.adoc @@ -0,0 +1,34 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-manage-repo"] += Access management for {productname} + +As a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +user, you can create your own repositories and make them accessible to other users that are part of your instance. Alternatively, you can create an organization and associate a set of repositories directly to that organization, referred to as an _organization repository_. + +Organization repositories differ from basic repositories in that the organization is intended to set up shared repositories through groups of users. In +ifeval::["{context}" == "quay-io"] +{quayio}, +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}, +endif::[] +groups of users can be either _Teams_, or sets of users with the same permissions, or _individual users_. 
You can also allow access to user repositories and organization repositories by creating credentials associated with Robot Accounts. Robot Accounts make it easy for a variety of container clients, such as Docker or Podman, to access your repositories without requiring that the client have a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +user account. \ No newline at end of file diff --git a/modules/proc_use-quay-notifications.adoc b/modules/proc_use-quay-notifications.adoc index bc559b8b8..7bf6d519c 100644 --- a/modules/proc_use-quay-notifications.adoc +++ b/modules/proc_use-quay-notifications.adoc @@ -1,358 +1,21 @@ -= Repository Notifications +// module included in the following assemblies: -Quay supports adding _notifications_ to a repository for various events -that occur in the repository's lifecycle. To add notifications, click -the `Settings` tab while viewing a repository and select -`Create Notification`. From the `When this event occurs` -field, select the items for which you want to receive notifications: +// * use_quay/master.adoc +// * quay_io/master.adoc -image:event-select.png[Create repository notifications] - -After selecting an event, further configure it by adding how you -will be notified of that event. - -[NOTE] -==== -Adding notifications requires _repository admin permission_. -==== - -The following are examples of repository events. 
- -[[repository-events]] -== Repository Events - -[[repository-push]] -=== Repository Push - -A successful push of one or more images was made to the repository: - -``` -{ - "name": "repository", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "homepage": "https://quay.io/repository/dgangaia/repository", - "updated_tags": [ - "latest" - ] -} -``` - -[[dockerfile-build-queued]] -=== Dockerfile Build Queued - -Here is a sample response for a Dockerfile build has been queued into the build system. -The response can differ based on the use of optional attributes. - -``` -{ - "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "repo": "test", - "trigger_metadata": { - "default_branch": "master", - "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", - "ref": "refs/heads/master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", - "date": "2019-03-06T12:48:24+11:00", - "message": "adding 5", - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - }, - "committer": { - "username": "web-flow", - "url": "https://github.com/web-flow", - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" - } - } - }, - "is_manual": false, - "manual_user": null, - "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2" -} -``` - -[[dockerfile-build-started]] -=== Dockerfile Build Started - -Here is an example of a Dockerfile build being started by the build 
system. -The response can differ based on some attributes being optional. - -``` -{ - "build_id": "a8cc247a-a662-4fee-8dcb-7d7e822b71ba", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "build_name": "50bc599", - "trigger_metadata": { //Optional - "commit": "50bc5996d4587fd4b2d8edc4af652d4cec293c42", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/50bc5996d4587fd4b2d8edc4af652d4cec293c42", - "date": "2019-03-06T14:10:14+11:00", - "message": "test build", - "committer": { //Optional - "username": "web-flow", - "url": "https://github.com/web-flow", //Optional - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional - }, - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/a8cc247a-a662-4fee-8dcb-7d7e822b71ba" -} -``` - -[[dockerfile-build-successfully-completed]] -=== Dockerfile Build Successfully Completed - -Here is a sample response of a Dockerfile build that has been successfully completed by the build system. +:_content-type: CONCEPT +[id="repository-notifications"] += Notifications overview +ifeval::["{context}" == "quay-io"] +{quayio} supports adding _notifications_ to a repository for various events that occur in the repository's lifecycle. +ifdef::upstream[] [NOTE] ==== -This event will occur *simultaneously* with a _Repository Push_ -event for the built image(s) +By default, vulnerability notifications are disabled on {quayio} and cannot be enabled. 
==== - -``` -{ - "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "build_name": "b7f7d2b", - "image_id": "sha256:0339f178f26ae24930e9ad32751d6839015109eabdf1c25b3b0f2abf8934f6cb", - "trigger_metadata": { - "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", - "date": "2019-03-06T12:48:24+11:00", - "message": "adding 5", - "committer": { //Optional - "username": "web-flow", - "url": "https://github.com/web-flow", //Optional - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional - }, - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2", - "manifest_digests": [ - "quay.io/dgangaia/test@sha256:2a7af5265344cc3704d5d47c4604b1efcbd227a7a6a6ff73d6e4e08a27fd7d99", - "quay.io/dgangaia/test@sha256:569e7db1a867069835e8e97d50c96eccafde65f08ea3e0d5debaf16e2545d9d1" - ] -} -``` - -[[dockerfile-build-failed]] -=== Dockerfile Build Failed - -A Dockerfile build has failed - -``` -{ - "build_id": "5346a21d-3434-4764-85be-5be1296f293c", - "trigger_kind": "github", //Optional - "name": "test", - "repository": "dgangaia/test", - "docker_url": "quay.io/dgangaia/test", - "error_message": "Could not find or parse Dockerfile: unknown instruction: GIT", - "namespace": "dgangaia", - "trigger_id": 
"38b6e180-9521-4ff7-9844-acf371340b9e", //Optional - "docker_tags": [ - "master", - "latest" - ], - "build_name": "6ae9a86", - "trigger_metadata": { //Optional - "commit": "6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { //Optional - "url": "https://github.com/dgangaia/test/commit/6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", - "date": "2019-03-06T14:18:16+11:00", - "message": "failed build test", - "committer": { //Optional - "username": "web-flow", - "url": "https://github.com/web-flow", //Optional - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional - }, - "author": { //Optional - "username": "dgangaia", - "url": "https://github.com/dgangaia", //Optional - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/5346a21d-3434-4764-85be-5be1296f293c" -} - -``` - -[[dockerfile-build-cancelled]] -=== Dockerfile Build Cancelled - -A Dockerfile build was cancelled - -``` -{ - "build_id": "cbd534c5-f1c0-4816-b4e3-55446b851e70", - "trigger_kind": "github", - "name": "test", - "repository": "dgangaia/test", - "namespace": "dgangaia", - "docker_url": "quay.io/dgangaia/test", - "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", - "docker_tags": [ - "master", - "latest" - ], - "build_name": "cbce83c", - "trigger_metadata": { - "commit": "cbce83c04bfb59734fc42a83aab738704ba7ec41", - "ref": "refs/heads/master", - "default_branch": "master", - "git_url": "git@github.com:dgangaia/test.git", - "commit_info": { - "url": "https://github.com/dgangaia/test/commit/cbce83c04bfb59734fc42a83aab738704ba7ec41", - "date": "2019-03-06T14:27:53+11:00", - "message": "testing cancel build", - "committer": { - "username": "web-flow", - "url": "https://github.com/web-flow", - "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" - 
}, - "author": { - "username": "dgangaia", - "url": "https://github.com/dgangaia", - "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" - } - } - }, - "homepage": "https://quay.io/repository/dgangaia/test/build/cbd534c5-f1c0-4816-b4e3-55446b851e70" -} -``` - -[[vulnerability-detected]] -=== Vulnerability Detected - - -A vulnerability was detected in the repository - -``` -{ - "repository": "dgangaia/repository", - "namespace": "dgangaia", - "name": "repository", - "docker_url": "quay.io/dgangaia/repository", - "homepage": "https://quay.io/repository/dgangaia/repository", - - "tags": ["latest", "othertag"], - - "vulnerability": { - "id": "CVE-1234-5678", - "description": "This is a bad vulnerability", - "link": "http://url/to/vuln/info", - "priority": "Critical", - "has_fix": true - } -} -``` - -[[notification-actions]] -== Notification Actions - - -[[quay-notification]] -=== Quay Notification - -A notification will be added to the Quay.io notification area. The -notification area can be found by clicking on the bell icon in the top -right of any Quay.io page. - -Quay.io notifications can be setup to be sent to a _User_, _Team_, or the _organization_ as a whole. - -[[e-mail]] -=== E-mail - -An e-mail will be sent to the specified address describing the event -that occurred. - -[NOTE] -==== -All e-mail addresses will have to be verified on a -_per-repository_ basis -==== - -[[webhook-post]] -=== Webhook POST - -An HTTP POST call will be made to the specified URL with the event's -data (see above for each event's data format). - -When the URL is HTTPS, the call will have an SSL client certificate set -from Quay.io. Verification of this certificate will prove the call -originated from Quay.io. Responses with status codes in the 2xx range -are considered successful. Responses with any other status codes will be -considered failures and result in a retry of the webhook notification. 
- -[[flowdock-notification]] -=== Flowdock Notification - - -Posts a message to Flowdock. - -[[hipchat-notification]] -=== Hipchat Notification - - -Posts a message to HipChat. - -[[slack-notification]] -=== Slack Notification - - -Posts a message to Slack. +endif::upstream[] +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} supports adding _notifications_ to a repository for various events that occur in the repository's lifecycle. +endif::[] diff --git a/modules/proc_use-quay-skip-trigger.adoc b/modules/proc_use-quay-skip-trigger.adoc deleted file mode 100644 index fc7f15784..000000000 --- a/modules/proc_use-quay-skip-trigger.adoc +++ /dev/null @@ -1,6 +0,0 @@ -= Skipping a source control-triggered build -[[skipping-source-control-trigger-build]] - -To specify that a commit should be ignored by the Quay build system, add -the text `[skip build]` or `[build skip]` anywhere in the commit -message. diff --git a/modules/proc_use-quay-squash-images.adoc b/modules/proc_use-quay-squash-images.adoc deleted file mode 100644 index 81cccf0b2..000000000 --- a/modules/proc_use-quay-squash-images.adoc +++ /dev/null @@ -1,56 +0,0 @@ -= Downloading Squashed Docker Images - -Docker images are composed of image layers which include all of the -intermediary data used to reach their current state. When iterating on a -solution locally on a developer's machine, layers provide an efficient -workflow. - -There are scenarios, however, in which the layers cease to be -efficient. For example, when deploying software to an ephemeral machine, -that machine doesn't care about the whole layer history, it just needs -the end state of the image. This is why Quay.io supports _Squashed -Images_. - -[[downloading-a-squashed-image]] -== Downloading a Squashed Image - -To download a squashed image: - -. Navigate to the `Tags` tab of a Quay `Repository View`. 
For an organization named `abcsales` and a repo named `myweb`, -the URL would be `https://quay.io/repository/abcsales/myweb?tab=tags`) on Quay.io. For a Red Hat Quay registry, replace `quay.io` with your registry name. - -. On the left -side of the table, click on the _Fetch Tag_ icon for the tag you want -to download. A modal dialog appears with a dropdown for specifying -the desired format of the download. - -. Select `Squashed Docker Image` from -the dropdown and then select a robot that has _read_ permission to be -able to pull the repository. -image:squash_image.png[Identify image to squash and download] -. Click on the `Copy Command` button. -. Paste this command into a shell on the machine where you have -a Docker service running. -. Type `docker images` to see that the image is loaded and read to use. - -[[caveats-warnings]] -== Caveats & Warnings - -[[prime-the-cache]] -=== Prime the cache! - -When the first pull of a squashed image occurs, the registry streams the -image as it is being flattened in real time. Afterwards, the end result -is cached and served directly. Thus, it is recommended to pull the first -squashed image on a developer machine before deploying, so that all of -the production machines can pull the cached result. - -[[isnt-piping-curl-insecure]] -=== Isn't piping curl insecure? - -You may be familiar with installers that pipe curl into bash -(`curl website.com/installer | /bin/bash`). These scripts are insecure -because they allow arbitrary code execution. The Quay script to download -squashed images uses `curl` to download a tarball that is streamed into -`docker load`. This is just as secure as running `docker pull` because -it never executes anything we've downloaded from the internet. 
diff --git a/modules/proc_use-quay-tags.adoc b/modules/proc_use-quay-tags.adoc index 980bda84e..557b3b394 100644 --- a/modules/proc_use-quay-tags.adoc +++ b/modules/proc_use-quay-tags.adoc @@ -1,98 +1,195 @@ -= Working with tags +:_content-type: CONCEPT +[id="working-with-tags"] += Image tags overview -Tags provide a way to identify the version of an image, as well as -offering a means of naming the same image in different ways. -Besides an image's version, an image tag can identify its uses (such as devel, -testing, or prod) or the fact that it is the most recent version (latest). +An _image tag_ refers to a label or identifier assigned to a specific version or variant of a container image. Container images are typically composed of multiple layers that represent different parts of the image. Image tags are used to differentiate between different versions of an image or to provide additional information about the image. -From the `Tags` tab of an image repository, you can view, modify, add, move, delete, and -see the history of tags. You also can fetch command-lines you can use to -download (pull) a specific image (based on its name and tag) using different commands. +Image tags have the following benefits: -[[viewing-and-modifying-tags]] -== Viewing and modifying tags +* *Versioning and Releases*: Image tags allow you to denote different versions or releases of an application or software. For example, you might have an image tagged as _v1.0_ to represent the initial release and _v1.1_ for an updated version. This helps in maintaining a clear record of image versions. -The tags of a repository can be viewed and modified in the tags panel of -the repository page, found by clicking on the `Tags` tab. -image:tag-operations.png[View and modify tags from your repository] +* *Rollbacks and Testing*: If you encounter issues with a new image version, you can easily revert to a previous version by specifying its tag. This is helpful during debugging and testing phases. 
-[[adding-a-new-tag-to-a-tagged-image]] -=== Adding a new tag to a tagged image +* *Development Environments*: Image tags are beneficial when working with different environments. You might use a _dev_ tag for a development version, _qa_ for quality assurance testing, and _prod_ for production, each with their respective features and configurations. -A new tag can be added to a tagged image by clicking on the gear icon next to -the tag and choosing `Add New Tag`. Quay.io will confirm the addition of -the new tag to the image. +* *Continuous Integration/Continuous Deployment (CI/CD)*: CI/CD pipelines often utilize image tags to automate the deployment process. New code changes can trigger the creation of a new image with a specific tag, enabling seamless updates. -[[moving-a-tag]] -=== Moving a tag +* *Feature Branches*: When multiple developers are working on different features or bug fixes, they can create distinct image tags for their changes. This helps in isolating and testing individual features. -Moving a tag to a different image is accomplished by performing the same -operation as adding a new tag, but giving an existing tag name. Quay.io -will confirm that you want the tag moved, rather than added. +* *Customization*: You can use image tags to customize images with different configurations, dependencies, or optimizations, while keeping track of each variant. -[[deleting-a-tag]] -=== Deleting a tag +* *Security and Patching*: When security vulnerabilities are discovered, you can create patched versions of images with updated tags, ensuring that your systems are using the latest secure versions. -A specific tag and all its images can be deleted by clicking on the tag's gear icon -and choosing `Delete Tag`. This will delete the tag and any images unique -to it. Images will not be deleted until no tag references them either -directly or indirectly through a parent child relationship. 
+* *Dockerfile Changes*: If you modify the Dockerfile or build process, you can use image tags to differentiate between images built from the previous and updated Dockerfiles. -[[viewing-tag-history-and-going-back-in-time]] -=== Viewing tag history and going back in time +Overall, image tags provide a structured way to manage and organize container images, enabling efficient development, deployment, and maintenance workflows. -[[viewing-tag-history]] -==== Viewing tag history -To view the image history for a tag, click on the `View Tags History` menu -item located under the `Actions` menu. The page shown will display each -image to which the tag pointed in the past and when it pointed to that -image. +[id="viewing-tag-history"] +== Viewing tag history -[[going-back-in-time]] -==== Going back in time +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive history of images and their respective image tags. -To revert the tag to a previous image, find the history line where your -desired image was overwritten, and click on the Restore link. +.Procedure -[[fetching-images-and-tags]] -=== Fetching an image by tag or digest -From the `Tags` tab, you can view different ways of pulling images from the clients -that are ready to use those images. +* Navigate to the *Tag History* page of a repository to view the image tag history. -. Select a particular repository/image -. Select Tags in the left column -. Select the Fetch Tag icon for a particular image/tag combination -. When the Fetch Tag pop-up appears, select the Image format box to -see a drop-down menu that shows different ways that are -available to pull the image. 
The selections offer full command lines -for pulling a specific container image to the local system: +[id="going-back-in-time"] +== Reverting tag changes -image:image-fetch.png[Get commands for fetching images in different ways] +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive _time machine_ feature that allows older images tags to remain in the repository for set periods of time so that they can revert changes made to tags. This feature allows users to revert tag changes, like tag deletions. -You can select to pull a regular of an image by tag name or by digest name using the *docker* command. -You can pull a squashed version of the image with *docker* by selecting `Squashed Docker Image`. -There is also an example for pulling the image using the `rkt` command. -. Choose the type of pull you want, then select `Copy Command`. -The full command-line is copied into your clipboard. -These two commands show a *docker pull* by tag and by digest: +.Procedure -``` -docker pull quay.io/cnegus/whatever:latest -docker pull quay.io/cnegus/whatever@sha256:e02231a6aa8ba7f5da3859a359f99d77e371cb47e643ce78e101958782581fb9 -``` +. Navigate to the *Tag History* page of a repository. -Paste the command into a command-line shell on a system that has the -*docker* command and service available, and press Enter. -At this point, the container image is ready to run on your local system. +. Find the point in the timeline at which image tags were changed or removed. Next, click the option under *Revert* to restore a tag to its image, or click the option under *Permanently Delete* to permanently delete the image tag. -On most Red Hat systems, you can -substitute *podman* for *docker* to pull and run the selected image. 
+[id="fetching-images-and-tags"] +== Fetching an image by tag or digest -[[security-scanning]] -== Security scanning +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers multiple ways of pulling images using Docker and Podman clients. -By clicking the on the vulnerability or fixable count next to a tab you -can jump into the security scanning information for that tag. There you -can find which CVEs your image is susceptible to, and what remediation -options you may have available. +.Procedure + +. Navigate to the *Tags* page of a repository. + +. Under *Manifest*, click the *Fetch Tag* icon. + +. When the popup box appears, users are presented with the following options: ++ +* Podman Pull (by tag) +* Docker Pull (by tag) +* Podman Pull (by digest) +* Docker Pull (by digest) ++ +Selecting any one of the four options returns a command for the respective client that allows users to pull the image. + +. Click *Copy Command* to copy the command, which can be used on the command-line interface (CLI). For example: ++ +ifeval::["{context}" == "quay-io"] +[source,terminal] +---- +$ podman pull quay.io/quayadmin/busybox:test2 +---- +endif::[] +ifeval::["{context}" == "use-quay"] +[source,terminal] +---- +$ podman pull quay-server.example.com/quayadmin/busybox:test2 +---- +endif::[] + +[id="tag-expiration"] +== Tag Expiration + +Images can be set to expire from a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +repository at a chosen date and time using the _tag expiration_ feature. This feature includes the following characteristics: + +* When an image tag expires, it is deleted from the repository. If it is the last tag for a specific image, the image is also set to be deleted. + +* Expiration is set on a per-tag basis. It is not set for a repository as a whole. 
+ +* After a tag is expired or deleted, it is not immediately removed from the registry. This is contingent upon the allotted time designated in the _time machine_ feature, which defines when the tag is permanently deleted, or garbage collected. By default, this value is set at _14 days_, however the administrator can adjust this time to one of multiple options. Up until the point that garbage collection occurs, tag changes can be reverted. + +ifeval::["{context}" == "use-quay"] +The {productname} superuser has no special privilege related to deleting expired images from user repositories. There is no central mechanism for the superuser to gather information and act on user repositories. It is up to the owners of each repository to manage expiration and the deletion of their images. +endif::[] + +Tag expiration can be set up in one of two ways: + +* By setting the `quay.expires-after=` LABEL in the Dockerfile when the image is created. This sets a time to expire from when the image is built. + +* By selecting an expiration date on the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. For example: ++ +image:tag-expires-ui.png[Change tag expiration under the Options icon or from the EXPIRES column] + +[id="setting-tag-from-dockerfile"] +== Setting tag expiration from a Dockerfile + +Adding a label, for example, `quay.expires-after=20h` by using the `docker label` command causes a tag to automatically expire after the time indicated. The following values for hours, days, or weeks are accepted: + +* `1h` +* `2d` +* `3w` + +Expiration begins from the time that the image is pushed to the registry. + +[id="setting-tag-expiration"] +== Setting tag expiration from the repository + +Tag expiration can be set on the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. + +.Procedure + +.
Navigate to a repository and click *Tags* in the navigation pane. + +. Click the *Settings*, or _gear_ icon, for an image tag and select *Change Expiration*. + +. Select the date and time when prompted, and select *Change Expiration*. The tag is set to be deleted from the repository when the expiration time is reached. + +[id="security-scanning"] +== Viewing Clair security scans + +ifeval::["{context}" == "quay-io"] +{quayio} comes equipped with Clair security scanner. For more information about Clair on {quayio}, see "Clair security scanner." +endif::[] +ifeval::["{context}" == "use-quay"] +Clair security scanner is not enabled for {productname} by default. To enable Clair, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/testing-clair-with-quay[Clair on {productname}]. +endif::[] + +.Procedure + +. Navigate to a repository and click *Tags* in the navigation pane. This page shows the results of the security scan. + +. To reveal more information about multi-architecture images, click *See Child Manifests* to see the list of manifests in extended view. + +. Click a relevant link under *See Child Manifests*, for example, *1 Unknown* to be redirected to the *Security Scanner* page. + +. The *Security Scanner* page provides information for the tag, such as which CVEs the image is susceptible to, and what remediation options you might have available. + +[NOTE] +==== +Image scanning only lists vulnerabilities found by Clair security scanner. What users do about the vulnerabilities that are uncovered is up to the user. +ifeval::["{context}" == "use-quay"] +{productname} superusers do not act on found vulnerabilities.
+endif::[] +==== diff --git a/modules/proc_use-quay-view-export-logs.adoc b/modules/proc_use-quay-view-export-logs.adoc new file mode 100644 index 000000000..8eaf5e287 --- /dev/null +++ b/modules/proc_use-quay-view-export-logs.adoc @@ -0,0 +1,55 @@ +:_content-type: PROCEDURE +[id="use-quay-view-export-logs"] += Viewing and exporting logs + +Activity logs are gathered for all repositories and namespace in +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +Viewing usage logs of +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +can provide valuable insights and benefits for both operational and security purposes. Usage logs might reveal the following information: + +//// +* *Monitoring and Performance Analysis*: Usage logs can help you monitor the performance of your container registry. By analyzing logs, you can identify patterns in usage, peak times, and potential bottlenecks. This information can guide resource allocation and optimization efforts. +//// + +* *Resource Planning*: Usage logs can provide data on the number of image pulls, pushes, and overall traffic to your registry. + +* *User Activity*: Logs can help you track user activity, showing which users are accessing and interacting with images in the registry. This can be useful for auditing, understanding user behavior, and managing access controls. + +* *Usage Patterns*: By studying usage patterns, you can gain insights into which images are popular, which versions are frequently used, and which images are rarely accessed. This information can help prioritize image maintenance and cleanup efforts. + +* *Security Auditing*: Usage logs enable you to track who is accessing images and when. This is crucial for security auditing, compliance, and investigating any unauthorized or suspicious activity. 
+ +* *Image Lifecycle Management*: Logs can reveal which images are being pulled, pushed, and deleted. This information is essential for managing image lifecycles, including deprecating old images and ensuring that only authorized images are used. + +* *Compliance and Regulatory Requirements*: Many industries have compliance requirements that mandate tracking and auditing of access to sensitive resources. Usage logs can help you demonstrate compliance with such regulations. + +* *Identifying Abnormal Behavior*: Unusual or abnormal patterns in usage logs can indicate potential security breaches or malicious activity. Monitoring these logs can help you detect and respond to security incidents more effectively. + +* *Trend Analysis*: Over time, usage logs can provide trends and insights into how your registry is being used. This can help you make informed decisions about resource allocation, access controls, and image management strategies. + +There are multiple ways of accessing log files: + +* Viewing logs through the web UI. +* Exporting logs so that they can be saved externally. +* Accessing log entries using the API. + +To access logs, you must have administrative privileges for the selected repository or namespace. + +[NOTE] +==== +A maximum of 100 log results are available at a time via the API. +To gather more results that that, you must use the log exporter feature +described in this chapter. +==== \ No newline at end of file diff --git a/modules/proxy-cache-arch.adoc b/modules/proxy-cache-arch.adoc new file mode 100644 index 000000000..701ccd781 --- /dev/null +++ b/modules/proxy-cache-arch.adoc @@ -0,0 +1,25 @@ +[[proxy-cache-architecture]] += Proxy cache architecture + +The following image shows the expected design flow and architecture of the proxy cache feature. 
+ +image:cache-proxy-overview.png[Proxy cache overview] + +When a user pulls an image, for example, `postgres:14`, from an upstream repository on {productname}, the repository checks to see if an image is present. If the image does not exist, a fresh pull is initiated. After being pulled, the image layers are saved to cache and server to the user in parallel. The following image depicts an architectural overview of this scenario: + +image:cache-proxy-pulled-image.png[Pulled image overview] + +If the image in the cache exists, users can rely on Quay's cache to stay up-to-date with the upstream source so that newer images from the cache are automatically pulled. This happens when tags of the original image have been overwritten in the upstream registry. The following image depicts an architectural overview of what happens when the upstream image and cached version of the image are different: + +image:updated-layers-in-cache.png[Updating opposing layers overview] + + +If the upstream image and cached version are the same, no layers are pulled and the cached image is delivered to the user. + +In some cases, users initiate pulls when the upstream registry is down. If this happens with the configured staleness period, the image stored in cache is delivered. If the pull happens after the configured staleness period, the error is propagated to the user. The following image depicts an architectural overview when a pull happens after the configured staleness period: + +image:cache-proxy-staleness-pull.png[Staleness pull overview] + +Quay administrators can leverage the configurable size limit of an organization to limit cache size so that backend storage consumption remains predictable. This is achieved by discarding images from the cache according to the frequency in which an image is used. 
The following image depicts an architectural overview of this scenario: + +//TODO FIX IMAGE image:proxy-cache-size-configuration.adoc[Organization size limit overview] diff --git a/modules/proxy-cache-leveraging-storage-quota-limits.adoc b/modules/proxy-cache-leveraging-storage-quota-limits.adoc new file mode 100644 index 000000000..de0ea9fbd --- /dev/null +++ b/modules/proxy-cache-leveraging-storage-quota-limits.adoc @@ -0,0 +1,51 @@ +:_content-type: CONCEPT +[id="proxy-cache-leveraging-storage-quota-limits"] +== Leveraging storage quota limits in proxy organizations + +With {productname} 3.8, the proxy cache feature has been enhanced with an auto-pruning feature for tagged images. The auto-pruning of image tags is only available when a proxied namespace has quota limitations configured. Currently, if an image size is greater than quota for an organization, the image is skipped from being uploaded until an administrator creates the necessary space. Now, when an image is pushed that exceeds the allotted space, the auto-pruning enhancement marks the least recently used tags for deletion. As a result, the new image tag is stored, while the least used image tag is marked for deletion. + +[IMPORTANT] +==== +* As part of the auto-pruning feature, the tags that are marked for deletion are eventually garbage collected by the garbage collector (gc) worker process. As a result, the quota size restriction is not fully enforced during this period. +* Currently, the namespace quota size computation does not take into account the size for manifest child. This is a known issue and will be fixed in a future version of {productname}. +==== + +=== Testing the storage quota limits feature in proxy organizations + +Use the following procedure to test the auto-pruning feature of an organization with proxy cache and storage quota limitations enabled. + +.Prerequisites + +* Your organization is configured to serve as a proxy organization. The following example proxies from quay.io. 
+ +* `FEATURE_PROXY_CACHE` is set to `true` in your `config.yaml` file. + +* `FEATURE_QUOTA_MANAGEMENT` is set to `true` in your `config.yaml` file. + +* Your organization is configured with a quota limit, for example, `150 MB`. + +.Procedure + +. Pull an image to your repository from your proxy organization, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/quay:3.7.9 +---- + +. Depending on the space left in your repository, you might need to pull additional images from your proxy organization, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/quay:3.6.2 +---- + +. In the {productname} registry UI, click the name of your repository. + +* Click *Tags* in the navigation pane and ensure that `quay:3.7.9` and `quay:3.6.2` are tagged. + +. Pull the last image that will result in your repository exceeding the allotted quota, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/quay:3.5.1 +---- + +. Refresh the *Tags* page of your {productname} registry. The first image that you pushed, for example, `quay:3.7.9` should have been auto-pruned. The *Tags* page should now show `quay:3.6.2` and `quay:3.5.1`. \ No newline at end of file diff --git a/modules/proxy-cache-limitations.adoc b/modules/proxy-cache-limitations.adoc new file mode 100644 index 000000000..1ea52829e --- /dev/null +++ b/modules/proxy-cache-limitations.adoc @@ -0,0 +1,10 @@ +[[proxy-cache-limitations]] += Proxy cache limitations + +Proxy caching with {productname} has the following limitations: + +* Your proxy cache must have a size limit of greater than, or equal to, the image you want to cache. For example, if your proxy cache organization has a maximum size of 500 MB, and the image a user wants to pull is 700 MB, the image will be cached and will overflow beyond the configured limit. + +* Cached images must have the same properties that images on a Quay repository must have. 
+ +* Currently, only layers requested by the client are cached. diff --git a/modules/proxy-cache-procedure.adoc b/modules/proxy-cache-procedure.adoc new file mode 100644 index 000000000..bae33a543 --- /dev/null +++ b/modules/proxy-cache-procedure.adoc @@ -0,0 +1,53 @@ + +[[red-hat-quay-proxy-cache-procedure]] += Using {productname} to proxy a remote registry + +The following procedure describes how you can use {productname} to proxy a remote registry. This procedure is set up to proxy quay.io, which allows users to use `podman` to pull any public image from any namespace on quay.io. + +.Prerequisites + +* `FEATURE_PROXY_CACHE` in your config.yaml is set to `true`. +* Assigned the *Member* team role. For more information about team roles, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/user-org-intro[Users and organizations in {productname}]. + + +.Procedure + +. On the {productname} v2 UI, click the name of an organization, for example, *cache-org*. + +. In the navigation pane, click *Settings*. + +. In the *Remote Registry* box, enter the name of the remote registry to be cached, for example, `quay.io`, and click *Save*. ++ +[NOTE] +==== +By adding a namespace to the *Remote Registry*, for example, `quay.io/`, users in your organization will only be able to proxy from that namespace. +==== + +. Optional. In the *Remote Registry username* box, enter the username for authenticating into the remote registry specified in the previous step. For anonymous pulls from the upstream, you can leave this empty. If you do note set a username at the time of creation, you cannot add one without removing the proxy cache and creating a new registry. + +. Optional. In the *Remote registry password* box, enter the password for authenticating into the remote registry. For anonymous pulls from the upstream, you can leave this empty. 
If you do not set a username at the time of creation
+==== diff --git a/modules/public-cloud-aws.adoc b/modules/public-cloud-aws.adoc new file mode 100644 index 000000000..2f7084122 --- /dev/null +++ b/modules/public-cloud-aws.adoc @@ -0,0 +1,17 @@ +:_content-type: CONCEPT +[id="arch-quay-on-aws"] += Running {productname} on Amazon Web Services + +If {productname} is running on Amazon Web Services (AWS), you can use the following features: + +* AWS Elastic Load Balancer +* AWS S3 (hot) blob storage +* AWS RDS database +* AWS ElastiCache Redis +* EC2 virtual machine recommendation: M3.Large or M4.XLarge + +The following image provides a high level overview of {productname} running on AWS: + +.{productname} on AWS +image:178_Quay_architecture_0821_on_AWS.png[{productname} on AWS] + diff --git a/modules/public-cloud-azure.adoc b/modules/public-cloud-azure.adoc new file mode 100644 index 000000000..c59aa57fe --- /dev/null +++ b/modules/public-cloud-azure.adoc @@ -0,0 +1,15 @@ +:_content-type: CONCEPT +[id="arch-quay-on-azure"] += Running {productname} on Microsoft Azure + +If {productname} is running on Microsoft Azure, you can use the following features: + +* Azure managed services such as highly available PostgreSQL +* Azure Blob Storage must be hot storage +** Azure cool storage is not available for {productname} +* Azure Cache for Redis + +The following image provides a high level overview of {productname} running on Microsoft Azure: + +.{productname} on Microsoft Azure +image:178_Quay_architecture_0821_on_Azure.png[{productname} on Microsoft Azure] \ No newline at end of file diff --git a/modules/public-cloud-intro.adoc b/modules/public-cloud-intro.adoc new file mode 100644 index 000000000..129e6bbb5 --- /dev/null +++ b/modules/public-cloud-intro.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="arch-deploy-quay-public-cloud"] += Deploying {productname} on public cloud + +{productname} can run on public clouds, either in standalone mode or where {ocp} itself has been deployed on public cloud. 
A full list of tested and supported configurations can be found in the {productname} *Tested Integrations Matrix* at link:https://access.redhat.com/articles/4067991[]. + +**Recommendation:** If {productname} is running on public cloud, then you should use the public cloud services for {productname} backend services to ensure proper high availability and scalability. \ No newline at end of file diff --git a/modules/qbo-operator-upgrade.adoc b/modules/qbo-operator-upgrade.adoc new file mode 100644 index 000000000..7da3e2ed1 --- /dev/null +++ b/modules/qbo-operator-upgrade.adoc @@ -0,0 +1,38 @@ +[[qbo-operator-upgrade]] += Upgrade Quay Bridge Operator + +To upgrade the Quay Bridge Operator (QBO), change the Channel Subscription update channel in the Subscription tab to the desired channel. + +When upgrading QBO from version 3.5 to 3.7, a number of extra steps are required: + +. You need to create a new `QuayIntegration` custom resource. This can be completed in the Web Console or from the command line. ++ +.upgrade-quay-integration.yaml +[source,yaml] +---- +- apiVersion: quay.redhat.com/v1 + kind: QuayIntegration + metadata: + name: example-quayintegration-new + spec: + clusterID: openshift <1> + credentialsSecret: + name: quay-integration + namespace: openshift-operators + insecureRegistry: false + quayHostname: https://registry-quay-quay35.router-default.apps.cluster.openshift.com +---- +<1> Make sure that the `clusterID` matches the value for the existing `QuayIntegration` resource. +. Create the new `QuayIntegration` custom resource: ++ +[source,bash] +---- +$ oc create -f upgrade-quay-integration.yaml +---- +. Delete the old `QuayIntegration` custom resource. +. 
Delete the old `mutatingwebhookconfigurations`: ++ +[source,bash] +---- +$ oc delete mutatingwebhookconfigurations.admissionregistration.k8s.io quay-bridge-operator +---- diff --git a/modules/quay-api-examples.adoc b/modules/quay-api-examples.adoc new file mode 100644 index 000000000..467fdeb4e --- /dev/null +++ b/modules/quay-api-examples.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="quay-api-examples"] += {productname} API examples + +The remainder of this chapter provides {productname} API examples for the features in which they are available. \ No newline at end of file diff --git a/modules/quay-as-cache-proxy.adoc b/modules/quay-as-cache-proxy.adoc new file mode 100644 index 000000000..1b72dfb5f --- /dev/null +++ b/modules/quay-as-cache-proxy.adoc @@ -0,0 +1,17 @@ +[[quay-as-cache-proxy]] += {productname} as a proxy cache for upstream registries + +With the growing popularity of container development, customers increasingly rely on container images from upstream registries like Docker or Google Cloud Platform to get services up and running. Today, registries have rate limitations and throttling on the number of times users can pull from these registries. + +With this feature, {productname} will act as a proxy cache to circumvent pull-rate limitations from upstream registries. Adding a cache feature also accelerates pull performance, because images are pulled from the cache rather than upstream dependencies. Cached images are only updated when the upstream image digest differs from the cached image, reducing rate limitations and potential throttling. + +With {productname} cache proxy, the following features are available: + +* Specific organizations can be defined as a cache for upstream registries. +* Configuration of a Quay organization that acts as a cache for a specific upstream registry. 
This repository can be defined by using the Quay UI, and offers the following configurations: +** Upstream registry credentials for private repositories or increased rate limiting. +** Expiration timer to avoid surpassing cache organization size. +* Global on/off configurable via the configuration application. +* Caching of entire upstream registries or just a single namespace, for example, all of `docker.io` or just `docker.io/library`. +* Logging of all cache pulls. +* Cached images scannability by Clair. diff --git a/modules/quay-bridge-operator-test.adoc b/modules/quay-bridge-operator-test.adoc new file mode 100644 index 000000000..2b3115221 --- /dev/null +++ b/modules/quay-bridge-operator-test.adoc @@ -0,0 +1,190 @@ +:_content-type: CONCEPT +[id="quay-bridge-operator-test"] += Using {qbo} + +Use the following procedure to use the {qbo}. + +.Prerequisites + +* You have installed the {productname} Operator. +* You have logged into {ocp} as a cluster administrator. +* You have logged into your {productname} registry. +* You have installed the {qbo}. +* You have configured the `QuayIntegration` custom resource. + +.Procedure + +. Enter the following command to create a new {ocp} project called `e2e-demo`: ++ +[source,terminal] +---- +$ oc new-project e2e-demo +---- + +. After you have created a new project, a new Organization is created in {productname}. Navigate to the {productname} registry and confirm that you have created a new Organization named `openshift_e2e-demo`. ++ +[NOTE] +==== +The `openshift` value of the Organization might different if the clusterID in your `QuayIntegration` resource used a different value. +==== + +. On the {productname} UI, click the name of the new Organization, for example, *openshift_e2e-demo*. + +. Click *Robot Accounts* in the navigation pane. As part of new project, the following Robot Accounts should have been created: ++ +* *openshift_e2e-demo+deployer* +* *openshift_e2e-demo+default* +* *openshift_e2e-demo+builder* + +. 
Enter the following command to confirm three secrets containing Docker configuration associated with the applicable Robot Accounts were created: ++ +[source,terminal] +---- +$ oc get secrets builder-quay-openshift deployer-quay-openshift default-quay-openshift +---- ++ +.Example output ++ +[source,terminal] +---- +stevsmit@stevsmit ocp-quay $ oc get secrets builder-quay-openshift deployer-quay-openshift default-quay-openshift +NAME TYPE DATA AGE +builder-quay-openshift kubernetes.io/dockerconfigjson 1 77m +deployer-quay-openshift kubernetes.io/dockerconfigjson 1 77m +default-quay-openshift kubernetes.io/dockerconfigjson 1 77m +---- + +. Enter the following command to display detailed information about `builder` ServiceAccount (SA), including its secrets, token expiration, and associated roles and role bindings. This ensures that the project is integrated via the {qbo}. ++ +[source,terminal] +---- +$ oc describe sa builder default deployer +---- ++ +.Example output ++ +[source,terminal] +---- +... +Name: builder +Namespace: e2e-demo +Labels: +Annotations: +Image pull secrets: builder-dockercfg-12345 + builder-quay-openshift +Mountable secrets: builder-dockercfg-12345 + builder-quay-openshift +Tokens: builder-token-12345 +Events: +... +---- + +. Enter the following command to create and deploy a new application called `httpd-template`: ++ +[source,terminal] +---- +$ oc new-app --template=httpd-example +---- ++ +.Example output ++ +[source,terminal] +---- +--> Deploying template "e2e-demo/httpd-example" to project e2e-demo +... +--> Creating resources ... 
+ service "httpd-example" created + route.route.openshift.io "httpd-example" created + imagestream.image.openshift.io "httpd-example" created + buildconfig.build.openshift.io "httpd-example" created + deploymentconfig.apps.openshift.io "httpd-example" created +--> Success + Access your application via route 'httpd-example-e2e-demo.apps.quay-ocp.gcp.quaydev.org' + Build scheduled, use 'oc logs -f buildconfig/httpd-example' to track its progress. + Run 'oc status' to view your app. +---- ++ +After running this command, `BuildConfig`, `ImageStream`, `Service,` `Route`, and `DeploymentConfig` resources are created. When the `ImageStream` resource is created, an associated repository is created in {productname}. For example: ++ +image:e2e-demo-httpd-example.png[Example repository] + +. The `ImageChangeTrigger` for the `BuildConfig` triggers a new Build when the Apache HTTPD image, located in the `openshift` namespace, is resolved. As the new Build is created, the `MutatingWebhookConfiguration` automatically rewriters the output to point at {productname}. You can confirm that the build is complete by querying the output field of the build by running the following command: ++ +[source,terminal] +---- +$ oc get build httpd-example-1 --template='{{ .spec.output.to.name }}' +---- ++ +.Example output ++ +[source,terminal] +---- +example-registry-quay-quay-enterprise.apps.quay-ocp.gcp.quaydev.org/openshift_e2e-demo/httpd-example:latest +---- + +. On the {productname} UI, navigate to the `openshift_e2e-demo` Organization and select the *httpd-example* repository. + +. Click *Tags* in the navigation pane and confirm that the `latest` tag has been successfully pushed. + +. 
Enter the following command to ensure that the latest tag has been resolved: ++ +[source,terminal] +---- +$ oc describe is httpd-example +---- ++ +.Example output ++ +[source,terminal] +---- +Name: httpd-example +Namespace: e2e-demo +Created: 55 minutes ago +Labels: app=httpd-example + template=httpd-example +Description: Keeps track of changes in the application image +Annotations: openshift.io/generated-by=OpenShiftNewApp + openshift.io/image.dockerRepositoryCheck=2023-10-02T17:56:45Z +Image Repository: image-registry.openshift-image-registry.svc:5000/e2e-demo/httpd-example +Image Lookup: local=false +Unique Images: 0 +Tags: 1 + +latest + tagged from example-registry-quay-quay-enterprise.apps.quay-ocp.gcp.quaydev.org/openshift_e2e-demo/httpd-example:latest +---- + +. After the `ImageStream` is resolwillved, a new deployment should have been triggered. Enter the following command to generate a URL output: ++ +[source,terminal] +---- +$ oc get route httpd-example --template='{{ .spec.host }}' +---- ++ +.Example output ++ +[source,terminal] +---- +httpd-example-e2e-demo.apps.quay-ocp.gcp.quaydev.org +---- + +. Navigate to the URL. If a sample webpage appears, the deployment was successful. + +. Enter the following command to delete the resources and clean up your {productname} repository: ++ +[source,terminal] +---- +$ oc delete project e2e-demo +---- ++ +[NOTE] +==== +The command waits until the project resources have been removed. This can be bypassed by adding the `--wait=false` to the above command +==== + +. After the command completes, navigate to your {productname} repository and confirm that the `openshift_e2e-demo` Organization is no longer available. + +.Additional resources + +* Best practices dictate that all communication between a client and an image registry be facilitated through secure means. Communication should leverage HTTPS/TLS with a certificate trust between the parties. 
While {productname} can be configured to serve an insecure configuration, proper certificates should be utilized on the server and configured on the client. Follow the link:https://docs.openshift.com/container-platform/{ocp-y}/security/certificate_types_descriptions/proxy-certificates.html[{ocp} documentation] for adding and managing certificates at the container runtime level. + diff --git a/modules/quay-error-details.adoc b/modules/quay-error-details.adoc new file mode 100644 index 000000000..6760f0980 --- /dev/null +++ b/modules/quay-error-details.adoc @@ -0,0 +1,38 @@ +:_content-type: PROCEDURE +[id="quay-error-details"] += Obtaining {productname} API error details + +{productname} API error details are discoverable by using the API. + +Use the following procedure to discover error details. + +.Prerequisites + +* You have created an OAuth 2 access token. + +.Procedure + +* You can obtain error details of the API by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#geterrordescription[`GET /api/v1/error/{error_type}`] endpoint. 
Note that you must include one of the following error codes: ++ +[options="header", width=100%, cols=".^2a,.^5a"] +|=== +|HTTP Code|Description +|200|Successful invocation +|400|Bad Request +|401|Session required +|403|Unauthorized access +|404|Not found +|=== ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/error/" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +curl: (7) Failed to connect to quay-server.example.com port 443 after 0 ms: Couldn't connect to server +---- diff --git a/modules/quay-internal-registry-intro.adoc b/modules/quay-internal-registry-intro.adoc new file mode 100644 index 000000000..58918a682 --- /dev/null +++ b/modules/quay-internal-registry-intro.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="quay-internal-registry-intro"] += Using {productname} with or without internal registry + +{productname} can be used as an external registry in front of multiple {ocp} clusters with their internal registries. + +{productname} can also be used in place of the internal registry when it comes to automating builds and deployment rollouts. The required coordination of `Secrets` and `ImageStreams` is automated by the Quay Bridge Operator, which can be launched from the OperatorHub for {ocp}. diff --git a/modules/quay-robot-accounts-intro.adoc b/modules/quay-robot-accounts-intro.adoc new file mode 100644 index 000000000..c41690c28 --- /dev/null +++ b/modules/quay-robot-accounts-intro.adoc @@ -0,0 +1,14 @@ +[[quay-robot-accounts]] += {productname} robot accounts + +Robot accounts are named tokens that hold credentials for accessing external repositories. By assigning credentials to a robot, that robot can be used across multiple mirrored repositories that need to access the same external registry. + +Robot accounts are managed on the *Robot Accounts* tab. They can only belong to one organization, but can be assigned to multiple Teams. + +[NOTE] +==== +Teams and users can belong to multiple organizations. 
+==== + +//should probably be an xref +For more information on robot accounts, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#allow-robot-access-user-repo[Allowing robot access to a user repository]. diff --git a/modules/quay-sso-keycloak-intro.adoc b/modules/quay-sso-keycloak-intro.adoc new file mode 100644 index 000000000..9beeac7e1 --- /dev/null +++ b/modules/quay-sso-keycloak-intro.adoc @@ -0,0 +1,6 @@ +[[quay-sso-keycloak]] += {productname} and Red Hat SSO / Keycloak + +{productname} can support authentication via OpenID Connect (OIDC). Red Hat Single Sign On (SSO) is an OIDC provider that allows administrators to have a seamless authentication integration between {productname} and other application platforms such as Red Hat OpenShift Container Platform. + +{productname} and Red Hat SSO / Keycloak requires that TLS/SSL is properly configured to proceed with setup. Red Hat SSO supports many different types of OIDC. {productname}, however, only supports OIDC clients configured for link:https://access.redhat.com/solutions/3496181[Confidential Client Credentials]. For more information configuring Red Hat SSO, see link:https://access.redhat.com/solutions/3566061[Red Hat Quay with Red Hat Single Sign On / Keycloak]. diff --git a/modules/quay-super-users-intro.adoc b/modules/quay-super-users-intro.adoc new file mode 100644 index 000000000..54bdbbe98 --- /dev/null +++ b/modules/quay-super-users-intro.adoc @@ -0,0 +1,19 @@ +[[quay-super-users]] += {productname} super users + +`Super users` are a group of {productname} users with enhanced access and privileges, including: + +* Super user API calls that are not visible or accessible to normal users. +* Access to the `Super User Admin Panel`, which allows users to: +** Change a user's email address, password, delete, or disable users. +** Rename, delete, or take ownership of an organization. 
+** Change expiration time, rename, or delete service keys such as Clair. +** Access to global and registry-wide usage, or audit, logs. +** Create and send globally visible user messages. + +Primary super users are created during the initial {productname} configuration, and more can be created after configuration is complete. + +[NOTE] +==== +Super users cannot delete repositories or organizations without taking ownership of them first. +==== diff --git a/modules/quay-users-intro.adoc b/modules/quay-users-intro.adoc new file mode 100644 index 000000000..63f27ea04 --- /dev/null +++ b/modules/quay-users-intro.adoc @@ -0,0 +1,20 @@ +[[quay-users]] += {productname} users + +User accounts allow individuals to log in through the {productname} UI or a client, such as `podman login`. Usernames must be unique, and they cannot be changed once invited to an organization. After a user account is established, options to sign in via Google or GitHub can be added. + + + +[NOTE] +==== +Because `docker login` via the CLI stores passwords in plaintext, we recommend generating an encrypted version of the password. +==== + +Each user automatically gets their own user namespace, for example, quay.example.com/user/. + +[NOTE] +==== +User namespaces are different from Quay organizations. There are no teams, usage logs, default permissions, or OAuth applications. By comparison, organizations are listed under quay.example.com/organization. +==== + +Users who are not members of an organization can be added to the repository and given permissions. External users are marked with a special character in the *Users and Robot Permissions* section under the *Repository Settings* tab. They are also shown under the *Collaborators View* tab on on the organization level of the *Teams and Membership* page. 
diff --git a/modules/quayio-main-page.adoc b/modules/quayio-main-page.adoc new file mode 100644 index 000000000..77126ffb7 --- /dev/null +++ b/modules/quayio-main-page.adoc @@ -0,0 +1,230 @@ +:_content-type: CONCEPT +[id="quayio-main-page"] += {quayio} landing page + +The link:quay.io[Quay.io] landing page serves as the central hub for users to access the container registry services offered. This page provides essential information and links to guide users in securely storing, building, and deploying container images effortlessly. + +The landing page of {quayio} includes links to the following resources: + +* link:https://quay.io/search[Explore]. On this page, you can search the {quayio} database for various applications and repositories. +* link:https://quay.io/tutorial/[Tutorial]. On this page, you can take a step-by-step walkthrough that shows you how to use {quayio}. +* link:https://quay.io/plans/[Pricing]. On this page, you can learn about the various pricing tiers offered for {quayio}. There are also various FAQs addressed on this page. +* link:https://quay.io/signin/[Sign in]. By clicking this link, you are re-directed to sign into your {quayio} repository. + +image:quayio-header.png[{quayio} header]. + +The landing page also includes information about scheduled maintenance. During scheduled maintenance, {quayio} is operational in read-only mode, and pulls function as normal. Pushes and builds are non-operational during scheduled maintenance. You can subscribe to updates regarding {quayio} maintenance by navigating to link:https://status.quay.io/incidents/kzyx3gh434cr[{quayio} *Status* page] and clicking *Subscribe To Updates*. + +image:scheduled-maintenance-banner.png[Scheduled maintenance banner] + +The landing page also includes links to the following resources: + +* link:https://docs.projectquay.io/welcome.html[Documentation]. This page provides documentation for using {quayio}. +* link:https://cloud.redhat.com/legal/terms[Terms]. 
This page provides legal information about Red Hat Online Services. +* link:https://www.redhat.com/en/about/privacy-policy[Privacy]. This page provides information about Red Hat's Privacy Statement. +* link:https://quay.io/security/[Security]. This page provides information about {quayio} security, including SSL/TLS, encryption, passwords, access controls, firewalls, and data resilience. +* link:https://quay.io/about/[About]. This page includes information about packages and projects used and a brief history of the product. +* link:https://access.redhat.com/articles/quayio-help[Contact]. This page includes information about support and contacting the Red Hat Support Team. +* link:https://status.quay.io/[All Systems Operational]. This page includes information about the status of {quayio} and a brief history of maintenance. +* Cookies. By clicking this link, a popup box appears that allows you to set your cookie preferences. + +image:quayio-footer.png[{quayio} footer]. + +You can also find information about link:https://www.redhat.com/en/technologies/cloud-computing/quay[Trying {productname} on premise] or link:https://quay.io/plans/[Trying {productname} on the cloud], which redirects you to the *Pricing* page. Each option offers a free trial. + +[id="pricsignining-page-quayio"] +== Creating a {quayio} account + +New users of {quayio} are required to both link:https://sso.redhat.com/auth/realms/redhat-external/login-actions/registration?client_id=quay.io&tab_id=6C6R-5nPDLo[Register for a Red Hat account] and create a {quayio} username. These accounts are correlated, with two distinct differences: + +* The {quayio} account can be used to push and pull container images or Open Container Initiative images to {quayio} to store images. +* The Red Hat account provides users access to the {quayio} user interface. 
For paying customers, this account can also be used to access images from link:registry.redhat.io[the Red Hat Ecosystem Catalog], which can be pushed to their {quayio} repository. + +Users must first register for a Red Hat account, and then create a {quayio} account. Users need both accounts to properly use all features of {quayio}. + +[id="registering-red-hat-account"] +=== Registering for a Red Hat Account + +Use the following procedure to register for a Red Hat account for {quayio}. + +.Procedure + +. Navigate to the link:https://access.redhat.com/[Red Hat Customer Portal]. + +. In the navigation pane, click *Log In*. + +. When navigated to the log in page, click *Register for a Red Hat Account*. + +. Enter a Red Hat login ID. + +. Enter a password. + +. Enter the following personal information: ++ +* *First name* +* *Last name* +* *Email address* +* *Phone number* + +. Enter the following contact information that is relative to your country or region. For example: ++ +* *Country/region* +* *Address* +* *Postal code* +* *City* +* *County* + +. Select and agree to Red Hat's terms and conditions. + +. Click *Create my account*. + +. Navigate to {quayio} and log in. + +[id="creating-first-quayio-user-account"] +=== Creating a {quayio} user account + +Use the following procedure to create a {quayio} user account. + +.Prerequisites + +* You have created a Red Hat account. + +.Procedure + +. If required, resolve the captcha by clicking *I am not a robot* and confirming. You are redirected to a *Confirm Username* page. + +. On the *Confirm Username* page, enter a username. By default, a username is generated. If the same username already exists, a number is added at the end to make it unique. This username is used as a namespace in the Quay Container Registry. + +. After deciding on a username, click *Confirm Username*. You are redirected to the {quayio} *Repositories* page, which serves as a dedicated hub where users can access and manage their repositories with ease. 
From this page, users can efficiently organize, navigate, and interact with their container images and related resources. + +[id="quayio-rh-sso-support"] +=== {quayio} Single Sign On support + +Red Hat Single Sign On (SSO) can be used with {quayio}. Use the following procedure to set up Red Hat SSO with {quayio}. For most users, these accounts are already linked. However, for some legacy {quayio} users, this procedure might be required. + +.Prerequisites + +* You have created a {quayio} account. + +.Procedure + +. Navigate to the link:recovery.quay.io[{quayio} *Recovery* page]. + +. Enter your username and password, then click *Sign in to Quay Container Registry*. + +. In the navigation pane, click your username -> *Account Settings*. + +. In the navigation pane, click *External Logins and Applications*. + +. Click *Attach to Red Hat*. + +. If you are already signed into Red Hat SSO, your account is automatically linked. Otherwise, you are prompted to sign into Red Hat SSO by entering your Red Hat login or email, and the password. Alternatively, you might need to create a new account first. ++ +After signing into Red Hat SSO, you can choose to authenticate against {quayio} using your Red Hat account from the login page. + +[discrete] +=== Additional resources +* For more information, see +link:https://access.redhat.com/articles/5363231[Quay.io Now Supports Red Hat Single Sign On]. + + +[id="explore-page-quayio"] +== Exploring {quayio} + +The {quayio} link:https://quay.io/search[*Explore*] page is a valuable hub that allows users to delve into a vast collection of container images, applications, and repositories shared by the {quayio} community. With its intuitive and user-friendly design, the *Explore* page offers a powerful search function, enabling users to effortlessly discover containerized applications and resources. 
+ +[id="tutorial-page-quayio"] +== Trying {quayio} (deprecated) + +[NOTE] +==== +The {productname} tutorial is currently deprecated and will be removed when the v2 UI goes generally available (GA). +==== + +The {quayio} link:https://quay.io/tutorial[*Tutorial*] page offers users an introduction to the {quayio} container registry service. By clicking *Continue Tutorial* users learn how to perform the following features on {quayio}: + +* Logging into Quay Container Registry from the Docker CLI +* Starting a container +* Creating images from a container +* Pushing a repository to Quay Container Registry +* Viewing a repository +* Setting up build triggers +* Changing a repository's permissions + +[id="pricing-page-quayio"] +== Information about {quayio} pricing + +In addition to a free tier, {quayio} also offers several paid plans that have enhanced benefits. + +The {quayio} *Pricing* page offers information about {quayio} plans and the associated prices of each plan. The cost of each tier can be found on the link:https://quay.io/plans/[*Pricing*] page. All {quayio} plans include the following benefits: + +* Continuous integration +* Public repositories +* Robot accounts +* Teams +* SSL/TLS encryption +* Logging and auditing +* Invoice history + +{quayio} subscriptions are handled by the link:https://stripe.com[Stripe] payment processing platform. A valid credit card is required to sign up for {quayio}. + +To sign up for {quayio}, use the following procedure. + +.Procedure + +. Navigate to the link:https://quay.io/plans/[{quayio} *Pricing* page]. + +. Decide on a plan, for example, *Small*, and click *Buy Now*. You are redirected to the *Create New Organization* page. Enter the following information: ++ +* *Organization Name* +* *Organization Email* +* Optional. You can select a different plan if you want a plan larger than, for example, *Small*. + +. Resolve the captcha, and select *Create Organization*. + +. You are redirected to Stripe. 
Enter the following information: ++ +* *Card information*, including *MM/YY* and the *CVC* +* *Name on card* +* *Country or region* +* *ZIP* (if applicable) +* Check the box if you want your information to be saved. +* *Phone Number* + +. Click *Subscribe* after all boxes have been filled. + +//// +[id="pricing-page-faq"] +=== *Pricing* FAQ + +The following questions are commonly asked in regards to a {quayio} subscription. + +* *How do I use Quay with my servers and code?* ++ +Using Quay with your infrastructure is separated into two main actions: building containers and distributing them to your servers. ++ +You can configure Quay to automatically build containers of your code on each commit. Integrations with GitHub, Bitbucket, GitLab and self-hosted Git repositories are supported. Each built container is stored on Quay and is available to be pulled down onto your servers. ++ +To distribute your private containers onto your servers, Docker or rkt must be configured with the correct credentials. Quay has sophisticated access controls — organizations, teams, robot accounts, and more — to give you full control over which servers can pull down your containers. An API can be used to automate the creation and management of these credentials. + +* *How is Quay optimized for a team environment?* ++ +Quay's permission model is designed for teams. Each new user can be assigned to one or more teams, with specific permissions. Robot accounts, used for automated deployments, can be managed per team as well. This system allows for each development team to manage their own credentials. ++ +Full logging and auditing is integrated into every part of the application and API. Quay helps you dig into every action for more details. +Additional FAQs + +* *Can I change my plan?* ++ +Yes, you can change your plan at any time and your account will be pro-rated for the difference. For large organizations, Red Hat Quay offers unlimited users and repos. 
+Do you offer special plans for business or academic institutions? ++ +Please contact us at our support email address to discuss the details of your organization and intended usage. + +* *Can I use Quay for free?* ++ +Yes! We offer unlimited storage and serving of public repositories. We strongly believe in the open source community and will do what we can to help! +What types of payment do you accept? ++ +Quay uses Stripe as our payment processor, so we can accept any of the payment options they offer, which are currently: Visa, MasterCard, American Express, JCB, Discover and Diners Club. +//// \ No newline at end of file diff --git a/modules/quayio-overview.adoc b/modules/quayio-overview.adoc new file mode 100644 index 000000000..a6db3819f --- /dev/null +++ b/modules/quayio-overview.adoc @@ -0,0 +1,15 @@ +:_content-type: CONCEPT +[id="quayio-overview"] += {quayio} overview + +{quayio} is a registry for storing and building container images, but can also be used to distribute both container images and other artifacts. It offers both free and paid tiers to cater to various user needs, and is primarily hosted in the United States (`us-east-1` region of Amazon Web Services) with CDN edge servers scattered throughout the world. + +{quayio} is flexible, easy to use, and allows users to upload and manage container images. Developers can create private repositories, ensuring sensitive or proprietary code remains secure within their organization. Additionally, users can set up access controls and manage team collaboration, enabling seamless sharing of container images among designated team members. + +{quayio} addresses container security concerns through its integrated image scanner, link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/index[Clair]. 
The service automatically scans container images for known vulnerabilities and security issues, providing developers with valuable insights into potential risks and suggesting remediation steps. + +{quayio} excels in automation and supports integration with popular Continuous Integration/Continuous Deployment (CI/CD) tools and platforms, enabling seamless automation of the container build and deployment processes. As a result, developers can streamline their workflows, significantly reducing manual intervention and improving overall development efficiency. + +{quayio} caters to the needs of both large and small-scale deployments. The platform can handle significant container image traffic and offers efficient replication and distribution mechanisms to deliver container images to various geographical locations. + +With {quayio}, developers can discover a collection of pre-built, public container images shared by other users, making it easier to find useful tools, applications, and services for their projects. \ No newline at end of file diff --git a/modules/quayio-support.adoc b/modules/quayio-support.adoc new file mode 100644 index 000000000..b60fd5a76 --- /dev/null +++ b/modules/quayio-support.adoc @@ -0,0 +1,18 @@ +:_content-type: CONCEPT +[id="quayio-support"] += {quayio} support + +Technical support is a crucial aspect of the {quayio} container registry service, providing assistance not only in managing container images but also ensuring the functionality and availability of the hosted platform. + +To help users with functionality-related issues, Red Hat offers {quayio} customers access to several resources. The link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase] contains valuable content to maximize the potential of Red Hat's products and technologies. Users can find articles, product documentation, and videos that outline best practices for installing, configuring, and utilizing Red Hat products. 
It also serves as a hub for solutions to known issues, providing concise root cause descriptions and remedial steps. + +Additionally, {quayio} customers can count on the technical support team to address questions, troubleshoot problems, and provide solutions for an optimized experience with the platform. Whether it involves understanding specific features, customizing configurations, or resolving container image build issues, the support team is dedicated to guiding users through each step with clarity and expertise. + +For incidents related to service disruptions or performance issues not listed on the link:https://status.quay.io/[Quay.io status page], which includes availability and functionality concerns, paying customers can raise a technical support ticket using the link:http://access.redhat.com[Red Hat Customer Portal]. A service incident is defined as an unplanned interruption of service or reduction in service quality, affecting multiple users of the platform. + +With this comprehensive technical support system in place, {quayio} ensures that users can confidently manage their container images, optimize their platform experience, and overcome any challenges that might arise. + +[role="_additional-resources"] +.Additional resources + +Current {productname} and {quayio} users can find more information about troubleshooting and support in the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/troubleshooting_red_hat_quay[{productname} Troubleshooting guide]. diff --git a/modules/quayio-ui-overview.adoc b/modules/quayio-ui-overview.adoc new file mode 100644 index 000000000..500db76a6 --- /dev/null +++ b/modules/quayio-ui-overview.adoc @@ -0,0 +1,16 @@ +:_content-type: CONCEPT +[id="quayio-ui-overview"] += {quayio} user interface overview + +The user interface (UI) of {quayio} is a fundamental component that serves as the user's gateway to managing and interacting with container images within the platform's ecosystem. 
{quayio}'s UI is designed to provide an intuitive and user-friendly interface, making it easy for users of all skill levels to navigate and harness {quayio}'s features and functionalities. + +This documentation section aims to introduce users to the key elements and functionalities of {quayio}'s UI. It will cover essential aspects such as the UI's layout, navigation, and key features, providing a solid foundation for users to explore and make the most of {quayio}'s container registry service. + +Throughout this documentation, step-by-step instructions, visual aids, and practical examples are provided on the following topics: + +* Exploring applications and repositories +* Using the {quayio} tutorial +* Pricing and {quayio} plans +* Signing in and using {quayio} features + +Collectively, this document ensures that users can quickly grasp the UI's nuances and successfully navigate their containerization journey with {quayio}. \ No newline at end of file diff --git a/modules/quota-establishment-api.adoc b/modules/quota-establishment-api.adoc new file mode 100644 index 000000000..b7024c203 --- /dev/null +++ b/modules/quota-establishment-api.adoc @@ -0,0 +1,310 @@ +[id="quota-establishment-api"] += Establishing quota for an organization with the {productname} API + +When an organization is first created, it does not have an established quota. You can use the API to check, create, change, or delete quota limitations for an organization. + +.Prerequisites + +* You have generated an OAuth access token. + +.Procedure + +. 
To set a quota for an organization, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquota[`POST /api/v1/organization/{orgname}/quota`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240, + "limits": "10 Gi" + }' +---- ++ +.Example output +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquota[`GET /api/v1/organization/{orgname}/quota`] command to see if your organization already has an established quota: ++ +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https:///api/v1/organization//quota | jq +---- ++ +.Example output +[source,terminal] +---- +[{"id": 1, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- + +. You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquota[`PUT /api/v1/organization/{orgname}/quota/{quota_id}`] command to modify the existing quota limitation. For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output +[source,json] +---- +{"id": 1, "limit_bytes": 21474836480, "limit": "20.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +== Pushing images + +To see the storage consumed, push various images to the organization. 
+ +=== Pushing ubuntu:18.04 + +Push ubuntu:18.04 to the organization from the command line: + +.Sample commands +[source,terminal] +---- +$ podman pull ubuntu:18.04 + +$ podman tag docker.io/library/ubuntu:18.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 + +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 +---- + + +=== Using the API to view quota usage + +To view the storage consumed, `GET` data from the */api/v1/repository* endpoint: + +.Sample command +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true' | jq +---- + +.Sample output +[source,json] +---- +{ + "repositories": [ + { + "namespace": "testorg", + "name": "ubuntu", + "description": null, + "is_public": false, + "kind": "image", + "state": "NORMAL", + "quota_report": { + "quota_bytes": 27959066, + "configured_quota": 104857600 + }, + "last_modified": 1651225630, + "popularity": 0, + "is_starred": false + } + ] +} +---- + +=== Pushing another image + +. Pull, tag, and push a second image, for example, `nginx`: ++ +.Sample commands +[source,terminal] +---- +$ podman pull nginx + +$ podman tag docker.io/library/nginx example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx + +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx +---- + +. 
To view the quota report for the repositories in the organization, use the */api/v1/repository* endpoint: ++ +.Sample command +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/repository?last_modified=true&namespace=testorg&popularity=true&public=true' +---- ++ +.Sample output +[source,json] +---- +{ + "repositories": [ + { + "namespace": "testorg", + "name": "ubuntu", + "description": null, + "is_public": false, + "kind": "image", + "state": "NORMAL", + "quota_report": { + "quota_bytes": 27959066, + "configured_quota": 104857600 + }, + "last_modified": 1651225630, + "popularity": 0, + "is_starred": false + }, + { + "namespace": "testorg", + "name": "nginx", + "description": null, + "is_public": false, + "kind": "image", + "state": "NORMAL", + "quota_report": { + "quota_bytes": 59231659, + "configured_quota": 104857600 + }, + "last_modified": 1651229507, + "popularity": 0, + "is_starred": false + } + ] +} +---- + +. To view the quota information in the organization details, use the */api/v1/organization/{orgname}* endpoint: ++ +.Sample command +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' 'https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg' | jq +---- ++ +.Sample output +[source,json] +---- +{ + "name": "testorg", + ... + "quotas": [ + { + "id": 1, + "limit_bytes": 104857600, + "limits": [] + } + ], + "quota_report": { + "quota_bytes": 87190725, + "configured_quota": 104857600 + } +} +---- + +== Rejecting pushes using quota limits + +If an image push exceeds defined quota limitations, a soft or hard check occurs: + +* For a soft check, or _warning_, users are notified. +* For a hard check, or _reject_, the push is terminated. 
+ +=== Setting reject and warning limits + +To set _reject_ and _warning_ limits, POST data to the */api/v1/organization/{orgname}/quota/{quota_id}/limit* endpoint: + +.Sample reject limit command +[source,terminal] +---- +$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"type":"Reject","threshold_percent":80}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1/limit +---- + +.Sample warning limit command +[source,terminal] +---- +$ curl -k -X POST -H "Authorization: Bearer " -H 'Content-Type: application/json' -d '{"type":"Warning","threshold_percent":50}' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota/1/limit +---- + +=== Viewing reject and warning limits + +To view the _reject_ and _warning_ limits, use the */api/v1/organization/{orgname}/quota* endpoint: + +.View quota limits +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +---- + + +.Sample output for quota limits +[source,json] +---- +[ + { + "id": 1, + "limit_bytes": 104857600, + "default_config": false, + "limits": [ + { + "id": 2, + "type": "Warning", + "limit_percent": 50 + }, + { + "id": 1, + "type": "Reject", + "limit_percent": 80 + } + ], + "default_config_exists": false + } +] +---- + +=== Pushing an image when the reject limit is exceeded + +In this example, the reject limit (80%) has been set to below the current repository size (~83%), so the next push should automatically be rejected. 
+ +Push a sample image to the organization from the command line: + +.Sample image push +[source,terminal] +---- +$ podman pull ubuntu:20.04 + +$ podman tag docker.io/library/ubuntu:20.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 + +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 +---- + +.Sample output when quota exceeded +[source,terminal] +---- +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0002] failed, retrying in 1s ... (1/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0005] failed, retrying in 1s ... (2/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0009] failed, retrying in 1s ... (3/3). 
Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +---- + + +=== Notifications for limits exceeded + +When limits are exceeded, a notification appears: + +.Quota notifications +image:quota-notifications.png[Quota notifications] diff --git a/modules/quota-establishment-ui.adoc b/modules/quota-establishment-ui.adoc new file mode 100644 index 000000000..122b1ef4a --- /dev/null +++ b/modules/quota-establishment-ui.adoc @@ -0,0 +1,152 @@ +:_content-type: CONCEPT +[id="quota-establishment-ui"] += Establishing quota in {productname} UI + +The following procedure describes how you can report storage consumption and establish storage quota limits. + +.Prerequisites + +* A {productname} registry. +* A superuser account. +* Enough storage to meet the demands of quota limitations. + +.Procedure + +. Create a new organization or choose an existing one. Initially, no quota is configured, as can be seen on the *Organization Settings* tab: ++ +image:quota-none-org-settings.png[No Quota Configured] + +. Log in to the registry as a superuser and navigate to the *Manage Organizations* tab on the *Super User Admin Panel*. Click the *Options* icon of the organization for which you want to create storage quota limits: ++ +image:quota-su-org-options.png[Organization options] + +. Click *Configure Quota* and enter the initial quota, for example, *10 MB*. 
Then click *Apply* and *Close*: ++ +image:quota-su-init-10MB.png[Initial quota] + +. Check that the quota consumed shows *0 of 10 MB* on the *Manage Organizations* tab of the superuser panel: ++ +image:quota-su-init-consumed.png[Initial consumed quota] ++ +The consumed quota information is also available directly on the Organization page: ++ +.Initial consumed quota +image:quota-org-init-consumed.png[Initial consumed quota] + +. To increase the quota to 100MB, navigate to the *Manage Organizations* tab on the superuser panel. Click the *Options* icon and select *Configure Quota*, setting the quota to 100 MB. Click *Apply* and then *Close*: ++ +image:quota-su-increase-100MB.png[Increase quota] + +. Pull a sample image by entering the following command: ++ +[source,terminal] +---- +$ podman pull ubuntu:18.04 +---- + +. Tag the sample image by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/ubuntu:18.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 +---- + +. Push the sample image to the organization by entering the following command: ++ +[source,terminal] +---- +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:18.04 +---- + +. On the superuser panel, the quota consumed per organization is displayed: ++ +image:quota-su-consumed-first.png[Total Quota Consumed for first image] + +. The Organization page shows the total proportion of the quota used by the image: ++ +.Total Quota Consumed for first image +image:quota-org-consumed-first.png[Total Quota Consumed for first image] + +. Pull a second sample image by entering the following command: ++ +[source,terminal] +---- +$ podman pull nginx +---- + +. Tag the second image by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/nginx example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx +---- + +. 
Push the second image to the organization by entering the following command: ++ +[source,terminal] +---- +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/nginx +---- + +. The Organization page shows the total proportion of the quota used by each repository in that organization: ++ +.Total Quota Consumed for each repository +image:quota-org-consumed-second.png[Total Quota Consumed for each repository] + +. Create _reject_ and _warning_ limits: ++ +From the superuser panel, navigate to the *Manage Organizations* tab. Click the *Options* icon for the organization and select *Configure Quota*. In the *Quota Policy* section, with the *Action* type set to *Reject*, set the *Quota Threshold* to *80* and click *Add Limit*: ++ +image:quota-su-reject-80.png[Reject limit] + +. To create a _warning_ limit, select *Warning* as the *Action* type, set the *Quota Threshold* to *70* and click *Add Limit*: ++ +image:quota-su-warning-70.png[Warning limit] + +. Click *Close* on the quota popup. The limits are viewable, but not editable, on the *Settings* tab of the *Organization* page: ++ +image:quota-org-quota-policy.png[Quota policy in organization settings] + +. Push an image where the reject limit is exceeded: ++ +Because the reject limit (80%) has been set to below the current repository size (~83%), the next pushed image is rejected automatically. 
++ +.Sample image push +[source,terminal] +---- +$ podman pull ubuntu:20.04 + +$ podman tag docker.io/library/ubuntu:20.04 example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 + +$ podman push --tls-verify=false example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/testorg/ubuntu:20.04 +---- ++ +.Sample output when quota exceeded +[source,terminal] +---- +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0002] failed, retrying in 1s ... (1/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0005] failed, retrying in 1s ... (2/3). Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +WARN[0009] failed, retrying in 1s ... (3/3). 
Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +Getting image source signatures +Copying blob d4dfaa212623 [--------------------------------------] 8.0b / 3.5KiB +Copying blob cba97cc5811c [--------------------------------------] 8.0b / 15.0KiB +Copying blob 0c78fac124da [--------------------------------------] 8.0b / 71.8MiB +Error: Error writing blob: Error initiating layer upload to /v2/testorg/ubuntu/blobs/uploads/ in example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org: denied: Quota has been exceeded on namespace +---- + +. When limits are exceeded, notifications are displayed in the UI: ++ +.Quota notifications +image:quota-notifications.png[Quota notifications] \ No newline at end of file diff --git a/modules/quota-limit-api.adoc b/modules/quota-limit-api.adoc new file mode 100644 index 000000000..e2275e75f --- /dev/null +++ b/modules/quota-limit-api.adoc @@ -0,0 +1,90 @@ +[id="quota-limit-management-api"] += Setting quota limits for an organization with the {productname} API + +You can set specific quota limits for an organization so that, when exceeded, a warning is returned, or the pushed image is denied altogether. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquotalimit[`POST /api/v1/organization/{orgname}/quota/{quota_id}/limit`] command to create a quota policy that rejects images if they exceeded the allotted quota. For example: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 21474836480, + "type": "Reject", <1> + "threshold_percent": 90 <2> + }' +---- +<1> One of `Reject` or `Warning`. +<2> Quota threshold, in percent of quota. 
++ +.Example output ++ +[source,terminal] +---- +"Created" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquotalimit[`GET /api/v1/organization/{orgname}/quota/{quota_id}/limit`] to obtain the ID of the quota limit. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota//limit" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 2, "type": "Reject", "limit_percent": 90}] +---- + +//// +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserquotalimit[`GET /api/v1/user/quota/{quota_id}/limit/{limit_id}`] endpoint to return information about the quota limit. Note that this requires the limit ID. For example: ++ +[source,terminal] +---- + +---- ++ +.Example output ++ +[source,terminal] +---- + +---- +//// + +. Update the policy with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquotalimit[`PUT /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "type": "", + "threshold_percent": + }' +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 3, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [{"id": 2, "type": "Warning", "limit_percent": 80}], "default_config_exists": false} +---- + +. 
You can delete the quota limit with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationquotalimit[`DELETE /api/v1/organization/{orgname}/quota/{quota_id}/limit/{limit_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota//limit/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/quota-limit-user-api.adoc b/modules/quota-limit-user-api.adoc new file mode 100644 index 000000000..53c182513 --- /dev/null +++ b/modules/quota-limit-user-api.adoc @@ -0,0 +1,67 @@ +[id="quota-limit-user-api"] += Obtaining quota limits for the user with the {productname} API + +You can specify quota and limitations for users so that, when exceeded, a warning is returned, or the pushed image is denied altogether. Quota limits for users must be set on the {productname} UI. The following APIs can be used to view the quota limits for the user that is logged in. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserquota[`GET /api/v1/user/quota`] command to return information about the quota limitations: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 4, "limit_bytes": 2199023255552, "limit": "2.0 TiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- + +. 
After you have received the quota ID, you can pass it in with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserquota[`GET /api/v1/user/quota/{quota_id}`] endpoint to return information about the limitation: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 4, "limit_bytes": 2199023255552, "limit": "2.0 TiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. The limitations can be viewed by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserquotalimit[`GET /api/v1/user/quota/{quota_id}/limit`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +[{"id": 3, "type": "Reject", "limit_percent": 100}] +---- + +. 
Additional information about the entire policy can be returned using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserquotalimit[`GET /api/v1/user/quota/{quota_id}/limit/{limit_id}`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/user/quota/{quota_id}/limit/{limit_id}" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"id": 4, "limit_bytes": 2199023255552, "limit": "2.0 TiB", "default_config": false, "limits": [{"id": 3, "type": "Reject", "limit_percent": 100}], "default_config_exists": false} + +---- \ No newline at end of file diff --git a/modules/quota-management-and-enforcement.adoc b/modules/quota-management-and-enforcement.adoc new file mode 100644 index 000000000..30af5a653 --- /dev/null +++ b/modules/quota-management-and-enforcement.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="red-hat-quay-quota-management-and-enforcement"] += {productname} quota management and enforcement overview + +With {productname}, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. On-premise {productname} users are now equipped with the following capabilities to manage the capacity limits of their environment: + +* **Quota reporting:** With this feature, a superuser can track the storage consumption of all their organizations. Additionally, users can track the storage consumption of their assigned organization. + +* **Quota management:** With this feature, a superuser can define soft and hard checks for {productname} users. Soft checks tell users if the storage consumption of an organization reaches their configured threshold. Hard checks prevent users from pushing to the registry when storage consumption reaches the configured limit. 
+ +Together, these features allow service owners of a {productname} registry to define service level agreements and support a healthy resource budget. diff --git a/modules/quota-management-api.adoc b/modules/quota-management-api.adoc new file mode 100644 index 000000000..00178ec14 --- /dev/null +++ b/modules/quota-management-api.adoc @@ -0,0 +1,6 @@ +[id="quota-management-api"] += Establishing quota with the {productname} API + +You can establish quota for an organization or users, and tailor quota policies to suit the needs of your registry. + +The following sections show you how to establish quota for an organization, a user, and then how to modify those settings. \ No newline at end of file diff --git a/modules/quota-management-arch.adoc b/modules/quota-management-arch.adoc new file mode 100644 index 000000000..42a360d7e --- /dev/null +++ b/modules/quota-management-arch.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="quota-management-arch"] += Quota management architecture + +With the quota management feature enabled, individual blob sizes are summed at the repository and namespace level. For example, if two tags in the same repository reference the same blob, the size of that blob is only counted once towards the repository total. Additionally, manifest list totals are counted toward the repository total. + +[IMPORTANT] +==== +Because manifest list totals are counted toward the repository total, the total quota consumed when upgrading from a previous version of {productname} might be reported differently in {productname} 3.9. In some cases, the new total might go over a repository's previously-set limit. {productname} administrators might have to adjust the allotted quota of a repository to account for these changes. +==== + +The quota management feature works by calculating the size of existing repositories and namespace with a backfill worker, and then adding or subtracting from the total for every image that is pushed or garbage collected afterward.
Additionally, the subtraction from the total happens when the manifest is garbage collected. + +[NOTE] +==== +Because subtraction occurs from the total when the manifest is garbage collected, there is a delay in the size calculation until it is able to be garbage collected. For more information about garbage collection, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red_hat_quay_garbage_collection[{productname} garbage collection]. +==== + +The following database tables hold the quota repository size, quota namespace size, and quota registry size, in bytes, of a {productname} repository within an organization: + +* `QuotaRepositorySize` +* `QuotaNameSpaceSize` +* `QuotaRegistrySize` + +The organization size is calculated by the backfill worker to ensure that it is not duplicated. When an image push is initialized, the user's organization storage is validated to check if it is beyond the configured quota limits. If an image push exceeds defined quota limitations, a soft or hard check occurs: + +* For a soft check, users are notified. +* For a hard check, the push is stopped. + +If storage consumption is within configured quota limits, the push is allowed to proceed. + +Image manifest deletion follows a similar flow, whereby the links between associated image tags and the manifest are deleted. Additionally, after the image manifest is deleted, the repository size is recalculated and updated in the `QuotaRepositorySize`, `QuotaNameSpaceSize`, and `QuotaRegistrySize` tables. \ No newline at end of file diff --git a/modules/quota-management-limitations.adoc b/modules/quota-management-limitations.adoc new file mode 100644 index 000000000..88e9b0adf --- /dev/null +++ b/modules/quota-management-limitations.adoc @@ -0,0 +1,16 @@ +:_content-type: REFERENCE +[id="quota-management-limitations"] += Quota management limitations + +Quota management helps organizations to maintain resource consumption. 
One limitation of quota management is that calculating resource consumption on push results in the calculation becoming part of the push's critical path. Without this, usage data might drift. + +The maximum storage quota size is dependent on the selected database: + +.Maximum storage quota size by database +[cols="2a,2a",options="header"] +|=== +|Database |Maximum storage quota size +|Postgres |8388608 TB +|MySQL |8388608 TB +|SQL Server |16777216 TB +|=== diff --git a/modules/quota-management-permanent-delete-39.adoc b/modules/quota-management-permanent-delete-39.adoc new file mode 100644 index 000000000..cfe70bbe4 --- /dev/null +++ b/modules/quota-management-permanent-delete-39.adoc @@ -0,0 +1,53 @@ +:_content-type: PROCEDURE +[id="quota-management-permanent-delete-39"] += Permanently deleting an image tag in {productname} 3.9 + +In some cases, users might want to delete an image tag outside of the time machine window. Use the following procedure to manually delete an image tag permanently. + +[IMPORTANT] +==== +The results of the following procedure cannot be undone. Use with caution. +==== + +.Procedure + +. Ensure that the `PERMANENTLY_DELETE_TAGS` and `RESET_CHILD_MANIFEST_EXPIRATION` parameters are set to `true` in your `config.yaml` file. For example: ++ +[source,yaml] +---- +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. Restart your {productname} deployment. + +. If you are using the legacy {productname} UI: + +.. On the {productname} UI, click the name of your organization and then the name of your repository. + +.. Click *Tags*. + +.. Select the checkbox of the name of the tag that you want to delete, for example, `tag1`. + +.. Click *Actions* -> *Delete Tags*. When the popup box appears, click *Delete Tag*. + +.. Click *Tag History* in the navigation pane. + +.. In the *Permanently Delete* category, click *Delete tag1*. When the popup box appears, click *Permanently Delete Tag*. 
++ +Now, the image tag is no longer calculated toward the total. + +. If you are using the {productname} v2 UI: + +.. In the navigation pane, click *Organizations* -> and the name of your organization, for example, *quota-test*. + +.. Click the name of your repository, for example, *ubuntu*. + +.. Check the box of the tag that you want to delete. + +.. Click *Actions* -> *Permanently Delete*. When the popup box appears, click *Delete*. ++ +[NOTE] +==== +Currently, the {productname} v2 UI does not show the Total Quota Consumed. To see Total Quota Consumed, you must switch back to the legacy UI. +==== \ No newline at end of file diff --git a/modules/quota-management-query-39.adoc b/modules/quota-management-query-39.adoc new file mode 100644 index 000000000..9960f47fb --- /dev/null +++ b/modules/quota-management-query-39.adoc @@ -0,0 +1,27 @@ +:_content-type: PROCEDURE +[id="quota-management-query-39"] += Calculating the total registry size in {productname} 3.9 + +Use the following procedure to queue a registry total calculation. + +[NOTE] +==== +This feature is done on-demand, and calculating a registry total is database intensive. Use with caution. +==== + +.Prerequisites + +* You have upgraded to {productname} 3.9. +* You are logged in as a {productname} superuser. + +.Procedure + +. On the {productname} UI, click your username -> *Super User Admin Panel*. + +. In the navigation pane, click *Manage Organizations*. + +. Click *Calculate*, next to *Total Registry Size: 0.00 KB, Updated: Never , Calculation required*. Then, click *Ok*. + +. After a few minutes, depending on the size of your registry, refresh the page. Now, the Total Registry Size should be calculated. 
For example: ++ +image:total-registry-size.png[Total registry size] \ No newline at end of file diff --git a/modules/quota-management-testing-39.adoc b/modules/quota-management-testing-39.adoc new file mode 100644 index 000000000..7bad032cb --- /dev/null +++ b/modules/quota-management-testing-39.adoc @@ -0,0 +1,59 @@ +:_content-type: PROCEDURE +[id="quota-management-testing-39"] += Testing quota management for {productname} 3.9 + +With quota management configured for {productname} 3.9, duplicative images are now only counted once towards the repository total. + +Use the following procedure to test that a duplicative image is only counted once toward the repository total. + +.Prerequisites + +* You have configured quota management for {productname} 3.9. + +.Procedure + +. Pull a sample image, for example, `ubuntu:18.04`, by entering the following command: ++ +[source,terminal] +---- +$ podman pull ubuntu:18.04 +---- + +. Tag the same image twice by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/ubuntu:18.04 quay-server.example.com/quota-test/ubuntu:tag1 +---- ++ +[source,terminal] +---- +$ podman tag docker.io/library/ubuntu:18.04 quay-server.example.com/quota-test/ubuntu:tag2 +---- + +. Push the sample image to your organization by entering the following commands: ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quota-test/ubuntu:tag1 +---- ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quota-test/ubuntu:tag2 +---- + +. On the {productname} UI, navigate to *Organization* and click the *Repository Name*, for example, *quota-test/ubuntu*. Then, click *Tags*. There should be two repository tags, `tag1` and `tag2`, each with the same manifest. 
For example: ++ +image:manifest-example.png[Manifest example] ++ +However, by clicking on the *Organization* link, we can see that the *Total Quota Consumed* is *27.94 MB*, meaning that the Ubuntu image has only been accounted for once: ++ +image:total-quota-consumed.png[Total quota consumed] ++ +If you delete one of the Ubuntu tags, the *Total Quota Consumed* remains the same. ++ +[NOTE] +==== +If you have configured the {productname} time machine to be longer than `0` seconds, subtraction will not happen until those tags pass the time machine window. If you want to expedite permanent deletion, see Permanently deleting an image tag in {productname} 3.9. +==== \ No newline at end of file diff --git a/modules/quota-organization-management-api.adoc b/modules/quota-organization-management-api.adoc new file mode 100644 index 000000000..4fa96a0fb --- /dev/null +++ b/modules/quota-organization-management-api.adoc @@ -0,0 +1,85 @@ +[id="quota-organization-management-api"] += Managing organization quota with the {productname} API + +When an organization is first created, it does not have an established quota. You can use the API to check, create, change, or delete quota limitations for an organization. + +.Prerequisites + +* You have generated an OAuth access token. + +.Procedure + +. To set a quota for an organization, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquota[`POST /api/v1/organization/{orgname}/quota`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST "https:///api/v1/organization//quota" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": 10737418240, + "limits": "10 Gi" + }' +---- ++ +.Example output +[source,terminal] +---- +"Created" +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorganizationquota[`GET /api/v1/organization/{orgname}/quota`] command to return information about the policy, including the ID number, which is required for other organization quota endpoints. For example: ++ +[source,terminal] +---- +$ curl -k -X GET -H "Authorization: Bearer " -H 'Content-Type: application/json' https://example-registry-quay-quay-enterprise.apps.docs.gcp.quaydev.org/api/v1/organization/testorg/quota | jq +---- ++ +.Example output +[source,terminal] +---- +[{"id": 1, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false}] +---- ++ +After you obtain the ID number, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorganizationquota[`GET /api/v1/organization/{orgname}/quota/{quota_id}`] command to list the quota policy. For example: ++ +[source,terminal] +---- +$ curl -X GET "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- ++ +.Example output +[source,terminal] +---- +{"id": 1, "limit_bytes": 10737418240, "limit": "10.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. You can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquota[`PUT /api/v1/organization/{orgname}/quota/{quota_id}`] command to modify the existing quota limitation. Note that this requires the policy ID. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "limit_bytes": + }' +---- ++ +.Example output +[source,json] +---- +{"id": 1, "limit_bytes": 21474836480, "limit": "20.0 GiB", "default_config": false, "limits": [], "default_config_exists": false} +---- + +. An organization's quota can be deleted with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteorganizationquota[`DELETE /api/v1/organization/{orgname}/quota/{quota_id}`] command. For example: ++ +[source,terminal] +---- +$ curl -X DELETE "https:///api/v1/organization//quota/" \ + -H "Authorization: Bearer " +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/reassigning-oauth-access-token.adoc b/modules/reassigning-oauth-access-token.adoc new file mode 100644 index 000000000..c00bf5a5b --- /dev/null +++ b/modules/reassigning-oauth-access-token.adoc @@ -0,0 +1,80 @@ +:_content-type: PROCEDURE +[id="reassigning-oauth-access-token"] += Reassigning an OAuth access token + +Organization administrators can assign OAuth API tokens to be created by other users with specific permissions. This allows the audit logs to be reflected accurately when the token is used by a user that has no organization administrative permissions to create an OAuth API token. + +[NOTE] +==== +The following procedure only works on the current {productname} UI. It is not currently implemented in the {productname} v2 UI. +==== + +.Prerequisites + +* You are logged in as a user with organization administrative privileges, which allows you to assign an OAuth API token. ++ +[NOTE] +==== +OAuth API tokens are used for authentication and not authorization. For example, the user that you are assigning the OAuth token to must have the `Admin` team role to use administrative API endpoints. 
For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#creating-an-image-repository-via-docker[Managing access to repositories]. +==== + +.Procedure + +. Optional. If not already, update your {productname} `config.yaml` file to include the `FEATURE_ASSIGN_OAUTH_TOKEN: true` field: ++ +[source,yaml] +---- +# ... +FEATURE_ASSIGN_OAUTH_TOKEN: true +# ... +---- + +. Optional. Restart your {productname} registry. + +. Log in to your {productname} registry as an organization administrator. + +. Click the name of the organization in which you created the OAuth token for. + +. In the navigation pane, click *Applications*. + +. Click the proper application name. + +. In the navigation pane, click *Generate Token*. + +. Click *Assign another user* and enter the name of the user that will take over the OAuth token. + +. Check the boxes for the desired permissions that you want the new user to have. For example, if you only want the new user to be able to create repositories, click *Create Repositories*. ++ +[IMPORTANT] +==== +Permission control is defined by the team role within an organization and must be configured regardless of the options selected here. For example, the user that you are assigning the OAuth token to must have the `Admin` team role to use administrative API endpoints. + +Solely checking the *Super User Access* box does not actually grant the user this permission. Superusers must be configured via the `config.yaml` file _and_ the box must be checked here. +==== + +. Click *Assign token*. A popup box appears that confirms authorization with the following message and shows you the approved permissions: ++ +[source,text] +---- +This will prompt user to generate a token with the following permissions: +repo:create +---- + +. Click *Assign token* in the popup box. 
You are redirected to a new page that displays the following message: ++ +[source,text] +---- +Token assigned successfully +---- + +.Verification + +. After reassigning an OAuth token, the assigned user must accept the token to receive the bearer token, which is required to use API endpoints. Request that the assigned user logs into the {productname} registry. + +. After they have logged in, they must click their username under *Users and Organizations*. + +. In the navigation pane, they must click *External Logins And Applications*. + +. Under *Authorized Applications*, they must confirm the application by clicking *Authorize Application*. They are directed to a new page where they must reconfirm by clicking *Authorize Application*. + +. They are redirected to a new page that reveals their bearer token. They must save this bearer token, as it cannot be viewed again. \ No newline at end of file diff --git a/modules/red-hat-quay-builders-ui.adoc b/modules/red-hat-quay-builders-ui.adoc new file mode 100644 index 000000000..d78e21f57 --- /dev/null +++ b/modules/red-hat-quay-builders-ui.adoc @@ -0,0 +1,74 @@ +:_content-type: PROCEDURE +[id="creating-a-build-trigger"] += Creating a build trigger + +The following procedure sets up a _custom Git trigger_. A custom Git trigger is a generic way for any Git server to act as a _build trigger_. It relies solely on SSH keys and webhook endpoints. Creating a custom Git trigger is similar to the creation of any other trigger, with the exception of the following: + +ifeval::["{context}" == "quay-io"] +* {quayio} cannot automatically detect the proper Robot Account to use with the trigger. This must be done manually during the creation process. +endif::[] + +ifeval::["{context}" == "use-quay"] +* {productname} cannot automatically detect the proper Robot Account to use with the trigger. This must be done manually during the creation process. 
+endif::[] + +These steps can be replicated to create a _build trigger_ using Github, Gitlab, or Bitbucket, however, you must configure the credentials for these services in your `config.yaml` file. + +[NOTE] +==== +* If you want to use Github to create a _build trigger_, you must configure Github to be used with {productname} by creating an OAuth application. For more information, see "Creating an OAuth application Github". +==== + +ifeval::["{context}" == "quay-builders-image-automation"] +.Prerequisites + +* For {productname-ocp} deployments, you have configured your {ocp} environment for either link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/build/tmp/en-US/html-single/index#bare-metal-builds[bare metal builds] or link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/build/tmp/en-US/html-single/index#red-hat-quay-builders-enhancement[virtual builds]. +endif::[] + +.Procedure + +. Log in to your {productname} registry. + +. In the navigation pane, click *Repositories*. + +. Click *Create Repository*. + +. Click the *Builds* tab. + +. On the *Builds* page, click *Create Build Trigger*. + +. Select the desired platform, for example, *Github*, *Bitbucket*, *Gitlab*, or use a custom Git repository. For this example, click *Custom Git Repository Push*. + +. Enter a custom Git repository name, for example, `\git@github.com:/.git`. Then, click *Next*. + +. When prompted, configure the tagging options by selecting one of, or both of, the following options: ++ +* *Tag manifest with the branch or tag name*. When selecting this option, the built manifest is tagged with the name of the branch or tag for the git commit. ++ +* *Add `latest` tag if on default branch*. When selecting this option, the built manifest is tagged with `latest` if the build occurred on the default branch of the repository. ++ +Optionally, you can add a custom tagging template. 
There are multiple tag templates that you can enter here, including using short SHA IDs, timestamps, author names, committer, and branch names from the commit as tags. For more information, see "Tag naming for build triggers". ++ +After you have configured tagging, click *Next*. + +. When prompted, select the location of the Dockerfile to be built when the trigger is invoked. If the Dockerfile is located at the root of the git repository and named Dockerfile, enter */Dockerfile* as the Dockerfile path. Then, click *Next*. + +. When prompted, select the context for the Docker build. If the Dockerfile is located at the root of the Git repository, enter `/` as the build context directory. Then, click *Next*. + +. Optional. Choose an optional robot account. This allows you to pull a private base image during the build process. If you know that a private base image is not used, you can skip this step. + +. Click *Next*. Check for any verification warnings. If necessary, fix the issues before clicking *Finish*. + +. You are alerted that the trigger has been successfully activated. Note that using this trigger requires the following actions: +* You must give the following public key read access to the git repository. +* You must set your repository to `POST` to the following URL to trigger a build. ++ +Save the SSH Public Key, then click *Return to /*. You are redirected to the *Builds* page of your repository. + +. On the *Builds* page, you now have a _build trigger_. For example: ++ +image:build-trigger-example.png[Example Build trigger] ++ +After you have created a custom Git trigger, additional steps are required. Continue on to "Setting up a custom Git trigger". ++ +If you are setting up a _build trigger_ for Github, Gitlab, or Bitbucket, continue on to "Manually triggering a build". 
\ No newline at end of file diff --git a/modules/red-hat-quay-gcp-bucket-modify.adoc b/modules/red-hat-quay-gcp-bucket-modify.adoc new file mode 100644 index 000000000..1ea9f12ca --- /dev/null +++ b/modules/red-hat-quay-gcp-bucket-modify.adoc @@ -0,0 +1,87 @@ +:_content-type: PROCEDURE +[id="red-hat-quay-gcp-bucket-modify"] += Modifying your Google Cloud Platform object bucket + +[NOTE] +==== +Currently, modifying your Google Cloud Platform object bucket is not supported on IBM Power and IBM Z. +==== + +Use the following procedure to configure cross-origin resource sharing (CORS) for virtual builders. Without CORS configuration, uploading a build Dockerfile fails. + +.Procedure + +. Use the following reference to create a JSON file for your specific CORS needs. For example: ++ +[source,terminal] +---- +$ cat gcp_cors.json +---- ++ +.Example output ++ +[source,yaml] +---- +[ + { + "origin": ["*"], + "method": ["GET"], + "responseHeader": ["Authorization"], + "maxAgeSeconds": 3600 + }, + { + "origin": ["*"], + "method": ["PUT"], + "responseHeader": [ + "Content-Type", + "x-goog-acl", + "origin"], + "maxAgeSeconds": 3600 + } +] +---- + +. Enter the following command to update your GCP storage bucket: ++ +[source,terminal] +---- +$ gcloud storage buckets update gs:// --cors-file=./gcp_cors.json +---- ++ +.Example output ++ +[source,terminal] +---- +Updating + Completed 1 +---- + +. 
You can display the updated CORS configuration of your GCP bucket by running the following command: ++ +[source,terminal] +---- +$ gcloud storage buckets describe gs:// --format="default(cors)" +---- ++ +.Example output ++ +[source,yaml] +---- +cors: +- maxAgeSeconds: 3600 + method: + - GET + origin: + - '*' + responseHeader: + - Authorization +- maxAgeSeconds: 3600 + method: + - PUT + origin: + - '*' + responseHeader: + - Content-Type + - x-goog-acl + - origin +---- \ No newline at end of file diff --git a/modules/red-hat-quay-namespace-auto-pruning-overview.adoc b/modules/red-hat-quay-namespace-auto-pruning-overview.adoc new file mode 100644 index 000000000..5b255e504 --- /dev/null +++ b/modules/red-hat-quay-namespace-auto-pruning-overview.adoc @@ -0,0 +1,43 @@ +:_content-type: CONCEPT +[id="red-hat-quay-namespace-auto-pruning-overview"] += {productname} auto-pruning overview + +{productname} administrators can set up multiple auto-pruning policies on organizations and repositories; administrators can also set up auto-pruning policies at the registry level so that they apply to all organizations, including all newly created organizations. This feature allows for image tags to be automatically deleted within an organization or a repository based on specified criteria, which allows {productname} organization owners to stay below the storage quota by automatically pruning content. + +Currently, two policies have been added: + +* **Prune images by the number of tags**. For this policy, when the actual number of tags exceeds the desired number of tags, the oldest tags are deleted by their creation date until the desired number of tags is achieved. + +* **Prune image tags by creation date**. For this policy, any tags with a creation date older than the given time span, for example, 10 days, are deleted. 
+ +After tags are automatically pruned, they go into the {productname} time machine, or the amount of time, after a tag is deleted, that the tag is accessible before being garbage collected. The expiration time of an image tag is dependent on your organization's settings. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#garbage-collection[{productname} garbage collection]. + +Users can configure multiple policies per namespace or repository; this can be done through the {productname} v2 UI. Policies can also be set by using the API endpoints through the command-line interface (CLI). + +[id="prerequisites-limitations-namespace-autopruning"] +== Prerequisites and limitations for auto-pruning and multiple policies + +The following prerequisites and limitations apply to the auto-pruning feature: + +* Auto-pruning is not available when using the {productname} legacy UI. You must use the v2 UI to create, view, or modify auto-pruning policies. + +* Auto-pruning is only supported in databases that support the `FOR UPDATE SKIP LOCKED` SQL command. + +* Auto-pruning is unavailable on mirrored repositories and read-only repositories. + +* If you are configuring multiple auto-prune policies, rules are processed without particular order, and individual result sets are processed immediately before moving on to the next rule. +** For example, if an image is already subject for garbage collection by one rule, it cannot be excluded from pruning by another rule. + +* If you have both an auto-pruning policy for an organization and a repository, the auto-pruning policies set at the organization level are executed first. + +[id="autopruning-regular-expressions"] +== Regular expressions with auto-pruning + +{productname} administrators can leverage _regular expressions_, or _regex_, to match a subset of tags for both organization- and repository-level auto-pruning policies. 
This provides more granular auto-pruning policies to target only certain image tags for removal. Consider the following when using _regular expressions_ with the auto-pruning feature: + +* _Regular expressions_ are optional. +* If a _regular expression_ is not provided, the auto-pruner defaults to pruning all image tags in the organization or the repository. Because _regular expressions_ are user-supplied, they must be protected against ReDoS attacks. +* Registry-wide policies do not currently support _regular expressions_. Only organization- and repository-level auto-pruning policies support _regular expressions_. +* _Regular expressions_ can be configured to prune images that either do, or _do not_, match the provided _regex_ pattern. + +Some of the following procedures provide example auto-pruning policies using _regular expressions_ that you can use as a reference when creating an auto-prune policy. diff --git a/modules/red-hat-quay-quota-management-configure-39.adoc b/modules/red-hat-quay-quota-management-configure-39.adoc new file mode 100644 index 000000000..0067aef52 --- /dev/null +++ b/modules/red-hat-quay-quota-management-configure-39.adoc @@ -0,0 +1,91 @@ +:_content-type: CONCEPT +[id="red-hat-quay-quota-management-39"] += Quota management for {productname} 3.9 + +If you are upgrading to {productname} 3.9, you must reconfigure the quota management feature. This is because with {productname} 3.9, calculation is done differently. As a result, totals prior to {productname} 3.9 are no longer valid. There are two methods for configuring quota management in {productname} 3.9, which are detailed in the following sections. + +[NOTE] +==== +* This is a one-time calculation that must be done after you have upgraded to {productname} 3.9. +* Superuser privileges are required to create, update, and delete quotas. While quotas can be set for users as well as organizations, you cannot reconfigure the _user_ quota using the {productname} UI and you must use the API instead.
+==== + +[id="quota-management-configuring-38"] +== Option A: Configuring quota management for {productname} 3.9 by adjusting the QUOTA_TOTAL_DELAY feature flag + +Use the following procedure to recalculate {productname} 3.9 quota management by adjusting the `QUOTA_TOTAL_DELAY` feature flag. + +[NOTE] +==== +With this recalculation option, the totals appear as *0.00 KB* until the allotted time designated for `QUOTA_TOTAL_DELAY`. +==== + +.Prerequisites + +* You have upgraded to {productname} 3.9. +* You are logged into {productname} 3.9 as a superuser. + +.Procedure + +. Deploy {productname} 3.9 with the following `config.yaml` settings: ++ +[source,yaml] +---- +FEATURE_QUOTA_MANAGEMENT: true +FEATURE_GARBAGE_COLLECTION: true +PERMANENTLY_DELETE_TAGS: true +QUOTA_TOTAL_DELAY_SECONDS: 1800 <1> +RESET_CHILD_MANIFEST_EXPIRATION: true +---- +<1> The `QUOTA_TOTAL_DELAY_SECONDS` flag defaults to `1800` seconds, or 30 minutes. This allows {productname} 3.9 to successfully deploy before the quota management feature begins calculating storage consumption for every blob that has been pushed. Setting this flag to a lower number might result in miscalculation; it *must* be set to a number that is greater than the time it takes your {productname} deployment to start. `1800` is the recommended setting, however larger deployments that take longer than 30 minutes to start might require a longer duration than `1800`. + +. Navigate to the {productname} UI and click the name of your Organization. + +. The *Total Quota Consumed* should read *0.00 KB*. Additionally, the *Backfill Queued* indicator should be present. + +. After the allotted time, for example, 30 minutes, refresh your {productname} deployment page and return to your Organization. Now, the *Total Quota Consumed* should be present. 
+ +[id="quota-management-configuring-39"] +== Option B: Configuring quota management for {productname} 3.9 by setting QUOTA_TOTAL_DELAY_SECONDS to 0 + +Use the following procedure to recalculate {productname} 3.9 quota management by setting `QUOTA_TOTAL_DELAY_SECONDS` to `0`. + +[NOTE] +==== +Using this option prevents the possibility of miscalculations; however, it is more time intensive. Use the following procedure for when your {productname} deployment swaps the `FEATURE_QUOTA_MANAGEMENT` parameter from `false` to `true`. +==== + +.Prerequisites + +* You have upgraded to {productname} 3.9. +* You are logged into {productname} 3.9 as a superuser. + +.Procedure + +. Deploy {productname} 3.9 with the following `config.yaml` settings: ++ +[source,yaml] +---- +FEATURE_GARBAGE_COLLECTION: true +FEATURE_QUOTA_MANAGEMENT: true +QUOTA_BACKFILL: false +QUOTA_TOTAL_DELAY_SECONDS: 0 +PERMANENTLY_DELETE_TAGS: true +RESET_CHILD_MANIFEST_EXPIRATION: true +---- + +. Navigate to the {productname} UI and click the name of your Organization. + +. The *Total Quota Consumed* should read *0.00 KB*. + +. Redeploy {productname} and set the `QUOTA_BACKFILL` flag to `true`. For example: ++ +[source,yaml] +---- +QUOTA_BACKFILL: true +---- ++ +[NOTE] +==== +If you choose to disable quota management after it has calculated totals, {productname} marks those totals as stale. If you re-enable the quota management feature again in the future, those namespaces and repositories are recalculated by the backfill worker.
+==== \ No newline at end of file diff --git a/modules/red-hat-quay-s3-bucket-modify.adoc b/modules/red-hat-quay-s3-bucket-modify.adoc new file mode 100644 index 000000000..f36736a06 --- /dev/null +++ b/modules/red-hat-quay-s3-bucket-modify.adoc @@ -0,0 +1,51 @@ +:_content-type: PROCEDURE +[id="red-hat-quay-s3-bucket-modify"] += Modifying your AWS S3 storage bucket + +If you are using AWS S3 storage, you must change your storage bucket in the AWS console prior to starting a _build_. + +.Procedure + +. Log in to your AWS console at link:https://s3.console.aws.amazon.com[s3.console.aws.com]. + +. In the search bar, search for `S3` and then click *S3*. + +. Click the name of your bucket, for example, `myawsbucket`. + +. Click the *Permissions* tab. + +. Under *Cross-origin resource sharing (CORS)*, include the following parameters: ++ +[source,yaml] +---- + [ + { + "AllowedHeaders": [ + "Authorization" + ], + "AllowedMethods": [ + "GET" + ], + "AllowedOrigins": [ + "*" + ], + "ExposeHeaders": [], + "MaxAgeSeconds": 3000 + }, + { + "AllowedHeaders": [ + "Content-Type", + "x-amz-acl", + "origin" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedOrigins": [ + "*" + ], + "ExposeHeaders": [], + "MaxAgeSeconds": 3000 + } + ] +---- \ No newline at end of file diff --git a/modules/ref_deploy_quay_openshift.adoc b/modules/ref_deploy_quay_openshift.adoc index ff7c1fa13..a4754be08 100644 --- a/modules/ref_deploy_quay_openshift.adoc +++ b/modules/ref_deploy_quay_openshift.adoc @@ -29,19 +29,19 @@ metadata: name: quay-enterprise-config-secret ---- -.quay-enterprise-redhat-quay-pull-secret.yaml +.quay-enterprise-redhat-pull-secret.yaml [source,yaml] ---- apiVersion: v1 kind: Secret metadata: namespace: quay-enterprise - name: redhat-quay-pull-secret + name: redhat-pull-secret data: .dockerconfigjson: <1> type: kubernetes.io/dockerconfigjson ---- -<1> Change to include the credentials shown from link:https://access.redhat.com/solutions/3533201[Accessing Red Hat Quay] +<1> Change to 
include the credentials shown from link:https://access.redhat.com/solutions/3533201[Accessing {productname}] == {productname} storage @@ -102,7 +102,7 @@ type: kubernetes.io/dockerconfigjson spec: containers: - name: postgres - image: registry.access.redhat.com/rhscl/postgresql-10-rhel7:1-35 + image: registry.redhat.io/rhel8/postgresql-10:latest imagePullPolicy: "IfNotPresent" ports: - containerPort: 5432 @@ -162,9 +162,6 @@ rules: - secrets verbs: - get - - put - - patch - - update - apiGroups: - "" resources: @@ -226,7 +223,7 @@ spec: spec: containers: - name: redis-master - image: registry.access.redhat.com/rhscl/redis-32-rhel7 + image: registry.redhat.io/rhel8/redis-5 imagePullPolicy: "IfNotPresent" ports: - containerPort: 6379 @@ -271,13 +268,13 @@ spec: spec: containers: - name: quay-enterprise-config-app - image: quay.io/redhat/quay:v{productmin} + image: {productrepo}/{quayimage}:{productminv} ports: - containerPort: 8443 command: ["/quay-registry/quay-entrypoint.sh"] args: ["config", "secret"] imagePullSecrets: - - name: redhat-quay-pull-secret + - name: redhat-pull-secret ---- .quay-enterprise-config-service-clusterip.yaml @@ -376,20 +373,25 @@ spec: volumes: - name: configvolume secret: - secretName: quay-enterprise-secret + secretName: quay-enterprise-config-secret containers: - name: quay-enterprise-app - image: quay.io/redhat/quay:{productmin} + image: {productrepo}/{quayimage}:{productminv} ports: - containerPort: 8443 volumeMounts: - name: configvolume readOnly: false mountPath: /conf/stack + resources: + limits: + memory: "4Gi" + requests: + memory: "2Gi" imagePullSecrets: - - name: redhat-quay-pull-secret + - name: redhat-pull-secret ---- -<1> Only one instance of the quay container is defined here. Adjust replicas based on demand. +<1> Only one instance of the `Quay` container is defined here. Adjust replicas based on demand. 
== Clair image scanning @@ -438,7 +440,7 @@ spec: value: clair <2> - name: POSTGRESQL_PASSWORD value: test123 <3> - image: registry.access.redhat.com/rhscl/postgresql-10-rhel7:1-35 + image: registry.redhat.io/rhel8/postgresql-10:latest imagePullPolicy: IfNotPresent name: postgres-clair ports: @@ -480,13 +482,14 @@ spec: ---- .clair-config.yaml +Modify source, endpoint, key_id, and registry settings to match your environment. [source,yaml,subs="verbatim,attributes"] ---- clair: database: type: pgsql options: - source: host=postgres-clair port=5432 dbname=clair user=clair password=test123 sslmode=disable <1> + source: host=172.30.87.93 port=5432 dbname=clair user=clair password=test123 sslmode=disable cachesize: 16384 api: # The port at which Clair will report its health status. For example, if Clair is running at @@ -510,7 +513,7 @@ spec: http: # QUAY_ENDPOINT defines the endpoint at which Quay Enterprise is running. # For example: https://myregistry.mycompany.com - endpoint: http://quay-enterprise-clusterip/secscan/notify + endpoint: https://quay-enterprise.apps.lzha0413.qe.devcluster.openshift.com/secscan/notify <1> proxy: http://localhost:6063 jwtproxy: @@ -529,7 +532,7 @@ spec: options: # The ID of the service key generated for Clair. The ID is returned when setting up # the key in [Quay Enterprise Setup](security-scanning.md) - key_id: cd40f1c6a63f574c68ce882258925374882fac2b2f535ae5f8157c429e0c4b2e <2> + key_id: fc6c2b02c495c9b8fc674fcdbfdd2058f2f559d6bdd19d0ba70af26c0cb66a48 <2> private_key_path: /clair/config/security_scanner.pem verifier_proxies: @@ -554,7 +557,7 @@ spec: options: # QUAY_ENDPOINT defines the endpoint at which Quay Enterprise is running. # Example: https://myregistry.mycompany.com - registry: http://quay-enterprise-clusterip/keys/ + registry: https://quay-enterprise.apps.lzha0413.qe.devcluster.openshift.com/keys/ ---- <1> Check that the database options match those set earlier in postgres-clair-deployment.yaml. 
<2> Ensure that the Key ID matches the value from the key generated from the {productname} Setup screen. @@ -604,7 +607,7 @@ spec: namespace: quay-enterprise spec: containers: - - image: quay.io/redhat/clair-jwt:v3.0.4 + - image: {productrepo}/clair-jwt:{productminv} imagePullPolicy: IfNotPresent name: clair-scanner ports: @@ -617,13 +620,25 @@ spec: volumeMounts: - mountPath: /clair/config name: configvolume + - mountPath: /etc/pki/ca-trust/source/anchors/ca.crt + name: quay-ssl + subPath: ca.crt imagePullSecrets: - - name: redhat-quay-pull-secret + - name: redhat-pull-secret restartPolicy: Always volumes: - name: configvolume secret: secretName: clair-scanner-config-secret + - name: quay-ssl + secret: + defaultMode: 420 + items: + - key: ssl.cert + path: ca.crt + secretName: quay-enterprise-config-secret + serviceAccount: clair-jwt + serviceAccountName: clair-jwt ---- == Repository mirroring @@ -649,13 +664,21 @@ spec: labels: quay-enterprise-component: mirror-app spec: + volumes: + - name: configvolume + secret: + secretName: quay-enterprise-config-secret containers: - name: quay-enterprise-mirror-app - image: quay.io/redhat/quay:v{productmin} + image: {productrepo}/{quayimage}:{productminv} ports: - containerPort: 8443 + volumeMounts: + - name: configvolume + readOnly: false + mountPath: /conf/stack command: ["/quay-registry/quay-entrypoint.sh"] args: ["repomirror"] imagePullSecrets: - - name: redhat-quay-pull-secret + - name: redhat-pull-secret ---- diff --git a/modules/ref_quay-integration-config-fields.adoc b/modules/ref_quay-integration-config-fields.adoc new file mode 100644 index 000000000..055ef44be --- /dev/null +++ b/modules/ref_quay-integration-config-fields.adoc @@ -0,0 +1,24 @@ +:_content-type: REFERENCE +[id="quay-integration-config-fields"] += QuayIntegration configuration fields + +The following configuration fields are available for the QuayIntegration custom resource: + +[cols="4a,2a,2a",options="header"] +|=== +|Name |Description |Schema
+|allowlistNamespaces + +(Optional) | A list of namespaces to include. |Array +|clusterID + +(Required) |The ID associated with this cluster. |String +|credentialsSecret.key + +(Required) | The secret containing credentials to communicate with the Quay registry. |Object +|denylistNamespaces + +(Optional) | A list of namespaces to exclude. |Array +|insecureRegistry + +(Optional) |Whether to skip TLS verification to the Quay registry |Boolean +|quayHostname + +(Required) |The hostname of the Quay registry. |String +|scheduledImageStreamImport + +(Optional) | Whether to enable image stream importing. |Boolean +|=== diff --git a/modules/regenerating-robot-account-token-api.adoc b/modules/regenerating-robot-account-token-api.adoc new file mode 100644 index 000000000..be1d0aac7 --- /dev/null +++ b/modules/regenerating-robot-account-token-api.adoc @@ -0,0 +1,44 @@ +:_content-type: CONCEPT +[id="regenerating-robot-account-api"] += Regenerating a robot account token by using the {productname} API + +Use the following procedure to regenerate a robot account token using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. 
+ +.Procedure + +* Enter the following command to regenerate a robot account token for an organization using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#regenerateorgrobottoken[`POST /api/v1/organization/{orgname}/robots/{robot_shortname}/regenerate`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/organization//robots//regenerate" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test-org+test", "created": "Fri, 10 May 2024 17:46:02 -0000", "last_accessed": null, "description": "", "token": ""} +---- + +* Enter the following command to regenerate a robot account token for the current user with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#regenerateuserrobottoken[`POST /api/v1/user/robots/{robot_shortname}/regenerate`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + "/api/v1/user/robots//regenerate" +---- ++ +Example output ++ +[source,terminal] +---- +{"name": "quayadmin+test", "created": "Fri, 10 May 2024 14:12:11 -0000", "last_accessed": null, "description": "", "token": ""} +---- \ No newline at end of file diff --git a/modules/regenerating-robot-account-token-ui.adoc b/modules/regenerating-robot-account-token-ui.adoc new file mode 100644 index 000000000..1cd2537de --- /dev/null +++ b/modules/regenerating-robot-account-token-ui.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="regenerating-robot-account-ui"] += Regenerating a robot account token by using the {productname} UI + +Use the following procedure to regenerate a robot account token by using the {productname} UI. + +.Prerequisites + +* You have logged into {productname}. + +.Procedure + +. Click the name of an Organization. + +. In the navigation pane, click *Robot accounts*. + +. Click the name of your robot account, for example, *testorg3+test*. 
+ +. Click *Regenerate token* in the popup box. \ No newline at end of file diff --git a/modules/registry-wide-access-management.adoc b/modules/registry-wide-access-management.adoc new file mode 100644 index 000000000..3288fd90b --- /dev/null +++ b/modules/registry-wide-access-management.adoc @@ -0,0 +1,10 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="registry-wide-access-management"] += Registry-wide access management + +The following sections provide information about adjusting registry-wide permissions for both users and superusers. \ No newline at end of file diff --git a/modules/repo-creation-management.adoc b/modules/repo-creation-management.adoc new file mode 100644 index 000000000..7f4548747 --- /dev/null +++ b/modules/repo-creation-management.adoc @@ -0,0 +1,122 @@ +[id="repo-creation-api"] += Creating and configuring repositories by using the {productname} API + +Repositories can be created, retrieved, changed, and deleted by using the {productname} API. + +.Procedure + +. Enter the following command to create a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepo[`POST /api/v1/repository`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." + }' \ + "https://quay-server.example.com/api/v1/repository" +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "", "kind": "image"} +---- + +. You can list a repositories with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepos[`GET /api/v1/repository`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository?public=true&starred=false&namespace=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"repositories": [{"namespace": "quayadmin", "name": "busybox", "description": null, "is_public": false, "kind": "image", "state": "MIRROR", "is_starred": false, "quota_report": {"quota_bytes": 2280675, "configured_quota": 2199023255552}}]} +---- + +. Visibility can be changed from public to private with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepovisibility[`POST /api/v1/repository/{repository}/changevisibility`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository///changevisibility" +---- +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. You can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] command to return details about a repository: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http://quay-server.example.com/api/v1/error/not_found", "status": 404} +---- + +. 
Repository descriptions can be updated with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepo[`PUT /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "description": "This is an updated description for the repository." + }' \ + "https://quay-server.example.com/api/v1/repository//" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. Enter the following command to delete a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleterepository[`DELETE /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +This command does not return output in the CLI. + +//// +. The link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepostate[`PUT /api/v1/repository/{repository}/changestate`] API endpoint can be used to change the state of the repository: ++ +[source,terminal] +---- + +---- ++ +.Example output ++ +[source,terminal] +---- + +---- +//// \ No newline at end of file diff --git a/modules/repo-manage-api.adoc b/modules/repo-manage-api.adoc new file mode 100644 index 000000000..c85dabf42 --- /dev/null +++ b/modules/repo-manage-api.adoc @@ -0,0 +1,4 @@ +[id="repo-manage-api"] += Creating and configuring repositories by using the {productname} API + +Repositories can be created, retrieved, changed, and deleted by using the {productname} API. 
\ No newline at end of file diff --git a/modules/repo-mirroring-troubleshooting-issues.adoc b/modules/repo-mirroring-troubleshooting-issues.adoc new file mode 100644 index 000000000..a421b5b56 --- /dev/null +++ b/modules/repo-mirroring-troubleshooting-issues.adoc @@ -0,0 +1,112 @@ +:_content-type: PROCEDURE +[id="repo-mirroring-troubleshooting-issues"] += Troubleshooting repository mirroring + +Use the following sections to troubleshoot repository mirroring for {productname}. + +//// +[id="reviewing-logs-repo-mirroring"] +== Reviewing the logs of your mirrored {productname} instances + +Use the following procedure to review the logs of your mirrored instances. + +.Prerequisites + +* You have enabled debug mode in your {productname} `config.yaml` file. + +.Procedure + +* Retrieve the logs from all running mirror pods. + +.. If you are using the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc logs mirror-pod +---- + +.. If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman logs repomirror-container +---- + + +[id="checking-mirror-configuration"] +== Checking the mirror configuration + +Use the following procedure to review the mirror configuration settings in your {productname} instances. + +.Procedure + +* Review your `config.yaml` settings. + +.. If you are using the {productname} Operator, enter the following command: ++ +[source,terminal] +---- +$ oc exec -it quay-pod -- cat /conf/stack/config.yaml +---- + +.. 
If you are using a standalone deployment of {productname}, enter the following command: ++ +[source,terminal] +---- +$ podman exec -it quay-container cat /conf/stack/config.yaml +---- +//// + +[id="verifying-authentication-permissions"] +== Verifying authentication and permissions + +Ensure that the authentication credentials used for mirroring have the necessary permissions and access rights on both the source and destination {productname} instances. + +On the {productname} UI, check the following settings: + +* The access control settings. Ensure that the user or service account performing the mirroring operation has the required privileges. +* The permissions of your robot account on the {productname} registry. + +//// +[id="manual-copy"] +== Checking slow disk issues + +Repository mirroring uses `skopeo copy` as a background process. Test the time it takes to copy an image by manually running `skopeo copy`. This can help isolate any issues related to specific images or repositories and narrow down the troubleshooting scope. Additionally, it can help identify any network issues or bottlenecks that might be impacting the mirroring performance or causing failures. Pay attention to network latency, packet loss, or any unusual network patterns. + +Use the following procedure to time `skopeo copy`. 
+ +.Procedure + +* Enter the following command to measure the time it takes to perform `skopeo copy`: ++ +[source,terminal] +---- +$ time { skopeo copy docker://SOURCE_REGISTRY_IMAGE docker://DESTINATION_REGISTRY/REPOSITORY/IMAGE:TAG } +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 4182b7568f06 skipped: already exists +Copying blob 4182b7568f06 skipped: already exists +Copying blob b7f76d1d9088 skipped: already exists +Copying blob ede3648667b7 skipped: already exists +Copying blob 021495d3c262 done +Copying blob 335fbccacdd3 done +Copying blob 4c70e3d931b6 done +Copying config d9f6ca2777 done +Writing manifest to image destination +Storing signatures + +real 6m19.291s +user 0m58.207s +sys 0m40.666s +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/7018078[Troubleshooting Quay Repository Mirroring]. +//// \ No newline at end of file diff --git a/modules/repo-organizations-and-users-intro.adoc b/modules/repo-organizations-and-users-intro.adoc new file mode 100644 index 000000000..f4cbdfbc6 --- /dev/null +++ b/modules/repo-organizations-and-users-intro.adoc @@ -0,0 +1,25 @@ +[[repository-organizations-and-users]] += Repository organizations and users + +{productname} repository organizations provide a way to share repositories under a common namespace that does not belong to a single user. These organizations allow repositories to be used in a shared setting, for example, by a company. + +The following is a brief overview of the organization's main page: + +* **Managing teams**: used to manage teams within the organization +* **Creating a team**: used to create a team within the organization +* **Team Global Permissions**: used to define the global permissions of teams within an organization +* **Managing team members**: used to manage the members of teams within the organization.
+* **Defined permissions**: ++ +[cols="1,1",options="header"] +|=== +|Permission |Abilities +|*Members* +|Inherits all permission sets for the team + +|*Creator* +|All member permissions, plus the ability to create new repositories + +|*Admin* +|Full administrative access to the organization, including the ability to create new repositories, add members, and set permissions. +|=== diff --git a/modules/repo-permission-api.adoc b/modules/repo-permission-api.adoc new file mode 100644 index 000000000..6c5e007e8 --- /dev/null +++ b/modules/repo-permission-api.adoc @@ -0,0 +1,6 @@ +[id="repo-permission-api"] += Managing repository permissions by using the {productname} API + +Repository permissions can be managed by using the {productname} API. For example, you can create, view, and delete user and team permissions. + +The following procedures show you how to manage repository permissions by using the {productname} API. \ No newline at end of file diff --git a/modules/repo-policy-api.adoc b/modules/repo-policy-api.adoc new file mode 100644 index 000000000..08c0cb5bd --- /dev/null +++ b/modules/repo-policy-api.adoc @@ -0,0 +1,122 @@ +[id="policy-api"] += Managing auto-prune policies by using the {productname} API + +Auto-prune policies can be created, retrieved, changed, and deleted for organizations, repositories, and users by using the {productname} API. + +.Procedure + +. Enter the following command to create a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createrepo[`POST /api/v1/repository`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "repository": "", + "visibility": "", + "description": "." + }' \ + "https://quay-server.example.com/api/v1/repository" +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "", "kind": "image"} +---- + +.
You can list repositories with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepos[`GET /api/v1/repository`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository?public=true&starred=false&namespace=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"repositories": [{"namespace": "quayadmin", "name": "busybox", "description": null, "is_public": false, "kind": "image", "state": "MIRROR", "is_starred": false, "quota_report": {"quota_bytes": 2280675, "configured_quota": 2199023255552}}]} +---- + +. Visibility can be changed from public to private with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepovisibility[`POST /api/v1/repository/{repository}/changevisibility`] endpoint: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "visibility": "private" + }' \ + "https://quay-server.example.com/api/v1/repository///changevisibility" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. You can check the {productname} UI, or you can enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] command to return details about a repository: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +.Example output ++ +[source,terminal] +---- +{"detail": "Not Found", "error_message": "Not Found", "error_type": "not_found", "title": "not_found", "type": "http://quay-server.example.com/api/v1/error/not_found", "status": 404} +---- + +. 
Repository descriptions can be updated with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#updaterepo[`PUT /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "description": "This is an updated description for the repository." + }' \ + "https://quay-server.example.com/api/v1/repository//" +---- ++ +.Example output ++ +[source,terminal] +---- +{"success": true} +---- + +. Enter the following command to delete a repository using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleterepository[`DELETE /api/v1/repository/{repository}`] endpoint: ++ +[source,terminal] +---- +$ curl -X DELETE -H "Authorization: Bearer " "/api/v1/repository//" +---- ++ +This command does not return output in the CLI. + +//// +. The link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changerepostate[`PUT /api/v1/repository/{repository}/changestate`] API endpoint can be used to change the state of the repository: ++ +[source,terminal] +---- + +---- ++ +.Example output ++ +[source,terminal] +---- + +---- +//// \ No newline at end of file diff --git a/modules/repository-events.adoc b/modules/repository-events.adoc new file mode 100644 index 000000000..db539c1ab --- /dev/null +++ b/modules/repository-events.adoc @@ -0,0 +1,307 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT + +[id="repository-events"] += Repository events description + +The following sections detail repository events. 
+ +[discrete] +[id="repository-push"] +== Repository Push + +A successful push of one or more images was made to the repository: + +---- +{ + "name": "repository", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "homepage": "https://quay.io/repository/dgangaia/repository", + "updated_tags": [ + "latest" + ] +} +---- + +[discrete] +[id="dockerfile-build-queued"] +== Dockerfile Build Queued + +The following example is a response from a Dockerfile Build that has been queued into the Build system. + +[NOTE] +==== +Responses can differ based on the use of optional attributes. +==== + +---- +{ + "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "repo": "test", + "trigger_metadata": { + "default_branch": "master", + "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", + "ref": "refs/heads/master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", + "date": "2019-03-06T12:48:24+11:00", + "message": "adding 5", + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + }, + "committer": { + "username": "web-flow", + "url": "https://github.com/web-flow", + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" + } + } + }, + "is_manual": false, + "manual_user": null, + "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2" +} +---- + +[discrete] +[id="dockerfile-build-started"] +== Dockerfile Build started + +The following example is a response 
from a Dockerfile Build that has been queued into the Build system. + +[NOTE] +==== +Responses can differ based on the use of optional attributes. +==== + +---- +{ + "build_id": "a8cc247a-a662-4fee-8dcb-7d7e822b71ba", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "build_name": "50bc599", + "trigger_metadata": { //Optional + "commit": "50bc5996d4587fd4b2d8edc4af652d4cec293c42", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/50bc5996d4587fd4b2d8edc4af652d4cec293c42", + "date": "2019-03-06T14:10:14+11:00", + "message": "test build", + "committer": { //Optional + "username": "web-flow", + "url": "https://github.com/web-flow", //Optional + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional + }, + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/a8cc247a-a662-4fee-8dcb-7d7e822b71ba" +} +---- + +[discrete] +[id="dockerfile-build-successfully-completed"] +== Dockerfile Build successfully completed + +The following example is a response from a Dockerfile Build that has been successfully completed by the Build system. + +[NOTE] +==== +This event occurs simultaneously with a _Repository Push_ event for the built image or images. 
+==== + +---- +{ + "build_id": "296ec063-5f86-4706-a469-f0a400bf9df2", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "build_name": "b7f7d2b", + "image_id": "sha256:0339f178f26ae24930e9ad32751d6839015109eabdf1c25b3b0f2abf8934f6cb", + "trigger_metadata": { + "commit": "b7f7d2b948aacbe844ee465122a85a9368b2b735", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/b7f7d2b948aacbe844ee465122a85a9368b2b735", + "date": "2019-03-06T12:48:24+11:00", + "message": "adding 5", + "committer": { //Optional + "username": "web-flow", + "url": "https://github.com/web-flow", //Optional + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional + }, + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/296ec063-5f86-4706-a469-f0a400bf9df2", + "manifest_digests": [ + "quay.io/dgangaia/test@sha256:2a7af5265344cc3704d5d47c4604b1efcbd227a7a6a6ff73d6e4e08a27fd7d99", + "quay.io/dgangaia/test@sha256:569e7db1a867069835e8e97d50c96eccafde65f08ea3e0d5debaf16e2545d9d1" + ] +} +---- + +[discrete] +[id="dockerfile-build-failed"] +== Dockerfile Build failed + +The following example is a response from a Dockerfile Build that has failed. 
+ +---- +{ + "build_id": "5346a21d-3434-4764-85be-5be1296f293c", + "trigger_kind": "github", //Optional + "name": "test", + "repository": "dgangaia/test", + "docker_url": "quay.io/dgangaia/test", + "error_message": "Could not find or parse Dockerfile: unknown instruction: GIT", + "namespace": "dgangaia", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", //Optional + "docker_tags": [ + "master", + "latest" + ], + "build_name": "6ae9a86", + "trigger_metadata": { //Optional + "commit": "6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { //Optional + "url": "https://github.com/dgangaia/test/commit/6ae9a86930fc73dd07b02e4c5bf63ee60be180ad", + "date": "2019-03-06T14:18:16+11:00", + "message": "failed build test", + "committer": { //Optional + "username": "web-flow", + "url": "https://github.com/web-flow", //Optional + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" //Optional + }, + "author": { //Optional + "username": "dgangaia", + "url": "https://github.com/dgangaia", //Optional + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" //Optional + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/5346a21d-3434-4764-85be-5be1296f293c" +} +---- + +[discrete] +[id="dockerfile-build-cancelled"] +== Dockerfile Build cancelled + +The following example is a response from a Dockerfile Build that has been cancelled. 
+ +---- +{ + "build_id": "cbd534c5-f1c0-4816-b4e3-55446b851e70", + "trigger_kind": "github", + "name": "test", + "repository": "dgangaia/test", + "namespace": "dgangaia", + "docker_url": "quay.io/dgangaia/test", + "trigger_id": "38b6e180-9521-4ff7-9844-acf371340b9e", + "docker_tags": [ + "master", + "latest" + ], + "build_name": "cbce83c", + "trigger_metadata": { + "commit": "cbce83c04bfb59734fc42a83aab738704ba7ec41", + "ref": "refs/heads/master", + "default_branch": "master", + "git_url": "git@github.com:dgangaia/test.git", + "commit_info": { + "url": "https://github.com/dgangaia/test/commit/cbce83c04bfb59734fc42a83aab738704ba7ec41", + "date": "2019-03-06T14:27:53+11:00", + "message": "testing cancel build", + "committer": { + "username": "web-flow", + "url": "https://github.com/web-flow", + "avatar_url": "https://avatars3.githubusercontent.com/u/19864447?v=4" + }, + "author": { + "username": "dgangaia", + "url": "https://github.com/dgangaia", + "avatar_url": "https://avatars1.githubusercontent.com/u/43594254?v=4" + } + } + }, + "homepage": "https://quay.io/repository/dgangaia/test/build/cbd534c5-f1c0-4816-b4e3-55446b851e70" +} +---- + +ifeval::["{context}" == "use-quay"] + +[discrete] +[id="vulnerability-detected"] +== Vulnerability detected + +The following example is a response from a Dockerfile Build has detected a vulnerability in the repository. 
+ +---- +{ + "repository": "dgangaia/repository", + "namespace": "dgangaia", + "name": "repository", + "docker_url": "quay.io/dgangaia/repository", + "homepage": "https://quay.io/repository/dgangaia/repository", + + "tags": ["latest", "othertag"], + + "vulnerability": { + "id": "CVE-1234-5678", + "description": "This is a bad vulnerability", + "link": "http://url/to/vuln/info", + "priority": "Critical", + "has_fix": true + } +} +---- +endif::[] diff --git a/modules/resetting-superuser-password-on-operator.adoc b/modules/resetting-superuser-password-on-operator.adoc new file mode 100644 index 000000000..bb07175fa --- /dev/null +++ b/modules/resetting-superuser-password-on-operator.adoc @@ -0,0 +1,82 @@ +:_content-type: CONCEPT +[id="resetting-superuser-password-on-operator"] += Resetting superuser passwords on the {productname} Operator + +.Prerequisites + +* You have created a {productname} superuser. +* You have installed Python 3.9. +* You have installed the `pip` package manager for Python. +* You have installed the `bcrypt` package for `pip`. + +.Procedure + +. Log in to your {productname} deployment. + +. On the {ocp} UI, navigate to *Workloads* -> *Secrets*. + +. Select the namespace for your {productname} deployment, for example, `Project quay`. + +. Locate and store the PostgreSQL database credentials. + +. Generate a secure, hashed password using the `bcrypt` package in Python 3.9 by entering the following command: ++ +[source,terminal] +---- +$ python3.9 -c 'import bcrypt; print(bcrypt.hashpw(b"newpass1234", bcrypt.gensalt(12)).decode("utf-8"))' +---- ++ +.Example output ++ +[source,terminal] +---- +$2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y +---- + +. On the CLI, log in to the database, for example: ++ +[source,terminal] +---- +$ oc rsh quayuser-quay-quay-database-669c8998f-v9qsl +---- + +. 
Enter the following command to open a connection to the `quay` PostgreSQL database server, specifying the database, username, and host address: ++ +[source,terminal] +---- +sh-4.4$ psql -U quayuser-quay-quay-database -d quayuser-quay-quay-database -W +---- + +. Enter the following command to connect to the default database for the current user: ++ +[source,terminal] +---- +quay=> \c +---- + +. Update the `password_hash` of the superuser admin who lost their password: ++ +[source,terminal] +---- +quay=> UPDATE public.user SET password_hash = '$2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y' where username = 'quayadmin'; +---- + +. Enter the following to command to ensure that the `password_hash` has been updated: ++ +[source,terminal] +---- +quay=> select * from public.user; +---- ++ +.Example output ++ +[source,terminal] +---- +id | uuid | username | password_hash | email | verified | stripe_id | organization | robot | invoice_email | invalid_login_attempts | last_invalid_login |removed_tag_expiration_s | enabled | invoice_email_address | company | family_name | given_name | location | maximum_queued_builds_count | creation_date | last_accessed +----+--------------------------------------+-----------+--------------------------------------------------------------+-----------------------+--- +-------+-----------+--------------+-------+---------------+------------------------+----------------------------+--------------------------+------ +---+-----------------------+---------+-------------+------------+----------+-----------------------------+----------------------------+----------- +1 | 73f04ef6-19ba-41d3-b14d-f2f1eed94a4a | quayadmin | $2b$12$zoilcTG6XQeAoVuDuIZH0..UpvQEZcKh3V6puksQJaUQupHgJ4.4y | quayadmin@example.com | t | | f | f | f | 0 | 2023-02-23 07:54:39.116485 | 1209600 | t | | | | | | | 2023-02-23 07:54:39.116492 +---- + +. Navigate to your {productname} UI on {ocp} and log in using the new credentials. 
diff --git a/modules/resource-demand-failed-operator.adoc b/modules/resource-demand-failed-operator.adoc new file mode 100644 index 000000000..56bf0fab4 --- /dev/null +++ b/modules/resource-demand-failed-operator.adoc @@ -0,0 +1,56 @@ +:_content-type: CONCEPT +[id="resource-demand-failed-operator"] += How can I handle failed {productname} Operator deployments caused by resource demand? + +The {productname} Operator deploys the following pods with default resource requests shown below. Default resource requests can be too large for smaller clusters, and might cause issues during rolling updates or even initial rollout. + +[source,terminal] +---- +NAME READY STATUS RESTARTS AGE +quay-operator.v3.6.2-d88c4f74b-7s8t7 1/1 Running 0 4m22s +subquay-clair-app-79f96d69dc-j7dzh 1/1 Running 0 2m35s +subquay-clair-app-79f96d69dc-n9svj 1/1 Running 0 2m3s +subquay-clair-postgres-cc4fdf4b7-hjv9m 1/1 Running 0 2m51s +subquay-quay-app-766f64b84d-grkqv 1/1 Running 0 2m35s +subquay-quay-app-766f64b84d-m4bps 1/1 Running 0 2m35s +subquay-quay-app-upgrade-wp9vd 0/1 Completed 0 2m44s +subquay-quay-config-editor-6c84649df8-v2zhz 1/1 Running 0 2m35s +subquay-quay-database-78bf9dd579-gjfvm 1/1 Running 0 2m33s +subquay-quay-mirror-b9c7657b6-7tptr 1/1 Running 0 2m11s +subquay-quay-mirror-b9c7657b6-phcfh 1/1 Running 0 2m11s +subquay-quay-postgres-init-lp8fv 0/1 Completed 0 2m36s +subquay-quay-redis-6c65bdc497-hsgfg 1/1 Running 0 3m31s + +1.clair-app Requests x 2 (instances): + cpu: 2 + memory: 2Gi +2. clair-postgres : +Requests: + cpu: 500m + memory: 2Gi +3. quay.app x 2 (instances) : +Requests: + cpu: 2 + memory: 8Gi +4. quay-database : +Requests: + cpu: 500m + memory: 2Gi +5. quay-mirror x 2 (instances): +Requests: + cpu: 500m + memory: 512Mi +6. 
redis +Requests: + cpu: 500m + memory: 1Gi +---- + +Resource limitations and requests cannot be lowered, however, you can disable the `horizontalpodautoscaling` components in the `QuayRegistry` custom resource definition (CRD) and use the `override` feature to set the replica count to `1`. This lowers the required resources. + +[NOTE] +==== +Using a single replica is prone to cause registry outages because the pod might get restarted during updates, {productname} configuration updates, node maintenance events, or unexpected node downtime. +==== + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#scale_down_your_red_hat_quay_deployment[Scaling down your {productname} deployment]. diff --git a/modules/restoring-red-hat-quay-standalone.adoc b/modules/restoring-red-hat-quay-standalone.adoc new file mode 100644 index 000000000..b98d6ceb5 --- /dev/null +++ b/modules/restoring-red-hat-quay-standalone.adoc @@ -0,0 +1,240 @@ +:_content-type: PROCEDURE +[[restoring-red-hat-quay-standalone]] += Restoring {productname} on standalone deployments + +This procedure describes how to restore {productname} on standalone deployments. + +.Prerequisites + +* You have backed up your {productname} deployment. + +.Procedure + +. Create a new directory that will bind-mount to `/conf/stack` inside of the {productname} container: ++ +[source,terminal] +---- +$ mkdir /opt/new-quay-install +---- + +. Copy the contents of your temporary backup directory created in xref:backing-up-red-hat-quay-standalone[Backing up {productname} on standalone deployments] to the `new-quay-install` directory created in Step 1: ++ +[source,terminal] +---- +$ cp /tmp/quay-backup/quay-backup.tar.gz /opt/new-quay-install/ +---- + +. Change into the `new-quay-install` directory by entering the following command: ++ +[source,terminal] +---- +$ cd /opt/new-quay-install/ +---- + +. 
Extract the contents of your {productname} directory: ++ +[source,terminal] +---- +$ tar xvf /tmp/quay-backup/quay-backup.tar.gz * +---- ++ +Example output: ++ +---- +config.yaml +config.yaml.bak +extra_ca_certs/ +extra_ca_certs/ca.crt +ssl.cert +ssl.key +---- + +. Recall the `DB_URI` from your backed-up `config.yaml` file by entering the following command: ++ +[source,terminal] +---- +$ grep DB_URI config.yaml +---- ++ +Example output: ++ +[source,yaml] +---- +postgresql://:test123@172.24.10.50/quay +---- + +. Run the following command to enter the PostgreSQL database server: ++ +[source,terminal] +---- +$ sudo postgres +---- + +. Enter psql and create a new database in 172.24.10.50 to restore the quay databases, for example, `example_restore_registry_quay_database`, by entering the following command: ++ +[source,terminal] +---- +$ psql "host=172.24.10.50 port=5432 dbname=postgres user= password=test123" +postgres=> CREATE DATABASE example_restore_registry_quay_database; +---- ++ +Example output: ++ +---- +CREATE DATABASE +---- + +. Connect to the database by running the following command: ++ +[source,terminal] +---- +postgres=# \c "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +[source,terminal] +---- +You are now connected to database "example-restore-registry-quay-database" as user "postgres". +---- + +. Create a `pg_trgm` extension of your Quay database by running the following command: ++ +[source,terminal] +---- +example_restore_registry_quay_database=> CREATE EXTENSION IF NOT EXISTS pg_trgm; +---- ++ +Example output: ++ +[source,terminal] +---- +CREATE EXTENSION +---- + +. Exit the postgres CLI by entering the following command: ++ +[source,terminal] +---- +\q +---- + +. 
Import the database backup to your new database by running the following command: ++ +[source,terminal] +---- +$ psql "host=172.24.10.50 port=5432 dbname=example_restore_registry_quay_database user= password=test123" -W < /tmp/quay-backup/quay-backup.sql +---- ++ +Example output: ++ +---- +SET +SET +SET +SET +SET +---- ++ +Update the value of `DB_URI` in your `config.yaml` from `postgresql://:test123@172.24.10.50/quay` to `postgresql://:test123@172.24.10.50/example-restore-registry-quay-database` before restarting the {productname} deployment. ++ +[NOTE] +==== +The DB_URI format is `DB_URI postgresql://:@/`. If you are moving from one PostgreSQL server to another PostgreSQL server, update the value of ``, `` and `` at the same time. +==== + + + +. In the `/opt/new-quay-install` directory, print the contents of your `DISTRIBUTED_STORAGE_CONFIG` bundle: ++ +[source,terminal] +---- +$ cat config.yaml | grep DISTRIBUTED_STORAGE_CONFIG -A10 +---- ++ +Example output: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: +DISTRIBUTED_STORAGE_CONFIG: + default: + - S3Storage + - s3_bucket: + storage_path: /registry + s3_access_key: + s3_region: + s3_secret_key: + host: +---- ++ +[NOTE] +==== +Your `DISTRIBUTED_STORAGE_CONFIG` in `/opt/new-quay-install` must be updated before restarting your {productname} deployment. +==== + +. Export the `AWS_ACCESS_KEY_ID` by using the `access_key` credential obtained in Step 13: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID= +---- + +. Export the `AWS_SECRET_ACCESS_KEY` by using the `secret_key` obtained in Step 13: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY= +---- + +. Create a new s3 bucket by entering the following command: ++ +[source,terminal] +---- +$ aws s3 mb s3:// --region us-east-2 +---- ++ +Example output: ++ +[source,terminal] +---- +$ make_bucket: quay +---- + +. 
Upload all blobs to the new s3 bucket by entering the following command: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl \ +--endpoint-url <1> +/tmp/quay-backup/blob-backup/. s3://quay/ +---- +<1> The {productname} registry endpoint must be the same before backup and after restore. ++ +Example output: ++ +[source,terminal] +---- +upload: ../../tmp/quay-backup/blob-backup/datastorage/registry/sha256/50/505edb46ea5d32b5cbe275eb766d960842a52ee77ac225e4dc8abb12f409a30d to s3://quay/datastorage/registry/sha256/50/505edb46ea5d32b5cbe275eb766d960842a52ee77ac225e4dc8abb12f409a30d +upload: ../../tmp/quay-backup/blob-backup/datastorage/registry/sha256/27/27930dc06c2ee27ac6f543ba0e93640dd21eea458eac47355e8e5989dea087d0 to s3://quay/datastorage/registry/sha256/27/27930dc06c2ee27ac6f543ba0e93640dd21eea458eac47355e8e5989dea087d0 +upload: ../../tmp/quay-backup/blob-backup/datastorage/registry/sha256/8c/8c7daf5e20eee45ffe4b36761c4bb6729fb3ee60d4f588f712989939323110ec to s3://quay/datastorage/registry/sha256/8c/8c7daf5e20eee45ffe4b36761c4bb6729fb3ee60d4f588f712989939323110ec +... +---- + +. Before restarting your {productname} deployment, update the storage settings in your config.yaml: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: +DISTRIBUTED_STORAGE_CONFIG: + default: + - S3Storage + - s3_bucket: + storage_path: /registry + s3_access_key: + s3_secret_key: + s3_region: + host: +---- \ No newline at end of file diff --git a/modules/restoring-red-hat-quay.adoc b/modules/restoring-red-hat-quay.adoc new file mode 100644 index 000000000..281ff05ca --- /dev/null +++ b/modules/restoring-red-hat-quay.adoc @@ -0,0 +1,334 @@ +:_content-type: PROCEDURE +[id="restoring-up-red-hat-quay"] += Restoring {productname} + +Use the following procedures to restore {productname} when the {productname} Operator manages the database. It should be performed after a backup of your {productname} registry has been performed. 
See xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] for more information. + +.Prerequisites + +* {productname} is deployed on {ocp} using the {productname} Operator. +* A backup of the {productname} configuration managed by the {productname} Operator has been created following the instructions in the xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] section +* Your {productname} database has been backed up. +* The object storage bucket used by {productname} has been backed up. +* The components `quay`, `postgres` and `objectstorage` are set to `managed: true` +* If the component `clair` is set to `managed: true`, the component `clairpostgres` is also set to `managed: true` (starting with {productname} v3.7 or later) +* There is no running {productname} deployment managed by the {productname} Operator in the target namespace on your {ocp} cluster + +[NOTE] +==== +If your deployment contains partially unmanaged database or storage components and you are using external services for PostgreSQL or S3-compatible object storage to run your {productname} deployment, you must refer to the service provider or vendor documentation to restore their data from a backup prior to restore {productname} +==== + +[id="restoring-quay-and-configuration-from-backup"] +== Restoring {productname} and its configuration from a backup + +Use the following procedure to restore {productname} and its configuration files from a backup. + +[NOTE] +==== +These instructions assume you have followed the process in the xref:backing-up-red-hat-quay-operator.adoc#backing-up-red-hat-quay-operator[Backing up {productname}] guide and create the backup files with the same names. +==== + +.Procedure + +. 
Restore the backed up {productname} configuration by entering the following command: ++ +[source,terminal] +---- +$ oc create -f ./config-bundle.yaml +---- ++ +[IMPORTANT] +==== +If you receive the error `Error from server (AlreadyExists): error when creating "./config-bundle.yaml": secrets "config-bundle-secret" already exists`, you must delete your existing resource with `$ oc delete Secret config-bundle-secret -n ` and recreate it with `$ oc create -f ./config-bundle.yaml`. +==== + +. Restore the generated keys from the backup by entering the following command: ++ +[source,terminal] +---- +$ oc create -f ./managed-secret-keys.yaml +---- + +. Restore the `QuayRegistry` custom resource: ++ +[source,terminal] +---- +$ oc create -f ./quay-registry.yaml +---- + +. Check the status of the {productname} deployment and wait for it to be available: ++ +[source,terminal] +---- +$ oc wait quayregistry registry --for=condition=Available=true -n +---- + +[id="scale-down-quay-deployment"] +== Scaling down your {productname} deployment + +Use the following procedure to scale down your {productname} deployment. + +.Procedure + +. Depending on the version of your {productname} deployment, scale down your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale down the {productname} deployment by disabling auto scaling and overriding the replica count for Quay, mirror workers and Clair (if managed). 
Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: false <1> + - kind: quay + managed: true + overrides: <2> + replicas: 0 + - kind: clair + managed: true + overrides: + replicas: 0 + - kind: mirror + managed: true + overrides: + replicas: 0 + … +---- +<1> Disable auto scaling of Quay, Clair and Mirroring workers +<2> Set the replica count to 0 for components accessing the database and objectstorage + +.. *For Operator version 3.6 and earlier:* Scale down the {productname} deployment by scaling down the {productname} registry first and then the managed {productname} resources: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/^quay-operator/ {print $1}') -n +---- ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-app/ {print $1}') -n +---- ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/quay-mirror/ {print $1}') -n +---- ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment $(oc get deployment -n |awk '/clair-app/ {print $1}') -n +---- + +. Wait for the `registry-quay-app`, `registry-quay-mirror` and `registry-clair-app` pods (depending on which components you set to be managed by {productname} Operator) to disappear. You can check their status by running the following command: ++ +[source,terminal] +---- +$ oc get pods -n +---- ++ +Example output: ++ +[source,terminal] +---- +registry-quay-config-editor-77847fc4f5-nsbbv 1/1 Running 0 9m1s +registry-quay-database-66969cd859-n2ssm 1/1 Running 0 6d1h +registry-quay-redis-7cc5f6c977-956g8 1/1 Running 0 5d21h +---- + +[id="restoring-quay-database"] +== Restoring your {productname} database + +Use the following procedure to restore your {productname} database. 
+ +.Procedure + +. Identify your `Quay` database pod by entering the following command: ++ +[source,terminal] +---- +$ oc get pod -l quay-component=postgres -n -o jsonpath='{.items[0].metadata.name}' +---- ++ +Example output: ++ +---- +quayregistry-quay-database-59f54bb7-58xs7 +---- + +. Upload the backup by copying it from the local environment and into the pod: ++ +---- +$ oc cp ./backup.sql -n registry-quay-database-66969cd859-n2ssm:/tmp/backup.sql +---- + +. Open a remote terminal to the database by entering the following command: ++ +[source,terminal] +---- +$ oc rsh -n registry-quay-database-66969cd859-n2ssm +---- + +. Enter psql by running the following command: ++ +[source,terminal] +---- +bash-4.4$ psql +---- + +. You can list the database by running the following command: ++ +---- +postgres=# \l +---- ++ +.Example output ++ +[source,terminal] +---- + List of databases + Name | Owner | Encoding | Collate | Ctype | Access privileges +----------------------------+----------------------------+----------+------------+------------+----------------------- +postgres | postgres | UTF8 | en_US.utf8 | en_US.utf8 | +quayregistry-quay-database | quayregistry-quay-database | UTF8 | en_US.utf8 | en_US.utf8 | +---- + +. Drop the database by entering the following command: ++ +[source,terminal] +---- +postgres=# DROP DATABASE "quayregistry-quay-database"; +---- ++ +.Example output ++ +[source,terminal] +---- +DROP DATABASE +---- + +. Exit the postgres CLI to re-enter bash-4.4: ++ +[source,terminal] +---- +\q +---- + +. Redirect your PostgreSQL database to your backup database: ++ +[source,terminal] +---- +sh-4.4$ psql < /tmp/backup.sql +---- + +. Exit bash by entering the following command: ++ +[source,terminal] +---- +sh-4.4$ exit +---- + +[id="restoring-quay-object-storage-data"] +== Restore your {productname} object storage data + +Use the following procedure to restore your {productname} object storage data. + +.Procedure + +. 
Export the `AWS_ACCESS_KEY_ID` by entering the following command: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_ACCESS_KEY_ID}' |base64 -d) +---- + +. Export the `AWS_SECRET_ACCESS_KEY` by entering the following command: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY=$(oc get secret -l app=noobaa -n -o jsonpath='{.items[0].data.AWS_SECRET_ACCESS_KEY}' |base64 -d) +---- + +. Upload all blobs to the bucket by running the following command: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint https://$(oc get route s3 -n openshift-storage -o jsonpath='{.spec.host}') ./blobs s3://$(oc get cm -l app=noobaa -n -o jsonpath='{.items[0].data.BUCKET_NAME}') +---- + +[NOTE] +==== +You can also use link:https://rclone.org/[rclone] or link:https://s3tools.org/s3cmd[s3cmd] instead of the AWS command line utility. +==== + +[id="scaling-up-quay"] +== Scaling up your {productname} deployment + +. Depending on the version of your {productname} deployment, scale up your deployment using one of the following options. + +.. *For Operator version 3.7 and newer:* Scale up the {productname} deployment by re-enabling auto scaling, if desired, and removing the replica overrides for Quay, mirror workers and Clair as applicable. Your `QuayRegistry` resource should look similar to the following: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: true <1> + - kind: quay <2> + managed: true + - kind: clair + managed: true + - kind: mirror + managed: true + … +---- +<1> Re-enables auto scaling of {productname}, Clair and mirroring workers again (if desired) +<2> Replica overrides are removed again to scale the {productname} components back up + +.. 
*For Operator version 3.6 and earlier:* Scale up the {productname} deployment by scaling up the {productname} registry again: ++ +[source,terminal] +---- +$ oc scale --replicas=1 deployment $(oc get deployment -n | awk '/^quay-operator/ {print $1}') -n +---- + +. Check the status of the {productname} deployment: ++ +[source,terminal] +---- +$ oc wait quayregistry registry --for=condition=Available=true -n +---- ++ +Example output: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + ... + name: registry + namespace: + ... +spec: + ... +status: + - lastTransitionTime: '2022-06-20T05:31:17Z' + lastUpdateTime: '2022-06-20T17:31:13Z' + message: All components reporting as healthy + reason: HealthChecksPassing + status: 'True' + type: Available +---- \ No newline at end of file diff --git a/modules/retrieving-build-info-superuser-api.adoc b/modules/retrieving-build-info-superuser-api.adoc new file mode 100644 index 000000000..09ac7aad7 --- /dev/null +++ b/modules/retrieving-build-info-superuser-api.adoc @@ -0,0 +1,31 @@ +[id="retrieving-build-info-superuser-api"] += Retrieving build information with the {productname} API + +As a superuser, you can retrieve information about builds with the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepobuildsuperuser[`GET /api/v1/superuser/{build_uuid}/build`] endpoint to return information about a build: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//build" \ + -H "Authorization: Bearer " +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepobuildstatussuperuser[`GET /api/v1/superuser/{build_uuid}/status`] API endpoint to return the status for the builds specified by the build uuids: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//status" \ + -H "Authorization: Bearer " +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepobuildlogssuperuser[`GET /api/v1/superuser/{build_uuid}/logs`] API endpoint to return the build logs for the build specified by the build uuid: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/superuser//logs" \ + -H "Authorization: Bearer " +---- + diff --git a/modules/reverting-tag-changes-api.adoc b/modules/reverting-tag-changes-api.adoc new file mode 100644 index 000000000..0fb862e7c --- /dev/null +++ b/modules/reverting-tag-changes-api.adoc @@ -0,0 +1,55 @@ +:_content-type: CONCEPT +[id="reverting-tag-changes-api"] += Reverting tag changes by using the API + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive _time machine_ feature that allows older image tags to remain in the repository for set periods of time so that they can revert changes made to tags. This feature allows users to revert tag changes, like tag deletions. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
You can restore a repository tag to its previous image by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#restoretag[`POST /api/v1/repository/{repository}/tag/{tag}/restore`] command. For example: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "manifest_digest": + }' \ + quay-server.example.com/api/v1/repository/quayadmin/busybox/tag/test/restore +---- ++ +.Example output ++ +[source,terminal] +---- +{} +---- + +. To see a list of tags after restoring an old tag you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test", "reversion": false, "start_ts": 1716324069, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715697708, "end_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:41:48 -0000", "expiration": "Tue, 14 May 2024 14:48:51 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715695488, "end_ts": 1716324069, "manifest_digest": 
"sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "expiration": "Tue, 21 May 2024 20:41:09 -0000"}, {"name": "test", "reversion": false, "start_ts": 1715631517, "end_ts": 1715695488, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Mon, 13 May 2024 20:18:37 -0000", "expiration": "Tue, 14 May 2024 14:04:48 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/reverting-tag-changes.adoc b/modules/reverting-tag-changes.adoc new file mode 100644 index 000000000..af9d53a84 --- /dev/null +++ b/modules/reverting-tag-changes.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="reverting-tag-changes"] += Reverting tag changes by using the UI + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive _time machine_ feature that allows older image tags to remain in the repository for set periods of time so that they can revert changes made to tags. This feature allows users to revert tag changes, like tag deletions. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to revert. + +. Click the *Tag History* tab. + +. Find the point in the timeline at which image tags were changed or removed. Next, click the option under *Revert* to restore a tag to its image. 
\ No newline at end of file diff --git a/modules/rn_1_12.adoc b/modules/rn_1_12.adoc index d64079072..5d99543f0 100644 --- a/modules/rn_1_12.adoc +++ b/modules/rn_1_12.adoc @@ -26,4 +26,4 @@ Release Date: September 10, 2015 * Changed webhook notifications to also send client SSL certs (#374) * Improved internal test suite (#381, #374, #388, #455, #457) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-120[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-120[Link to this Release] diff --git a/modules/rn_1_13.adoc b/modules/rn_1_13.adoc index a26ae5b23..3f702ef71 100644 --- a/modules/rn_1_13.adoc +++ b/modules/rn_1_13.adoc @@ -9,7 +9,7 @@ Bug Fixes: Quay Enterprise v1.13.x contains long-running migrations and should be updated during a maintenance window where administrators will have several hours of time to dedicate to the database migrating. Quay Enterprise will not be available while these migrations run. 
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes#rn-1-133[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-133[Link to this Release] [[rn-1-132]] @@ -19,7 +19,7 @@ Release Date: November 3, 2015 * Fixed 404 API calls redirecting to 404 page (#762) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes#rn-1-132[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-132[Link to this Release] [[rn-1-131]] == Version 1.13.1 @@ -29,7 +29,7 @@ Release Date: November 3, 2015 * Fixed broken database migration (#759) * Added OpenGraph preview image (#750, #758) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/2.9/html-single/red_hat_quay_release_notes#rn-1-131[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-131[Link to this Release] [[rn-1-130]] == Version 1.13.0 @@ -65,4 +65,4 @@ Release Date: November 2, 2015 * Improved internal test suite (#470, #511, #526, #514, #545, #570, #572, #573, #583, #711, #728, #730) * Improved background worker stability (#471) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-130[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-130[Link to this Release] diff --git a/modules/rn_1_14.adoc b/modules/rn_1_14.adoc index caad60f49..ff0e0e625 100644 --- a/modules/rn_1_14.adoc +++ b/modules/rn_1_14.adoc @@ -22,7 +22,7 @@ Bug Fixes: * Fixed unhandled exceptions in Queue * Fixed UI for dismissing notifications (#1094) 
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-141[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-141[Link to this Release] [[rn-1-140]] == Version 1.14.0 @@ -49,4 +49,4 @@ Bug fixes: * Fixed page titles (#952) * Fixed numerous builder failures -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-140[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-140[Link to this Release] diff --git a/modules/rn_1_15.adoc b/modules/rn_1_15.adoc index 0a1520f20..27e68034a 100644 --- a/modules/rn_1_15.adoc +++ b/modules/rn_1_15.adoc @@ -7,7 +7,7 @@ Fixed: * Docker pushes with v2 sha mismatch were breaking v2 functionality (#1236) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-155[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-155[Link to this Release] [[rn-1-154]] == Version 1.15.4 @@ -24,7 +24,7 @@ Fixed: * Minor UI error in tag specific image view (#1222) * Notification logo (#1223) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-154[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-154[Link to this Release] [[rn-1-153]] == Version 1.15.3 @@ -43,7 +43,7 @@ Fixed: * Tests (#1190, #1184) * Setup tool storage engine validation (#1194) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-153[Link to this Release] 
+link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-153[Link to this Release] [[rn-1-152]] == Version 1.15.2 @@ -62,7 +62,7 @@ Fixed: * Scope handling for Docker 1.8.3 (#1162) * Typos in docs (#1163, #1164) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-152[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-152[Link to this Release] [[rn-1-150]] == Version 1.15.0 @@ -75,4 +75,4 @@ Fixed: * Fix torrent hash calculation (#1142) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-150[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-150[Link to this Release] diff --git a/modules/rn_1_16.adoc b/modules/rn_1_16.adoc index 8e57c7b29..0c8166d36 100644 --- a/modules/rn_1_16.adoc +++ b/modules/rn_1_16.adoc @@ -8,7 +8,7 @@ Changed: * Added ability to override secure cookie setting when using HTTPS protocol (#1712) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-166[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-166[Link to this Release] [[rn-1-165]] == Version 1.16.5 @@ -37,7 +37,7 @@ Fixed: * Support for empty RDN in LDAP configuration (#1644) * Error raised on duplicate placements when replicating (#1633) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-165[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-165[Link to this Release] [[rn-1-164]] == Version 1.16.4 @@ -61,7 +61,7 @@ Fixed: * GitHub API 
URLs are properly stripped of trailing slashes (#1590) * Tutorial fails gracefully without Redis (#1587) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-164[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-164[Link to this Release] [[rn-1-163]] == Version 1.16.3 @@ -90,7 +90,7 @@ Fixed: * Removed hosted Quay.io status from Enterprise 500 page (#1548) * Performance of database queries (#1512) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-163[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-163[Link to this Release] [[rn-1-162]] == Version 1.16.2 @@ -110,7 +110,7 @@ Fixed: * Repository descriptions breaking log page styles (#1532) * Styles on Privacy and Terms of Service pages (#1531) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-162[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-162[Link to this Release] [[rn-1-161]] == Version 1.16.1 @@ -147,7 +147,7 @@ Fixed: * Handling of admin OAuth Scope (#1447) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-161[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-161[Link to this Release] [[rn-1-160]] == Version 1.16.0 @@ -184,4 +184,4 @@ Fixed: * Unicode error when calculating new V1 IDs (#1239) * Error when turning on receipt emails (#1209) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-160[Link to this Release] 
+link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-160[Link to this Release] diff --git a/modules/rn_1_17.adoc b/modules/rn_1_17.adoc index 76ac3a0f9..3b7792217 100644 --- a/modules/rn_1_17.adoc +++ b/modules/rn_1_17.adoc @@ -17,7 +17,7 @@ Fixed: * Delete empty Swift chunks (#1844) * Handling of custom LDAP cert (#1846) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-171[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-171[Link to this Release] [[rn-1-170]] == Version 1.17.0 @@ -53,4 +53,4 @@ Fixed: * Non-admin users no longer default to organization-wide read (#1685) * Database performance (#1680, #1688, #1690, #1722, #1744, #1772) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-170[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-170[Link to this Release] diff --git a/modules/rn_1_18.adoc b/modules/rn_1_18.adoc index 5de2a411f..129eb2599 100644 --- a/modules/rn_1_18.adoc +++ b/modules/rn_1_18.adoc @@ -7,7 +7,7 @@ Fixed: * Exception when using RADOS GW Storage driver (#2057) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-181[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-181[Link to this Release] [[rn-1-180]] == Version 1.18.0 @@ -30,4 +30,4 @@ Fixed: * Add feature flag to turn off requirement for team invitations (#1845) * Don't exception log for expected 404s in Swift storage (#1851) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-1-180[Link to this Release] 
+link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-1-180[Link to this Release] diff --git a/modules/rn_2_00.adoc b/modules/rn_2_00.adoc index ae916366b..49d9133d3 100644 --- a/modules/rn_2_00.adoc +++ b/modules/rn_2_00.adoc @@ -11,7 +11,7 @@ Fixed: * Support for wildcard certs in the superuser config panel -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-005[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-005[Link to this Release] [[rn-2-004]] == Version 2.0.4 @@ -48,7 +48,7 @@ Regressed: * Support for wildcard certs in the superuser config panel -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-004[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-004[Link to this Release] [[rn-2-003]] == Version 2.0.3 @@ -67,7 +67,7 @@ Fixed: * Improve security scan performance (#2209) * Fix user lookup for external auth engines (#2206) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-003[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-003[Link to this Release] [[rn-2-002]] == Version 2.0.2 @@ -105,7 +105,7 @@ Regressed: * User lookup for external auth engines broken -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-002[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-002[Link to this Release] [[rn-2-001]] == Version 2.0.1 @@ -150,7 +150,7 @@ Regressed: * Superuser config panel cannot save 
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-001[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-001[Link to this Release] [[rn-2-000]] == Version 2.0.0 @@ -184,4 +184,4 @@ Regressed: * Entity search broken under Postgres -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-000[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-000[Link to this Release] diff --git a/modules/rn_2_10.adoc b/modules/rn_2_10.adoc index 96e982897..a5ccbdecf 100644 --- a/modules/rn_2_10.adoc +++ b/modules/rn_2_10.adoc @@ -19,4 +19,4 @@ Fixed: * Display of expiration date for licenses with multiple entries (#2354) * V1 search compatibility (#2344) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-100[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-100[Link to this Release] diff --git a/modules/rn_2_20.adoc b/modules/rn_2_20.adoc index 12638fdba..853f44579 100644 --- a/modules/rn_2_20.adoc +++ b/modules/rn_2_20.adoc @@ -29,4 +29,4 @@ Fixed: * Validation and installation of custom TLS certificates (#2473) * Garbage Collection corner case (#2404) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-200[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-200[Link to this Release] diff --git a/modules/rn_2_30.adoc b/modules/rn_2_30.adoc index 89f1fe8f7..47fd03aa3 100644 --- a/modules/rn_2_30.adoc +++ b/modules/rn_2_30.adoc @@ -6,7 +6,7 @@ Added: * Always show tag expiration options in 
superuser panel -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-304[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-304[Link to this Release] [[rn-2-303]] == Version 2.3.3 @@ -25,7 +25,7 @@ Fixed: * Viewing of repositories with trust enabled caused a 500 (#2594, #2593) * Failure in setup tool when time machine config is not set (#2589) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-303[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-303[Link to this Release] [[rn-2-302]] == Version 2.3.2 @@ -47,7 +47,7 @@ Fixed: * Create New tooltip hiding dropdown menu (#2579) * Ensure build logs archive lookup URL checks build permissions (#2578) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-302[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-302[Link to this Release] [[rn-2-301]] == Version 2.3.1 @@ -63,7 +63,7 @@ Fixed: * Specify default server value for new bool field added to the repository table -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-301[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-301[Link to this Release] [[rn-2-300]] == Version 2.3.0 @@ -99,4 +99,4 @@ Fixed: * Backfill replication script when adjusting replication destinations (#2555) * Errors when deleting repositories without security scanning enabled (#2554) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-300[Link to this 
Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-300[Link to this Release] diff --git a/modules/rn_2_40.adoc b/modules/rn_2_40.adoc index e1580aefa..df59adb4c 100644 --- a/modules/rn_2_40.adoc +++ b/modules/rn_2_40.adoc @@ -28,4 +28,4 @@ Fixed: * Torrent validation in superuser config panel (#2694) * Expensive database call in build badges (#2688) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-400[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-400[Link to this Release] diff --git a/modules/rn_2_50.adoc b/modules/rn_2_50.adoc index 1f4e94b8c..79dab9e23 100644 --- a/modules/rn_2_50.adoc +++ b/modules/rn_2_50.adoc @@ -19,4 +19,4 @@ Fixed: * Setting of team resync option * Purge repository on very large repositories -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-500[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-500[Link to this Release] diff --git a/modules/rn_2_60.adoc b/modules/rn_2_60.adoc index 5a8c56eed..ed43e2912 100644 --- a/modules/rn_2_60.adoc +++ b/modules/rn_2_60.adoc @@ -11,7 +11,7 @@ Fixed: * Failure to register uploaded TLS certificates (#2946) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-602[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-602[Link to this Release] [[rn-2-601]] == Version 2.6.1 @@ -31,7 +31,7 @@ Fixed: * Inability to display Tag Signing status (#2890) * Broken health check for OIDC authentication (#2888) 
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-601[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-601[Link to this Release] [[rn-2-600]] == Version 2.6.0 @@ -54,4 +54,4 @@ Fixed: * Lazy loading of teams and robots (#2883) * OIDC auth headers (#2695) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-600[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-600[Link to this Release] diff --git a/modules/rn_2_70.adoc b/modules/rn_2_70.adoc index 575821209..b4d2fc667 100644 --- a/modules/rn_2_70.adoc +++ b/modules/rn_2_70.adoc @@ -28,4 +28,4 @@ Fixed: * "Restart Container" button in superuser config panel (#2928) * Various small JavaScript security fixes -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-700[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-700[Link to this Release] diff --git a/modules/rn_2_80.adoc b/modules/rn_2_80.adoc index dc07edb9b..08cefd3bc 100644 --- a/modules/rn_2_80.adoc +++ b/modules/rn_2_80.adoc @@ -28,4 +28,4 @@ Fixed: * Warning bar should not be displayed for already expired application tokens (#3003) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-800[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-800[Link to this Release] diff --git a/modules/rn_2_90.adoc b/modules/rn_2_90.adoc index df44c8951..a3813bbe9 100644 --- a/modules/rn_2_90.adoc +++ b/modules/rn_2_90.adoc @@ -10,7 +10,7 @@ Fixed: * Prohibit DES TLS ciphers 
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-905[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-905[Link to this Release] [[rn-2-904]] == Version 2.9.4 @@ -20,7 +20,7 @@ Fixed: * Georeplication under certain failure conditions would incorrectly mark storage as replicated (#3283) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-904[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-904[Link to this Release] [[rn-2-903]] == Version 2.9.3 @@ -30,7 +30,7 @@ Fixed: * Changed to using v4 of Gitlab API now that v3 has been deprecated and removed (#3110) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-903[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-903[Link to this Release] [[rn-2-902]] == Version 2.9.2 @@ -62,7 +62,7 @@ Fixed: * Respect CPU affinity when determining number of workers to run (#3064) * Breakage in RECATPCHA support (#3065) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-902[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-902[Link to this Release] [[rn-2-901]] == Version 2.9.1 @@ -79,7 +79,7 @@ Fixed: * Specify default server value for new integer fields added (#3052) * Overflow of repository grid UI (#3049) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-901[Link to this Release] 
+link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-901[Link to this Release] [[rn-2-900]] == Version 2.9.0 @@ -107,5 +107,5 @@ Fixed: * Squashed images with hard links pointing to deleted files no longer fail (#3032) * 500 error when trying to pull certain images via torrent (#3036) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-2-900[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-2-900[Link to this Release] diff --git a/modules/rn_3_00.adoc b/modules/rn_3_00.adoc index 71e4e949a..5c7ad6bd4 100644 --- a/modules/rn_3_00.adoc +++ b/modules/rn_3_00.adoc @@ -12,7 +12,7 @@ Fixed: * Remove obsolete 01_copy_syslog_config.sh * Config tool fails to set up database when password string contains "$" -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-005[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-005[Link to this Release] [[rn-3-004]] == Version 3.0.4 @@ -27,7 +27,7 @@ Fixed: * nginx access and error logs now to stdout -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-004[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-004[Link to this Release] [[rn-3-003]] @@ -41,7 +41,7 @@ Fixed: * Connection pooling was ignoring environment variable * Exception when in OAuth approval flow -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-003[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-003[Link to this 
Release] [[rn-3-002]] == Version 3.0.2 @@ -53,7 +53,7 @@ Fixed: * {productname}'s security scan endpoint is now enabled at startup for viewing results of Clair container image scans. * A flaw was found in the way the DES/3DES cipher was used as part of the TLS/SSL protocol. A man-in-the-middle attacker could use this flaw to recover some plaintext data by capturing large amounts of encrypted traffic between TLS/SSL server and client if the communication used a DES/3DES based ciphersuite. (CVE-2016-2183) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-002[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-002[Link to this Release] [[rn-3-001]] == Version 3.0.1 @@ -63,7 +63,7 @@ Fixed: * Health API endpoint (/health/instance) now correctly checks the internal port to verify all services. -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-001[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-001[Link to this Release] [[rn-3-000]] == Version 3.0.0 @@ -141,4 +141,4 @@ Previous versions of images required running in privileged mode. To remove this The move to a RHEL base image means the certificate install path has changed to /etc/pki/ca-trust/source/anchors. Examples running the images have been updated to reflect this. 
-link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-000[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-000[Link to this Release] diff --git a/modules/rn_3_10.adoc b/modules/rn_3_10.adoc index 6d111fb56..e453aad3d 100644 --- a/modules/rn_3_10.adoc +++ b/modules/rn_3_10.adoc @@ -1,3 +1,30 @@ +[[rn-3-103]] +== Version 3.1.3 +Release Date: November 22, 2019 + +Fixed: + +* NVD stopped publishing the XML feed, Clair now consumes JSON feed + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-103[Link to this Release] + +[[rn-3-102]] +== Version 3.1.2 +Release Date: October 31, 2019 + +Fixed: + +* Upgrade base image to latest rhel:7.7 +* Repository mirroring properly updates status +* Application repositories in public namespaces shown in UI +* Description of log operations in UI +* Quay V3 upgrade fails with "id field missing from v1Compatibility JSON" +* Security token for storage proxy properly URL encoded + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-102[Link to this Release] + +ifdef::downstream[] + [[rn-3-101]] == Version 3.1.1 Release Date: October 3, 2019 @@ -11,7 +38,7 @@ Fixed: * Removed kernel-headers package from clair-jwt and quay-builder images to elliminate false vulnerabilities * Updated SCL rh-nginx112 (related to CVE-2019-9511, CVE-2019-9513, CVE-2019-9516) -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-101[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-101[Link to this Release] [[rn-3-100]] == Version 3.1.0 @@ -42,7 +69,8 @@ Known Issues: * During repository mirroring, in order to fetch tags from a 
repository, at least one tag in the list of tags to sync must exist exactly as specified. See -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/manage_red_hat_quay/index#repo-mirroring-in-red-hat-quay[Repository Mirroring in Red Hat Quay] for more details. +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#repo-mirroring-in-red-hat-quay[Repository Mirroring in {productname}] for more details. * Repository mirror config has known issues when remote registry username or password has characters requiring special handling for shell commands. Specifically, the tokens for registry.redhat.io with a pipe (|) character in them are incorrectly escaped. Out of an abundance of caution, a fix for this will follow in a subsequent update. -link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes#rn-3-100[Link to this Release] +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-100[Link to this Release] +endif::downstream[] diff --git a/modules/rn_3_10_0.adoc b/modules/rn_3_10_0.adoc new file mode 100644 index 000000000..6e11201b8 --- /dev/null +++ b/modules/rn_3_10_0.adoc @@ -0,0 +1,365 @@ +:_content-type: CONCEPT +[id="release-notes-310"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-10-3"] +== RHBA-2024:0382 - {productname} 3.10.3 release + +Issued 2024-01-31 + +{productname} release 3.10.3 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:0382[RHBA-2024:0382] advisory. + +[id="bug-fixes-310-3"] +=== {productname} 3.10.3 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4849[PROJQUAY-4849]. 
Previously, the exporter failed to update the lifetime end of child manifests in the main manifest lists. Consequently, this led to exceptions when attemping to pull Docker images by tag after the tag was removed from the database due to garbage collection. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-6007[PROJQUAY-6007]. Previously, the Operator would attempt to create a temporary fake route to check if the cluster supported the _Route_ API. This check was unable to be conducted when the route and TLS component were marked as unamanged because these components are supposed to be managed manually by the user. This issue has been resolved. + +[id="rn-3-10-2"] +== RHBA-2024:0102 - {productname} 3.10.2 release + +Issued 2024-01-16 + +{productname} release 3.10.2 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:0102[RHBA-2024:0102] advisory. + +[id="new-features-310-2"] +=== {productname} 3.10.2 new features + +With this release, IBM Cloud object storage is now supported. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-ibmcloudstorage[IBM Cloud Object Storage]. + +[id="bug-fixes-310-2"] +=== {productname} 3.10.2 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-2679[PROJQUAY-2679] +* link:https://issues.redhat.com/browse/PROJQUAY-6549[PROJQUAY-6549] + +[id="known-issues-310-2"] +=== Known issues + +* A known issue was discovered when using naming conventions with the following words for repository names: ++ +`build` +`trigger` +`tag` ++ +When these words are used for repository names, users are unable access the repository, and are unable to permanently delete the repository. 
Attempting to delete these repositories returns the following error: `Failed to delete repository , HTTP404 - Not Found.` ++ +There is no workaround for this issue. Users should not use `build`, `trigger`, or `tag` in their repository names. + +[id="rn-3-10-1"] +== RHBA-2023:7819 - {productname} 3.10.1 release + +Issued 2023-12-14 + +{productname} release 3.10.1 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:7819[RHBA-2023:7819] advisory. + +[id="bug-fixes-310-1"] +=== {productname} 3.10.1 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-5452[PROJQUAY-5452] - Breadcrumbs incorrect when visiting a direct link +* link:https://issues.redhat.com/browse/PROJQUAY-6333[PROJQUAY-6333] - [New UI] The user in the team which has "member" or "creator" role can't see the "Teams and Membership" tab +* link:https://issues.redhat.com/browse/PROJQUAY-6336[PROJQUAY-6336] - Quay 3.10 new UI can't add normal user to quay new team during Create team wizard +* link:https://issues.redhat.com/browse/PROJQUAY-6369[PROJQUAY-6369] - The search input box doesn't work in permanently delete default permissions wizard of new UI + +[id="rn-3-10-0"] +== RHBA-2023:7341 - {productname} 3.10.0 release + +Issued 2023-11-28 + +{productname} release {producty} is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHSA-2023:7341[RHSA-2023:7341] and link:https://errata.devel.redhat.com/advisory/124676[RHSA-2023:7575] advisories. + +[id="release-cadence-310"] +== {productname} release cadence + +With the release of {productname} 3.10, the product has begun to align its release cadence and lifecycle with {ocp}. As a result, {productname} releases are now generally available (GA) within approximately four weeks of the most recent version of {ocp}. 
Customers can now expect the support lifecycle phases of {productname} to align with {ocp} releases.
+ +* With this update, a *Settings* page has been added for {productname} repositories. This page must be enabled by setting `FEATURE_UI_V2_REPO_SETTINGS` to `true` in your `config.yaml` file. This page allows users to create and set robot permissions, create events and notifications, set repository visibility, and delete repositories. + +* With this update, bulk managing robot account repository access is available on the {productname} v2 UI. Users can now easily add a robot account to multiple repositories using the v2 UI. + +* With this update, the default user repository, or namespace, now includes a *Robot accounts* tab. This allows users to easily create their own robot accounts. + +* With this update, the following alert messages have been added to confirm either the creation, or failure, of robot accounts and permission updates: + +** *Successfully updated repository permission* +** *Successfully created robot account with robot name: + * ++ +Alternatively, you can receive the following error if you try to create a robot account with the same name as another: *Error creating robot account* +** *Successfully deleted robot account* + +* With this update, a *Teams and membership* page has been added to the v2 UI. {productname} administrators can perform the following actions from this page: + +** Create new teams +** Manage or create new team members +** Set repository permissions +** Search for specific teams +** View teams, members of a team, or collaborators of a team + +* With this update, a *Default permissions* page has be been added to the v2 UI. This page allows {productname} administrators to set repository permissions. + +* With this update, a *Tag History* page has been added to the v2 UI. Additionally, {productname} administrators can add and manage labels for repositories, and set expiration dates for specified tags in a repository. 
+ +For more information about navigating the v2 UI and enabling, or using, these features, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#using-v2-ui[Using the {productname} v2 UI]. + +[id="clair-gc-manifests"] +=== Garbage collection of manifests for Clair + +Previously, Clair's indexer database was continually growing as it added storage when new manifests and layers were uploaded. This could cause the following issues for {productname} deployments: + +* Increased storage requirements +* Performance issues +* Increased storage management burden, requiring that administrators would monitor usage and develop a scaling strategy + +With this update, a new configuration field, `SECURITY_SCANNER_V4_MANIFEST_CLEANUP`, has been added. When this field is set to `true`, the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. As a result, manifest reports are removed from Clair's database. + +[id="managing-robot-accounts-quay"] +=== Managing {productname} robot accounts + +Prior to {productname} {producty}, all users were able to create robot accounts with unrestricted access. With this release, {productname} administrators can manage robot accounts by disallowing users to create new robot accounts. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] + +[id="new-quay-config-fields-310"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. + +[id="clair-gc-manifests-config-field"] +=== Clair garbage collection of manifests configuration field + +* **SECURITY_SCANNER_V4_MANIFEST_CLEANUP**. When set to `true` the {productname} garbage collector removes manifests that are not referenced by other tags or manifests. 
++ +*Default*: `True` + +[id="disabling-robot-account-config-field"] +=== Disabling robot accounts configuration field + +* **ROBOTS_DISALLOW**: When set to `true`, robot accounts are prevented from all interactions, as well as from being created ++ +*Default*: `False` + +[id="namespace-auto-pruning-config-fields"] +=== Namespace auto-pruning configuration field + +The following configuration fields have been added for the auto-pruning feature: + +** **FEATURE_AUTO_PRUNE**: When set to `True`, enables functionality related to the auto-pruning of tags. ++ +*Default:* `False` + +[id="v2-ui-repo-settings-config-field"] +=== {productname} v2 UI repository settings configuration field + +* **FEATURE_UI_V2_REPO_SETTINGS**: When set to `True`, enables repository settings in the {productname} v2 UI. ++ +*Default:* `False` + +[id="quay-operator-updates-310"] +== {productname} Operator + +The following updates have been made to the {productname} Operator: + +* The config editor has been removed from the {productname} Operator on {ocp} deployments. As a result, the `quay-config-editor` pod no longer deploys, and users cannot check the status of the config editor route. Additionally, the Config Editor Endpoint no longer generates on the {productname} Operator *Details* page. ++ +Users with existing {productname} Operators who are upgrading from 3.7, 3.8, or 3.9 to {producty} must manually remove the {productname} config editor by removing the `deployment`, `route,` `service`, and `secret` objects. For information about this procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/upgrade_red_hat_quay/operator-upgrade#config-editor-removal[Removing config editor objects on {productname} Operator]. ++ +By default, the config editor was deployed for every `QuayRegistry` instance, which made it difficult to establish an audit trail over the registry's configuration. 
Anyone with access to the namespace, config editor secret, and config editor route could use the editor to make changes to {productname}'s configuration, and their identity was no logged in the system. Removing the config editor forces all changes through the config bundle property of the `QuayRegistry` resource, which points to a secret, which is then subject to native Kubernetes auditing and logging. + +[id="known-issues-and-limitations-310"] +== {productname} 3.10 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="known-issues-310"] +=== {productname} 3.10 known issues + +* There is a known issue with the auto-pruning feature when pushing image tags with Cosign signatures. In some scenarios, for example, when each image tag uses a different Cosign key, the auto-pruner worker removes the image signature and only keeps the image tag. This occurs because {productname} considers image tags and the signature as two tags. The expected behavior of this feature is that the auto-pruner should consider the image tag and signature as one item, calculate only the image tag, and when the auto-pruner worker is configured in such a way that the tag is pruned, it also prunes the signature. This will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6380[*PROJQUAY-6380*]) + +* Currently, auditing for auto-pruning policy operations, including creating, updating, or deleting policies, is unavailable. This is a known issue and will be fixed in a future release of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6229[*PROJQUAY-6228*]) + +* Currently, the the auto-pruning worker prunes `ReadOnly` and mirror repositories, in addition to normal repositories. `ReadOnly` and mirror repositories should not be pruned automatically. This is a known issue and will be fixed in a future version of {productname}. 
(link:https://issues.redhat.com/browse/PROJQUAY-6235[*PROJQUAY-6235*]) + +* When upgrading the {productname} Operator from versions 3.7, 3.8, or 3.9 to {producty}, users must manually remove the {productname} config editor by removing the `deployment`, `route,` `service`, and `secret` objects. For information about this procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#config-editor-removal[Removing config editor objects on {productname} Operator]. + +* When creating a new team using the {productname} v2 UI, users are unable to add normal users to the new team while. This only occurs while setting up the new team. As a workaround, you can add users after the team has been created. Robot accounts are unaffected by this issue. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6336[*PROJQUAY-6336*]) + +* Sometimes, when creating a new default permission setting, the *Create default permission* button is disabled. As a workaround, you can try adjusting the *Applied to* setting in the *Create default permission* wizard. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6341[*PROJQUAY-6341*]) + +[id="limitations-310"] +=== {productname} 3.10 limitations + +* In this release, the following features are not supported on IBM Power (ppc64le) and IBM Z (s390x): +** Geo-Replication +** IPv6 Single stack/ Dual Stack +** Mirror registry +** Quay config editor - Mirror, MAG, Kinesis, Keystone, GitHub Enterprise, OIDC +** RedHat Quay V2 User Interface +** Deploy Red Hat Quay - High Availability is supported but the following is not: +*** Backing up and restoring on a standalone deployment +*** Migrating a standalone to operator deployment + +* Robot accounts are mandatory for repository mirroring. 
Setting the `ROBOTS_DISALLOW` configuration field to `true` breaks mirroring configurations. This will be fixed in a future version of {productname} + +//// + +Additionally, {productname} administrators can add robot accounts to allowlists when disallowing the creation of new robot accounts. This ensures operability of approved robot accounts. + +* Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` without allowlisting supplementary robot accounts will break mirroring configurations. This will be fixed in a future version of {productname} + +You must allowlist robot accounts with the `ROBOTS_WHITELIST` variable when managing robot accounts with the `ROBOTS_DISALLOW` field. Use the following reference when managing robot accounts: ++ +[source,yaml] +---- +ROBOTS_DISALLOW: true +ROBOTS_WHITELIST: + - quayadmin+robot1 + - quayadmin+robot2 + - quayadmin+robot3 +---- ++ +For more information, see. . . +//// + +[id="bug-fixes-310"] +== {productname} bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-6184[*PROJQUAY-6184*]. Add missing props for Create robot account modal +* link:https://issues.redhat.com/browse/PROJQUAY-6048[*PROJQUAY-6048*]. Poor UI performance with quotas enabled +* link:https://issues.redhat.com/browse/PROJQUAY-6010[*PROJQUAY-6010*]. Registry quota total worker fails to start due to import +* link:https://issues.redhat.com/browse/PROJQUAY-5212[*PROJQUAY-5212*]. Quay 3.8.1 can't mirror OCI images from Docker Hub +* link:https://issues.redhat.com/browse/PROJQUAY-2462[*PROJQUAY-2462*]. Consider changing the type of the removed_tag_expiration_s from integer to bigint +* link:https://issues.redhat.com/browse/PROJQUAY-2803[*PROJQUAY-2803*]. Quay should notify Clair when manifests are garbage collected +* link:https://issues.redhat.com/browse/PROJQUAY-5598[*PROJQUAY-5598*]. 
Log auditing tries to write to the database in read-only mode +* link:https://issues.redhat.com/browse/PROJQUAY-4126[*PROJQUAY-4126*]. Clair database growing +* link:https://issues.redhat.com/browse/PROJQUAY-5489[*PROJQUAY-5489*]. Pushing an artifact to Quay with oras binary results in a 502 +* link:https://issues.redhat.com/browse/PROJQUAY-3906[*PROJQUAY-3906*]. Quay can see the push image on Console after push image get error "Quota has been exceeded on namespace" + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. 
+ +.Technology Preview tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.10 | Quay 3.9 | Quay 3.8 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning overview] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Single site geo-replication removal] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk log forwarding] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#proc_manage-ipv6-dual-stack[FEATURE_LISTEN_IP_VERSION] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-super-users-enabling[LDAP_SUPERUSER_FILTER] +|General Availability +|General Availability +|General Availability + +| 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-restricted-users-enabling[LDAP_RESTRICTED_USER_FILTER] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-superusers-full-access[FEATURE_SUPERUSERS_FULL_ACCESS] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-global-readonly-super-users[GLOBAL_READONLY_SUPER_USERS] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-feature-restricted-users[FEATURE_RESTRICTED_USERS] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-restricted-users-whitelist[RESTRICTED_USERS_WHITELIST] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#clair-crda-configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +//// +[id="deprecated-features"] +=== Deprecated features +//// diff --git a/modules/rn_3_11_0.adoc b/modules/rn_3_11_0.adoc new file mode 100644 index 000000000..614b75ba9 --- /dev/null +++ b/modules/rn_3_11_0.adoc @@ -0,0 +1,448 @@ +:_content-type: CONCEPT +[id="release-notes-311"] += 
{productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-11-0"] +== RHBA-2024:1475 - {productname} 3.11.0 release + +Issued 2024-04-02 + +{productname} release {producty} is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:1475[RHBA-2024:1475] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. + +[id="release-cadence-311"] +== {productname} release cadence + +With the release of {productname} 3.10, the product has begun to align its release cadence and lifecycle with {ocp}. As a result, {productname} releases are now generally available (GA) within approximately four weeks of the most recent version of {ocp}. Customers can not expect the support lifecycle phases of {productname} to align with {ocp} releases. + +For more information, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-311"] +== {productname} documentation changes + +The {productname} configuration tool has been deprecated since version 3.10. With this release, references and procedures that use the configuration tool have been, or will be, removed. These procedures will remain in older versions of {productname}. + +[id="new-features-and-enhancements-311"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. + +[id="aws-sts-support-quay-311"] +=== Support for AWS STS on {productname} + +Support for Amazon Web Services (AWS) Security Token Service (STS) is now offered for {productname}. AWS STS is a web service for requesting temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users and for users that you authenticate, or _federated users_. 
This feature is useful for clusters using Amazon S3 as an object storage, allowing {productname} to use STS protocols to authenticate with Amazon S3, which can enhance the overall security of the cluster and help to ensure that access to sensitive data is properly authenticated and authorized. This feature is also available for {ocp} deployments. + +For more information about configuring AWS STS for standalone {productname} deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}] + +[id="auto-pruning-enhancements"] +=== {productname} auto-pruning enhancements + +With the release of {productname} 3.10, a new auto-pruning feature was released. With that feature, {productname} administrators could set up auto-pruning policies on namespaces for both users and organizations. + +With this release, auto-pruning policies can now be set up on specified repositories. This feature allows for image tags to be automatically deleted within a repository based on specified criteria. Additionally, {productname} administrators can set auto-pruning policies on repositories that they have `admin` privileges for. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="v2-ui-enhancements-311"] +=== {productname} v2 UI enhancements + +In {productname} 3.8, a new UI was introduced as a technology preview feature. With {productname} 3.11, the following enhancements have been made to the v2 UI. + +[id="usage-logs-ui-v2"] +==== {productname} v2 UI usage logs + +{productname} 3.11 adds functionality for usage logs when using the v2 UI. Usage logs provide the following information about your {productname} deployment: + +* *Monitoring of team activities*. 
Allows administrators to view team activities, such as team creation, membership changes, and role assignments. +* *Auditing of tag history actions*. Allows security auditors to audit tag history actions, including tag creations, updates, and deletions. +* *Tracking of repository label changes*. Allows repository owners to track changes to labels, including additions, modifications, and removals. +* *Monitoring of expiration settings*. Allows engineers to monitor actions related to tag expiration settings, such as setting expiration dates or disabling expiration for specific tags. + +Logs can be exported to an email address or to a callback URL, and are available at the Organization, repository, and namespace levels. + +For more information, see https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-usage-logs-v2-ui[Viewing usage logs on the {productname} v2 UI]. + +[id="dark-mode-ui-v2"] +==== {productname} v2 UI dark mode + +{productname} 3.11 offers users the ability to switch between light and dark modes when using the v2 UI. This feature also includes an automatic mode selection, which chooses between light or dark modes depending on the user's browser preference. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#selecting-dark-mode-ui[Selecting color theme preference on the {productname} v2 UI]. + +[id="builds-support-v2-ui"] +==== Builds support on {productname} v2 UI + +{productname} Builds are now supported when using the v2 UI. This feature must be enabled prior to building container images by setting `FEATURE_BUILD_SUPPORT: true` in your `config.yaml` file. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#starting-a-build[Creating a new build]. 
+ +[id="auto-pruning-repositories-ui"] +==== Auto-pruning repositories v2 UI + +{productname} 3.11 offers users the ability to create auto-pruning policies using the v2 UI. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="team-synchronization-oidc"] +=== Team synchronization support via {productname} OIDC + +This release allows administrators to leverage an OpenID Connect (OIDC) identity provider to synchronize team, or group, settings, so long as their OIDC provider supports the retrieval of group information from the ID token or the `/userinfo` endpoint. Administrators can easily apply repository permissions to sets of users without having to manually create and sync group definitions between {productname} and the OIDC group, which is not scalable. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] + +[id="quay-operator-updates-311"] +== {productname} Operator updates + +The following updates have been made to the {productname} Operator: + +[id="configurable-resources-managed-components"] +=== Configurable resource requests for {productname-ocp} managed components + +With this release, users can manually adjust the resource requests on {productname-ocp} for the following components that have pods running: + +* `quay` +* `clair` +* `mirroring` +* `clairpostgres` +* `postgres` + +This feature allows users to run smaller test clusters, or to request more resources upfront in order to avoid partially degraded `Quay` pods. 
+ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] + +[id="aws-sts-support-quay-operator-311"] +=== Support for AWS STS on {productname-ocp} + +Support for Amazon Web Services (AWS) Security Token Service (STS) is now offered for {productname} deployments on {ocp}. AWS STS is a web service for requesting temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users and for users that you authenticate, or _federated users_. This feature is useful for clusters using Amazon S3 as an object storage, allowing {productname} to use STS protocols to authenticate with Amazon S3, which can enhance the overall security of the cluster and help to ensure that access to sensitive data is properly authenticated and authorized. + +For more information about AWS STS for {productname-ocp}, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] + +[id="new-quay-config-fields-311"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. + +[id="aws-s3-sts-configuration-fields"] +=== Configuration fields for AWS S3 STS deployments + +The following configuration fields have been added when configuring AWS STS for {productname}. These fields are used when configuring AWS S3 storage for your deployment. + +* *.sts_role_arn*. The unique Amazon Resource Name (ARN) required when configuring AWS STS for {productname}. +* *.sts_user_access_key*. The generated AWS S3 user access key required when configuring AWS STS for {productname}. +* *.sts_user_secret_key*. 
The generated AWS S3 user secret key required when configuring AWS STS for {productname}. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-storage-aws-sts[AWS STS S3 storage]. + +[id="team-sync-configuration-field"] +=== Team synchronization configuration field + +The following configuration field has been added for the team synchronization via OIDC feature: + +* *PREFERRED_GROUP_CLAIM_NAME*: The key name within the OIDC token payload that holds information about the user's group memberships. + +[id="new-api-endpoints-311"] +== New API endpoints + +The following API endpoints have been added to {productname} {producty}: + +[id="repository-auto-pruning-policy-endpoint"] +=== Repository auto-pruning policy endpoints + +The repository auto-pruning policy feature introduces the following API endpoint: + +* `*/api/v1/repository/<organization_name>/<repository_name>/autoprunepolicy/` ++ +This API endpoint can be used with `POST`, `GET`, and `DELETE` calls to create, see, and delete auto-pruning policies on a repository for specific users in your organization. Note that you must have `admin` privileges on the repository that you are creating the policy for when using these commands. + +[id="known-issues-and-limitations-310"] +== {productname} 3.11 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="oidc-team-sync-known-issues"] +=== {productname} OIDC team synchronization known issues + +[id="unable-set-password-user-settings-page"] +==== Unable to set user passwords via the *User Settings* page + +There is a known issue when {productname} uses OIDC as the authentication type with Microsoft Entra ID (previously Azure Active Directory). + +After logging in to {productname}, users are unable to set a password via the *User Settings* page. 
This is necessary for authentication when using Docker/Podman CLI to perform image push or pull operations to the registry. + +As a workaround, you can use Docker CLI and App Token as credentials when authenticating via OIDC. These tokens, alongside robot tokens, serve as an alternative to passwords and are considered the prescribed method for providing access to {productname} when authenticating via OIDC. + +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-6754[*PROJQUAY-6754*]. + +[id="team-sync-removal-known-issue"] +==== Unable to sync change when OIDC user is removed from OIDC + +Currently, when an OIDC user is removed from their OIDC provider, the user is not removed from the team on {productname}. They are still able to use the robot account token and app token to push and pull images from the registry. This is the expected behavior, however this behavior will be changed in a future version of {productname}. +(link:https://issues.redhat.com/browse/PROJQUAY-6842[*PROJQUAY-6842*]) + +[id="entra-id-team-sync-known-issue"] +==== Object ID must be used when OIDC provider is Microsoft Entra ID + +When using Microsoft Entra ID as your OIDC provider, {productname} administrators must input the *Object ID* of the OIDC group instead of the group name. The v2 UI does not currently alert users that Microsoft Entra ID users must input the Object ID of the OIDC group. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6917[*PROJQUAY-6917*]) + +[id="sts-s3-storage-known-issue"] +=== STS S3 storage known issue + +When using Amazon Web Services (AWS) Security Token Service (STS) with proxy storage enabled, users are unable to pull images and the following error is returned: `Error: copying system image from manifest list: parsing image configuration: fetching blob: received unexpected HTTP status: 502 Bad Gateway`. 
This is a known issue and will be fixed in a future version of {productname}. + +[id="upgrading-38-311-limitation"] +=== Upgrading {productname-ocp} 3.8 directly to 3.11 limitation + +Upgrading {productname-ocp} from 3.8 to 3.11 does not work. Users must upgrade from {productname-ocp} from 3.8 to 3.9 or 3.10, and then proceed with the upgrade to 3.11. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrade_overview[Upgrade {productname}]. + +[id="configurable-resource-limitation"] +=== Configurable resource request limitation + +Attempting to set resource limitations for the `Quay` pod too low results in the pod being unable to boot up with the following statuses returned: `OOMKILLED` and `CrashLoopBackOff`. Resource limitations can not be set lower than the minimum requirement, which can be found on the link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] page. + +[id="v2-ui-known-issues-311"] +=== {productname} v2 UI known issues + +The {productname} team is aware of the following known issues on the v2 UI: + +* link:https://issues.redhat.com/browse/PROJQUAY-6910[*PROJQUAY-6910*]. The new UI can't group and stack the chart on usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6909[*PROJQUAY-6909*]. The new UI can't toggle the visibility of the chart on usage log +* link:https://issues.redhat.com/browse/PROJQUAY-6904[*PROJQUAY-6904*]. "Permanently delete" tag should not be restored on new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6899[*PROJQUAY-6899*]. The normal user can not delete organization in new UI when enable FEATURE_SUPERUSERS_FULL_ACCESS +* link:https://issues.redhat.com/browse/PROJQUAY-6892[*PROJQUAY-6892*]. 
The new UI should not invoke not required stripe and status page +* link:https://issues.redhat.com/browse/PROJQUAY-6884[*PROJQUAY-6884*]. The new UI should show the tip of slack Webhook URL when creating slack notification +* link:https://issues.redhat.com/browse/PROJQUAY-6882[*PROJQUAY-6882*]. The new UI global readonly super user can't see all organizations and image repos +* link:https://issues.redhat.com/browse/PROJQUAY-6881[*PROJQUAY-6881*]. The new UI can't show all operation types in the logs chart +* link:https://issues.redhat.com/browse/PROJQUAY-6861[*PROJQUAY-6861*]. The new UI "Last Modified" of organization always show N/A after target organization's setting is updated +* link:https://issues.redhat.com/browse/PROJQUAY-6860[*PROJQUAY-6860*]. The new UI update the time machine configuration of organization show NULL in usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6859[*PROJQUAY-6859*]. The new UI remove image repo permission show "undefined" for organization name in audit logs +* link:https://issues.redhat.com/browse/PROJQUAY-6854[*PROJQUAY-6854*]. "Device-based theme" doesn't work as design in Firefox +* link:https://issues.redhat.com/browse/PROJQUAY-6852[*PROJQUAY-6852*]. "Tag manifest with the branch or tag name" option in build trigger setup wizard should be checked by default. +* link:https://issues.redhat.com/browse/PROJQUAY-6832[*PROJQUAY-6832*]. The new UI should validate the OIDC group name when enable OIDC Directory Sync +* link:https://issues.redhat.com/browse/PROJQUAY-6831[*PROJQUAY-6831*]. The new UI should not show invited tab when the team is configured sync from OIDC group +* link:https://issues.redhat.com/browse/PROJQUAY-6830[*PROJQUAY-6830*]. The new UI should show the sync icon when the team is configured sync team members from OIDC Group +* link:https://issues.redhat.com/browse/PROJQUAY-6829[*PROJQUAY-6829*]. 
The new UI team member added to team sync from OIDC group should be audited in Organization logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6825[*PROJQUAY-6825*]. Build cancel operation log can not be displayed correctly in new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6812[*PROJQUAY-6812*]. The new UI the "performer by" is NULL of build image in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6810[*PROJQUAY-6810*]. The new UI should highlight the tag name with tag icon in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6808[*PROJQUAY-6808*]. The new UI can't click the robot account to show credentials in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6807[*PROJQUAY-6807*]. The new UI can't see the operations types in log page when quay is in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-6770[*PROJQUAY-6770*]. The new UI build image by uploading Docker file should support .tar.gz or .zip +* link:https://issues.redhat.com/browse/PROJQUAY-6769[*PROJQUAY-6769*]. The new UI should not display message "Trigger setup has already been completed" after build trigger setup completed +* link:https://issues.redhat.com/browse/PROJQUAY-6768[*PROJQUAY-6768*]. The new UI can't navigate back to current image repo from image build +* link:https://issues.redhat.com/browse/PROJQUAY-6767[*PROJQUAY-6767*]. The new UI can't download build logs +* link:https://issues.redhat.com/browse/PROJQUAY-6758[*PROJQUAY-6758*]. The new UI should display correct operation number when hover over different operation type +* link:https://issues.redhat.com/browse/PROJQUAY-6757[*PROJQUAY-6757*]. 
The new UI usage log should display the tag expiration time as date format + +[id="dark-mode-ui-v2-known-issues"] +==== {productname} v2 UI dark mode known issue + +If you are using the automatic mode selection, which chooses between light or dark modes depending on the user's browser preference, your operating system appearance is overridden by the browser website appearance setting. If you find that the device-based theme is not working as expected, check your browser appearance setting. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-6903[*PROJQUAY-6903*]) + +//// + +[id="tag-expiration-known-issue"] +=== Tag expiration behavior + +The following behavior was observed when setting expiration time on a tag that is *older* than two years. This is not a known issue, but is instead the default behavior. This will be changed in a future version of {productname} due to the potential issues that could arise for some users. + +When setting expiration time on a tag that is *older* than two years, the tag is expired immediately and subsequently garbage collected. It does not end up in the time machine, and is deleted from the registry. This issue occurs because, by default, the `LABELED_EXPIRATION_MAXIMUM` parameter is set to `104w`, or two years. + +As a temporary workaround, you can increase the default value of the `LABELED_EXPIRATION_MAXIMUM` parameter in your `config.yaml` file. For example: + +[source,yaml] +---- +# ... +LABELED_EXPIRATION_MAXIMUM: 156w +# ... +---- + +By increasing the value of this field to, for example, `156w` (three years), it is possible to set the expiration time for a tag that is up to 3 years old. For example, if a tag is created on March 25, 2021, the expiration date of the tag can be set up to March 24, 2024. 
The expiration date of the tag could not be set to later than this date, for example, July 2024, because it is over three years from when the tag was first created. + + +Additionally, {productname} administrators can add robot accounts to allowlists when disallowing the creation of new robot accounts. This ensures operability of approved robot accounts. + +* Robot accounts are mandatory for repository mirroring. Setting the `ROBOTS_DISALLOW` configuration field to `true` without allowlisting supplementary robot accounts will break mirroring configurations. This will be fixed in a future version of {productname} + +You must allowlist robot accounts with the `ROBOTS_WHITELIST` variable when managing robot accounts with the `ROBOTS_DISALLOW` field. Use the following reference when managing robot accounts: ++ +[source,yaml] +---- +ROBOTS_DISALLOW: true +ROBOTS_WHITELIST: + - quayadmin+robot1 + - quayadmin+robot2 + - quayadmin+robot3 +---- ++ +For more information, see. . . +//// + +[id="notable-technical-changes"] +== Notable technical changes + +The following technical changes have been made to {productname} in 3.11. + +[id="removal-support-pgbouncer"] +=== Removal of support for PgBouncer + +{productname} 3.11 does not support PgBouncer. + +[id="power-z-linuxone-support-matrix-changes"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix changes + +Support has changed for some IBM Power, IBM Z, and IBM® LinuxONE features. For more information, see the "IBM Power, IBM Z, and IBM® LinuxONE support matrix" table. + +[id="bug-fixes-311"] +== {productname} bug fixes + +The following issues were fixed with {productname} 3.11: + +* link:https://issues.redhat.com/browse/PROJQUAY-6586[*PROJQUAY-6586*]. Big layer upload fails on Ceph/RADOS driver. +* link:https://issues.redhat.com/browse/PROJQUAY-6648[*PROJQUAY-6648*]. Application token Docker/Podman login command fails on windows. +* link:https://issues.redhat.com/browse/PROJQUAY-6673[*PROJQUAY-6673*]. 
Apply IGNORE_UNKNOWN_MEDIATYPE to child manifests in manifest lists. +* link:https://issues.redhat.com/browse/PROJQUAY-6619[*PROJQUAY-6619*]. Duplicate scrollbars in various UI screens. +* link:https://issues.redhat.com/browse/PROJQUAY-6235[*PROJQUAY-6235*]. mirror and readonly repositories should not be pruned. +* link:https://issues.redhat.com/browse/PROJQUAY-6243[*PROJQUAY-6243*]. Unable to edit repository description on Quay.io. +* link:https://issues.redhat.com/browse/PROJQUAY-5793[*PROJQUAY-5793*]. Next page button in tags view does not work correctly when the repo contains manifests and manifests lists. +* link:https://issues.redhat.com/browse/PROJQUAY-6442[*PROJQUAY-6442*]. new ui: Breadcrumb for teams page. +* link:https://issues.redhat.com/browse/PROJQUAY-6247[*PROJQUAY-6247*]. [New UI] Menu item naming convention doesn't follow "First Letter Capital" style. +* link:https://issues.redhat.com/browse/PROJQUAY-6261[*PROJQUAY-6261*]. Throw Robot Account exist error when entering existing robot account. +* link:https://issues.redhat.com/browse/PROJQUAY-6577[*PROJQUAY-6577*]. Quay operator does not render proper Clair config.yaml if customization is applied. +* link:https://issues.redhat.com/browse/PROJQUAY-6699[*PROJQUAY-6699*]. Broken links in Red hat Quay operator description. +* link:https://issues.redhat.com/browse/PROJQUAY-6841[*PROJQUAY-6841*]. Unable to upload dockerfile for build with 405. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. 
For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. + +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.11 | Quay 3.10 | Quay 3.9 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] +|General Availability +|- +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}], link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} repository auto-pruning] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#configuring-dark-mode-ui[Configuring dark mode on the {productname} v2 UI] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] +|General Availability +|General Availability +|- + 
+|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Single site geo-replication removal] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk log forwarding] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#clair-crda-configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment +|Supported +|Supported + +|Geo-Replication (Standalone) +|Not Supported +|Supported + +|Geo-Replication (Operator) +|Not Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator 
deployment +|Supported +|Supported + +|Mirror registry +|Not Supported +|Not Supported + +|PostgreSQL connection pooling via pgBouncer +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_12_0.adoc b/modules/rn_3_12_0.adoc new file mode 100644 index 000000000..3e2d47a31 --- /dev/null +++ b/modules/rn_3_12_0.adoc @@ -0,0 +1,535 @@ +:_content-type: CONCEPT +[id="release-notes-312"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-12-1"] +== RHBA-2024:5039 - {productname} 3.12.1 release + +Issued 2024-08-14 + +{productname} release 3.12.1 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:5039[RHBA-2024:5039] advisory. + +[id="new-features-312-1"] +=== {productname} 3.12.1 new features + +With this release, NetApp ONTAP S3 object storage is now supported. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-netapp-ontap[NetApp ONTAP S3 object storage]. + +[id="known-issues-312-1"] +=== {productname} 3.12.1 known issues + +When using NetApp ONTAP S3 object storage, images with large layer sizes fail to push. This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-7462[*PROJQUAY-7462*]). + +[id="bug-fixes-312-1"] +=== {productname} 3.12.1 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-7177[PROJQUAY-7177]. 
Previously, global read-only superusers could not obtain resources from an organization when using the API. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7446[PROJQUAY-7446]. Previously, global read-only superusers could not obtain correct information when using the `listRepos` API endpoints. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7449[PROJQUAY-7449]. Previously, global read-only superusers could not use some `superuser` API endpoints. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7487[PROJQUAY-7487]. Previously, when a repository had multiple notifications enabled, the wrong type of event notification could be triggered. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-7491[PROJQUAY-7491]. When using NetApp's ONTAP S3 implementation, the following error could be returned: `presigned URL request computed using signature-version v2 is not supported by ONTAP-S3`. This error occurred because `boto` iterates over a map of authentications if none is requested, and returns `v2` because it is ordered earlier than `v4`. This issue has been fixed, and the error is no longer returned. + +* link:https://issues.redhat.com/browse/PROJQUAY-7578[PROJQUAY-7578]. On the 3.12.1 UI, the release notes pointed to {productname}'s 3.7 release. This has been fixed, and they now point to the current version. + + +[id="upgrade-312-1"] +=== Upgrading to {productname} 3.12.1 + +For information about upgrading standalone {productname} deployments, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html/upgrade_red_hat_quay/standalone-upgrade[Standalone upgrade]. + +For information about upgrading {productname-ocp}, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html/upgrade_red_hat_quay/operator-upgrade[Upgrading the {productname} Operator]. 
+ +[id="rn-3-12-0"] +== RHBA-2024:4525 - {productname} 3.12.0 release + +Issued 2024-07-23 + +{productname} release 3.12 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:4525[RHBA-2024:4525] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. + +[id="release-cadence-312"] +== {productname} release cadence + +With the release of {productname} 3.10, the product has begun to align its release cadence and lifecycle with {ocp}. As a result, {productname} releases are now generally available (GA) within approximately four weeks of the most recent version of {ocp}. Customers can now expect the support lifecycle phases of {productname} to align with {ocp} releases. + +For more information, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-312"] +== {productname} documentation changes + +The following documentation changes have been made with the {productname} {producty} release: + +* The link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index[Use {productname} guide] now includes accompanying API procedures for basic operations, such as creating and deleting repositories and organizations by using the API, access management, and so on. + +[id="new-features-and-enhancements-312"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. + +[id="splunk-event-collector-enhancements"] +=== Splunk event collector enhancements + +With this update, {productname} administrators can configure their deployment to forward action logs directly to a Splunk HTTP Event Collector (HEC). This enhancement enables seamless integration with Splunk for comprehensive log management and analysis. 
+ +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Configuring action log storage for Splunk]. + +[id="api-token-ownership"] +=== API token ownership + +Previously, when a {productname} organization owner created an API OAuth token, and that API OAuth token was used by another organization member, the action was logged to the creator of the token. This was undesirable for auditing purposes, notably in restricted environments where only dedicated registry administrators are organization owners. + +With this release, organization administrators can now assign OAuth API tokens to be created by other users with specific permissions. This allows the audit logs to be reflected accurately when the token is used by a user that has no organization administrative permissions to create an OAuth API token. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token]. + +[id="image-expiration-event"] +=== Image expiration notification + +Previously, {productname} administrators and users had no way of being alerted when an image was about to expire. With this update, an event can be configured to notify users when an image is about to expire. This helps {productname} users avoid unexpected pull failures. + +Image expiration event triggers can be configured to notify users through email, Slack, webhooks, and so on, and can be configured at the repository level. Triggers can be set for images expiring in any number of days, and can work in conjunction with the auto-pruning feature. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification]. 
+ +[id="auto-pruning-enhancements"] +=== {productname} auto-pruning enhancements + +With the release of {productname} 3.10, a new auto-pruning feature was released. With that feature, {productname} administrators could set up auto-pruning policies on namespaces for both users and organizations so that image tags were automatically deleted based on specified criteria. In {productname} 3.11, this feature was enhanced so that auto-pruning policies could be set up on specified repositories. + +With this release, default auto-pruning policies can now be set up at the registry level. Default auto-pruning policies set up at the registry level can be configured on new and existing organizations. This feature saves {productname} administrators time, effort, and storage by enforcing registry-wide rules. + +{productname} administrators must enable this feature by updating their `config.yaml` file to include the `DEFAULT_NAMESPACE_AUTOPRUNE_POLICY` configuration field and one of `number_of_tags` or `creation_date` methods. Currently, this feature cannot be enabled by using the v2 UI or the API. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="oci-compliance-updates"] +=== Open Container Initiative 1.1 implementation + +{productname} now supports the Open Container Initiative (OCI) 1.1 distribution spec version 1.1. Key highlights of this update include support for the following areas: + +* Enhanced capabilities for handling various types of artifacts, which provides better flexibility and compliance with OCI 1.1. +* Introduction of new reference types, which allows more descriptive referencing of artifacts. +* Introduction of the _referrers API_, which aids in the retrieval and management of referrers, which helps improve container image management. 
+* Enhance UI to better visualize referrers, which makes it easier for users to track and manage dependencies. + +For more information about OCI spec 1.1, see link:https://github.com/opencontainers/distribution-spec/tree/v1.1.0-rc1?tab=readme-ov-file#oci-distribution-specification[OCI Distribution Specification]. + +For more information about OCI support and {productname}, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/oci-intro[Open Container Initiative support]. + +[id="metadata-support-annotations"] +=== Metadata support through annotations + +Some OCI media types do not utilize labels and, as such, critical information such as expiration timestamps are not included. With this release, {productname} now supports metadata passed through annotations to accommodate OCI media types that do not include these labels for metadata transmission. Tools such as ORAS (OCI Registry as Storage) can now be used to embed information with artifact types to help ensure that images operate properly, for example, to expire. + +For more information about OCI media types and how adding an annotation with ORAS works, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/oci-intro[Open Container Initiative support]. + +[id="v2-ui-enhancement"] +=== {productname} v2 UI enhancements + +The following enhancements have been made to the {productname} v2 UI. + +[id="robot-account-creation-enhancement"] +==== Robot account creation enhancement + +* When creating a robot account with the {productname} v2 UI, administrators can now specify that the kubernetes runtime use a secret only for a specific organization or repository. This option can be selected by clicking the name of your robot account on the v2 UI, and then clicking the *Kubernetes* tab. 
+ +[id="new-quay-config-fields-312"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. + +[id="oauth-reassign-configuration-field"] +=== OAuth access token reassignment configuration field + +The following configuration field has been added for reassigning OAuth access tokens: +|=== +| Field | Type | Description + +| *FEATURE_ASSIGN_OAUTH_TOKEN* | Boolean | Allows organization administrators to assign OAuth tokens to other users. +|=== + +.Example OAuth access token reassignment YAML +[source,yaml] +---- +# ... +FEATURE_ASSIGN_OAUTH_TOKEN: true +# ... +---- + +[id="notification-configuration-field"] +=== Notification interval configuration field + +The following configuration field has been added to enhance {productname} notifications: + +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* | Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. By default, this field is set to notify {productname} users of events happening every 5 hours. +|=== + +.Example notification re-run YAML +[source,yaml] +---- +# ... +NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES: 10 +# ... +---- + +[id="registry-auto-prune-configuration-fields"] +=== Registry auto-pruning configuration fields + +The following configuration fields have been added to {productname} auto-pruning feature: + +|=== +| Field | Type | Description +| *NOTIFICATION_TASK_RUN_MINIMUM_INTERVAL_MINUTES* |Integer | The interval, in minutes, that defines the frequency to re-run notifications for expiring images. + + + +**Default:** `300` + +|*DEFAULT_NAMESPACE_AUTOPRUNE_POLICY* | Object | The default organization-wide auto-prune policy. + +|{nbsp}{nbsp}{nbsp} *.method: number_of_tags* | Object | The option specifying the number of tags to keep. 
+ +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *method: number_of_tags*, denotes the number of tags to keep. + + +For example, to keep two tags, specify `2`. + +|{nbsp}{nbsp}{nbsp} *.method: creation_date* | Object | The option specifying the duration of which to keep tags. +|{nbsp}{nbsp}{nbsp} *.value: * | Integer | When used with *creation_date*, denotes how long to keep tags. + + +Can be set to seconds (`s`), days (`d`), months (`m`), weeks (`w`), or years (`y`). Must include a valid integer. For example, to keep tags for one year, specify `1y`. + +|*AUTO_PRUNING_DEFAULT_POLICY_POLL_PERIOD* |Integer | The period in which the auto-pruner worker runs at the registry level. By default, it is set to run one time per day (one time per 24 hours). Value must be in seconds. + +|=== + +.Example registry auto-prune policy by number of tags +[source,yaml] +---- +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: number_of_tags + value: 10 +---- + +.Example registry auto-prune policy by creation date +[source,yaml] +---- +DEFAULT_NAMESPACE_AUTOPRUNE_POLICY: + method: creation_date + value: 1y +---- + +[id="image-vulnerability-notification-field"] +=== Vulnerability detection notification configuration field + +The following configuration field has been added to notify users on detected vulnerabilities based on security level: + +|=== +| Field | Type | Description +| *NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX* | String | Set minimal security level for new notifications on detected vulnerabilities. Avoids creation of large number of notifications after first index. If not defined, defaults to `High`. Available options include `Critical`, `High`, `Medium`, `Low`, `Negligible`, and `Unknown`. 
+|=== + +.Example image vulnerability notification YAML +[source,yaml] +---- +NOTIFICATION_MIN_SEVERITY_ON_NEW_INDEX: High +---- + +[id="oci-referrers-api-configuration-field"] +=== OCI referrers API configuration field + +The following configuration field allows users to list OCI referrers of a manifest under a repository by using the v2 API: + +|=== +| Field | Type | Description +| *FEATURE_REFERRERS_API* | Boolean | Enables OCI 1.1's referrers API. +|=== + +.Example OCI referrers enablement YAML +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: True +# ... +---- + +[id="disable-strict-logging-configuration-field"] +=== Disable strict logging configuration field + +The following configuration field has been added to address when external systems like Splunk or ElasticSearch are configured as audit log destinations but are intermittently unavailable. When set to `True`, the logging event is logged to the stdout instead. + +|=== +| Field | Type | Description +| *ALLOW_WITHOUT_STRICT_LOGGING* | Boolean | When set to `True`, if the external log system like Splunk or ElasticSearch is intermittently unavailable, allows users to push images normally. Events are logged to the stdout instead. Overrides `ALLOW_PULLS_WITHOUT_STRICT_LOGGING` if set. +|=== + +.Example strict logging YAML +[source,yaml] +---- +# ... +ALLOW_WITHOUT_STRICT_LOGGING: True +# ... +---- + +[id="clair-index-layer-size-configuration-field"] +=== Clair indexing layer size configuration field + +The following configuration field has been added for the Clair security scanner, which allows {productname} administrators to set a maximum layer size allowed for indexing. + +|=== +| Field | Type | Description +| *SECURITY_SCANNER_V4_INDEX_MAX_LAYER_SIZE* | String | The maximum layer size allowed for indexing. If the layer size exceeds the configured size, the {productname} UI returns the following message: `The manifest for this tag has layer(s) that are too large to index by the Quay Security Scanner`. 
The default is `8G`, and the maximum recommended is `10G`. + + + *Example*: `8G` +|=== + +[id="new-api-endpoints-312"] +== API endpoint enhancements + +[id="new-changeorgquota-createorgquota-endpoints"] +=== New changeOrganizationQuota and createOrganizationQuota endpoints: + +The following optional API field has been added to the `changeOrganizationQuota` and `createOrganizationQuota` endpoints: + +|=== +|Name|Description|Schema + +|**limits** + +_optional_|Human readable storage capacity of the organization. Accepts SI units like Mi, Gi, or Ti, as well as non-standard units like GB or MB. Must be mutually exclusive with `limit_bytes`.|string +|=== + +Use this field to set specific limits when creating or changing an organization's quota limit. For more information about these endpoints, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeorganizationquota[changeOrganizationQuota] and link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#createorganizationquota[createOrganizationQuota]. + +[id="new-referrer-endpoints"] +=== New referrer API endpoint + +The following API endpoint allows users to obtain referrer artifact information: + +|=== +|Type|Name|Description|Schema +|path|**orgname** + +_required_|The name of the organization|string +|path|**repository** + +_required_|The full path of the repository. e.g. namespace/name|string +|path|**referrers** + +_required_| Looks up the OCI referrers of a manifest under a repository.|string +|**manifest_digest** + +_required_|The digest of the manifest|string +|=== + +To use this field, you must generate a v2 API OAuth token and set `FEATURE_REFERRERS_API: true` in your `config.yaml` file.
For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-v2-oauth-access-token[Creating an OCI referrers OAuth access token]. + +[id="known-issues-and-limitations-312"] +== {productname} 3.12 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="v2-ui-known-issues-312"] +=== {productname} v2 UI known issues + +The {productname} team is aware of the following known issues on the v2 UI: + +* link:https://issues.redhat.com/browse/PROJQUAY-6910[*PROJQUAY-6910*]. The new UI can't group and stack the chart on usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6909[*PROJQUAY-6909*]. The new UI can't toggle the visibility of the chart on usage log +* link:https://issues.redhat.com/browse/PROJQUAY-6904[*PROJQUAY-6904*]. "Permanently delete" tag should not be restored on new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6899[*PROJQUAY-6899*]. The normal user can not delete organization in new UI when enable FEATURE_SUPERUSERS_FULL_ACCESS +* link:https://issues.redhat.com/browse/PROJQUAY-6892[*PROJQUAY-6892*]. The new UI should not invoke not required stripe and status page +* link:https://issues.redhat.com/browse/PROJQUAY-6884[*PROJQUAY-6884*]. The new UI should show the tip of slack Webhook URL when creating slack notification +* link:https://issues.redhat.com/browse/PROJQUAY-6882[*PROJQUAY-6882*]. The new UI global readonly super user can't see all organizations and image repos +* link:https://issues.redhat.com/browse/PROJQUAY-6881[*PROJQUAY-6881*]. The new UI can't show all operation types in the logs chart +* link:https://issues.redhat.com/browse/PROJQUAY-6861[*PROJQUAY-6861*]. The new UI "Last Modified" of organization always show N/A after target organization's setting is updated +* link:https://issues.redhat.com/browse/PROJQUAY-6860[*PROJQUAY-6860*]. 
The new UI update the time machine configuration of organization show NULL in usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6859[*PROJQUAY-6859*]. The new UI remove image repo permission show "undefined" for organization name in audit logs +* link:https://issues.redhat.com/browse/PROJQUAY-6852[*PROJQUAY-6852*]. "Tag manifest with the branch or tag name" option in build trigger setup wizard should be checked by default. +* link:https://issues.redhat.com/browse/PROJQUAY-6832[*PROJQUAY-6832*]. The new UI should validate the OIDC group name when enable OIDC Directory Sync +* link:https://issues.redhat.com/browse/PROJQUAY-6830[*PROJQUAY-6830*]. The new UI should show the sync icon when the team is configured sync team members from OIDC Group +* link:https://issues.redhat.com/browse/PROJQUAY-6829[*PROJQUAY-6829*]. The new UI team member added to team sync from OIDC group should be audited in Organization logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6825[*PROJQUAY-6825*]. Build cancel operation log can not be displayed correctly in new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6812[*PROJQUAY-6812*]. The new UI the "performer by" is NULL of build image in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6810[*PROJQUAY-6810*]. The new UI should highlight the tag name with tag icon in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6808[*PROJQUAY-6808*]. The new UI can't click the robot account to show credentials in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6807[*PROJQUAY-6807*]. The new UI can't see the operations types in log page when quay is in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-6770[*PROJQUAY-6770*]. The new UI build image by uploading Docker file should support .tar.gz or .zip +* link:https://issues.redhat.com/browse/PROJQUAY-6769[*PROJQUAY-6769*].
The new UI should not display message "Trigger setup has already been completed" after build trigger setup completed +* link:https://issues.redhat.com/browse/PROJQUAY-6768[*PROJQUAY-6768*]. The new UI can't navigate back to current image repo from image build +* link:https://issues.redhat.com/browse/PROJQUAY-6767[*PROJQUAY-6767*]. The new UI can't download build logs +* link:https://issues.redhat.com/browse/PROJQUAY-6758[*PROJQUAY-6758*]. The new UI should display correct operation number when hover over different operation type +* link:https://issues.redhat.com/browse/PROJQUAY-6757[*PROJQUAY-6757*]. The new UI usage log should display the tag expiration time as date format + +[id="limitations-312"] +=== {productname} 3.12 limitations + +The following features are not supported on IBM Power (`ppc64le`) or IBM Z (`s390x`): + +* Ceph RadosGW storage +* Splunk HTTP Event Collector (HEC) + +[id="bug-fixes-312"] +== {productname} bug fixes + +The following issues were fixed with {productname} {producty}: + +* link:https://issues.redhat.com/browse/PROJQUAY-6763[*PROJQUAY-6763*]. Quay 3.11 new UI operations of enable/disable team sync from OIDC group should be audited +* link:https://issues.redhat.com/browse/PROJQUAY-6826[*PROJQUAY-6826*]. Log histogram can't be hidden in the new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6855[*PROJQUAY-6855*]. Quay 3.11 new UI no usage log to audit operations under user namespace +* link:https://issues.redhat.com/browse/PROJQUAY-6857[*PROJQUAY-6857*]. Quay 3.11 new UI usage log chart covered the operations types list +* link:https://issues.redhat.com/browse/PROJQUAY-6931[*PROJQUAY-6931*]. OCI-compliant pagination +* link:https://issues.redhat.com/browse/PROJQUAY-6972[*PROJQUAY-6972*]. Quay 3.11 new UI can't open repository page when Quay has 2k orgs and 2k image repositories +* link:https://issues.redhat.com/browse/PROJQUAY-7037[*PROJQUAY-7037*]. 
Can't get slack and email notification when package vulnerability found +* link:https://issues.redhat.com/browse/PROJQUAY-7069[*PROJQUAY-7069*]. Invalid time format error messages and layout glitches in tag expiration modal +* link:https://issues.redhat.com/browse/PROJQUAY-7107[*PROJQUAY-7107*]. Quay.io overview page does not work in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-7239[*PROJQUAY-7239*]. Quay logging exception when caching specific `security_reports` +* link:https://issues.redhat.com/browse/PROJQUAY-7304[*PROJQUAY-7304*]. security: Add Vary header to 404 responses +* link:https://issues.redhat.com/browse/PROJQUAY-6973[*PROJQUAY-6973*]. Add OCI Pagination +* link:https://issues.redhat.com/browse/PROJQUAY-6974[*PROJQUAY-6974*]. Set a default auto-pruning policy at the registry level +* link:https://issues.redhat.com/browse/PROJQUAY-6976[*PROJQUAY-6976*]. Org owner can change ownership of API tokens +* link:https://issues.redhat.com/browse/PROJQUAY-6977[*PROJQUAY-6977*]. Trigger event on image expiration +* link:https://issues.redhat.com/browse/PROJQUAY-6979[*PROJQUAY-6979*]. Annotation Parsing +* link:https://issues.redhat.com/browse/PROJQUAY-6980[*PROJQUAY-6980*]. Add support for a global read only superuser +* link:https://issues.redhat.com/browse/PROJQUAY-7360[*PROJQUAY-7360*]. Missing index on subject_backfilled field in manifest table +* link:https://issues.redhat.com/browse/PROJQUAY-7393[*PROJQUAY-7393*]. Create backfill index concurrently +* link:https://issues.redhat.com/browse/PROJQUAY-7116[*PROJQUAY-7116*]. Allow to ignore audit logging failures + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. 
Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. + +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.12 | Quay 3.11 | Quay 3.10 + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk HTTP Event Collector (HEC)] support +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#oci-intro[Open Container Initiative 1.1 support] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] +|General Availability +|General Availability +|- + 
+|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}], link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} repository auto-pruning] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#configuring-dark-mode-ui[Configuring dark mode on the {productname} v2 UI] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/use_red_hat_quay/use-quay-manage-repo#disabling-robot-account[Disabling robot accounts] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} namespace auto-pruning] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment 
+|Supported +|Supported + +|Clair Disconnected +|Supported +|Supported + +|Geo-Replication (Standalone) +|Supported +|Supported + +|Geo-Replication (Operator) +|Not Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator deployment +|Supported +|Supported + +|Mirror registry +|Not Supported +|Not Supported + +|PostgreSQL connection pooling via pgBouncer +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface +|Supported +|Supported + +|Quay Disconnected +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_13_0.adoc b/modules/rn_3_13_0.adoc new file mode 100644 index 000000000..4e76e258d --- /dev/null +++ b/modules/rn_3_13_0.adoc @@ -0,0 +1,506 @@ +:_content-type: CONCEPT +[id="release-notes-313"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-13-4"] +== RHBA-2025:1079 - {productname} 3.13.4 release + +Issued 2025-02-20 + +{productname} release 3.13.4 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2025:1079[RHBA-2025:1079] advisory. + +[id="rn-3-13-3"] +== RHBA-2025:0301 - {productname} 3.13.3 release + +Issued 2025-01-20 + +{productname} release 3.13.3 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2025:0301[RHBA-2025:0301] advisory. + +[id="bug-fixes-313-3"] +=== {productname} 3.13.3 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-8336[PROJQUAY-8336].
Previously, when using {productname} with managed Quay and Clair PostgreSQL databases, Red Hat Advanced Cluster Security would scan all running `Quay` pods and report `High Image Vulnerability in Quay PostgreSQL database and Clair PostgreSQL database`. This issue has been resolved. + +[id="rn-3-13-2"] +== RHBA-2024:10967 - {productname} 3.13.2 release + +Issued 2024-12-17 + +{productname} release 3.13.2 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:10967[RHBA-2024:10967] advisory. + +[id="enhancements-313-2"] +=== {productname} 3.13.2 new features + +With this release, a pull-through cache organization can now be created when using the {productname} v2 UI. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#red-hat-quay-proxy-cache-procedure[Using {productname} to proxy a remote registry]. + +[id="known-issue-313-2"] +=== {productname} 3.13.2 known issues + +When using the pull-through proxy feature in {productname} with quota management enabled, and the organization quota fills up, it is expected that {productname} removes the least recently used image to free up space for new cached entries. However, images pulled by digest are not evicted automatically when the quota is exceeded, which causes subsequent pull attempts to return a `Quota has been exceeded on namespace` error. + +As a temporary workaround, you can run a bash shell inside of the {productname} database pod to make digest-pulled images visible for eviction with the following setting: `update tag set hidden = 0;`. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-8071[PROJQUAY-8071]. + +[id="bug-fixes-313-2"] +=== {productname} 3.13.2 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-8273[PROJQUAY-8273], link:https://issues.redhat.com/browse/PROJQUAY-6474[PROJQUAY-6474].
When deploying {productname} with a custom `HorizontalPodAutoscaler` component and then setting the component to `managed: false` in the `QuayRegistry` custom resource definition (CRD), the {productname} Operator continuously terminates and resets the `minReplicas` value to 2 for `mirror` and `clair` components. To work around this issue, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-unmanaged-hpa[Using unmanaged Horizontal Pod Autoscalers]. + +* link:https://issues.redhat.com/browse/PROJQUAY-8208[PROJQUAY-8208]. Previously, {productname} would return a `501` error on repository or organization creation when the authorization type was set to OIDC and restricted users were set. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-8269[PROJQUAY-8269]. Previously, on the {productname} UI, the OAuth scopes page suggested that scopes could be applied to robot accounts. This was not the case. Wording on the OAuth scopes page of the UI has been fixed. + +[id="rn-3-13-1"] +== RHBA-2024:9478 - {productname} 3.13.1 release + +Issued 2024-11-18 + +{productname} release 3.13.1 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:9478[RHBA-2024:9478] advisory. + +[id="information-upgrading-3-13-1"] +== Information about upgrading to 3.13.1 + +Previously, when attempting to upgrade to {productname} 3.13, if FIPS mode was enabled for your {ocp} cluster with Clair enabled, Clair would not function in your cluster. This issue was resolved in version 3.13.1. Upgrading to {productname} 3.13 automatically upgrades users to version 3.13.1 so that this issue is avoided. Additionally, if you are upgrading from 3.13 to 3.13.1 and FIPS was enabled, upgrading to 3.13.1 resolves the issue.
(link:https://issues.redhat.com/browse/PROJQUAY-8185[*PROJQUAY-8185*]) + +[id="enhancements-313-1"] +=== {productname} 3.13.1 enhancements + +With the release of {productname} 3.13.1, Hitachi Content Platform (HCP) is now supported for use as a storage backend. This allows organizations to leverage HCP for scalable, secure, and reliable object storage within their {productname} registry deployments. + +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-hcp[HCP Object Storage]. + +[id="known-issues-313-1"] +=== {productname} 3.13.1 known issues + +When using Hitachi Content Platform for your object storage, attempting to push an image with a large layer to a {productname} registry results in the following error: + +[source,text] +---- +An error occurred (NoSuchUpload) when calling the CompleteMultipartUpload operation: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. +---- + +This is a known issue and will be fixed in a future version of {productname}. + +[id="bug-fixes-313-1"] +=== {productname} 3.13.1 bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-8185[PROJQUAY-8185]. Previously, when attempting to upgrade {productname-ocp} to 3.13 with FIPS mode enabled, the upgrade would fail for deployments using Clair. This issue has been resolved. Upgrading to 3.13.1 does not fail for {productname-ocp} using Clair with FIPS mode enabled. + +* link:https://issues.redhat.com/browse/PROJQUAY-8024[PROJQUAY-8024]. Previously, using Hitachi HCP v9.7 as your storage provider would return errors when attempting to pull images. This issue has been resolved. + +* link:https://issues.redhat.com/browse/PROJQUAY-5086[PROJQUAY-5086].
Previously, {productname-ocp} would produce information about horizontal pod autoscalers (HPAs) for some components (for example, `Clair`, `Redis`, `PostgreSQL`, and `ObjectStorage`) when they were unmanaged by the Operator. This issue has been resolved and information about HPAs is no longer reported for unmanaged components. + +[id="rn-3-13-0"] +== RHBA-2024:8408 - {productname} 3.13.0 release + +Issued 2024-10-30 + +{productname} release 3.13 is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2024:8408[RHBA-2024:8408] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. For information about the release cadence of {productname}, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-313"] +== {productname} documentation changes + +The following documentation changes have been made with the {productname} {producty} release: + +* The {productname} _Builders_ feature that was originally documented in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index[Using {productname} guide] has been moved into a new, dedicated book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/index[Builders and image automation]". + +* The {productname} _Builders_ feature that was originally documented in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#red-hat-quay-builders-enhancement[{productname} Operator features] has been moved into a new, dedicated book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/builders_and_image_automation/index[Builders and image automation]".
+ +* A new book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index[Securing {productname}]" has been created. This book covers SSL and TLS for {productname}, and adding additional certificate authorities (CAs) to your deployment. More content will be added to this book in the future. + +* A new book titled "link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/managing_access_and_permissions/index[Managing access and permissions]" has been created. This book covers topics related to access controls, repository visibility, and robot accounts by using the UI and the API. More content will be added to this book in the future. + +[id="upgrading-quay-313"] +== Upgrading to {productname} 3.13 + +With {productname} 3.13, the `volumeSize` parameter has been implemented for use with the `clairpostgres` component of the `QuayRegistry` custom resource definition (CRD). This replaces the `volumeSize` parameter that was previously used for the `clair` component of the same CRD. + +If your {productname} 3.12 `QuayRegistry` custom resource definition (CRD) implemented a volume override for the `clair` component, you must ensure that the `volumeSize` field is included under the `clairpostgres` component of the `QuayRegistry` CRD. + +[IMPORTANT] +==== +Failure to move `volumeSize` from the `clair` component to the `clairpostgres` component will result in a failed upgrade to version 3.13. +==== + +For example: + +[source,yaml] +---- +spec: + components: + - kind: clair + managed: true + - kind: clairpostgres + managed: true + overrides: + volumeSize: +---- + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/upgrade_red_hat_quay/index[Upgrade {productname}]. + +[id="new-features-and-enhancements-313"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. 
+ +[id="auto-pruning-enhancements"] +=== {productname} auto-pruning enhancements + +With the release of {productname} 3.10, a new auto-pruning feature was released. With that feature, {productname} administrators could set up auto-pruning policies on namespaces for both users and organizations so that image tags were automatically deleted based on specified criteria. In {productname} 3.11, this feature was enhanced so that auto-pruning policies could be set up on specified repositories. + +With {productname} 3.12, default auto-pruning policies default auto-pruning policies were made to be set up at the registry level on new and existing configurations, which saved {productname} administrators time, effort, and storage by enforcing registry-wide rules. + +With the release of {productname} {producty}, the following enhancements have been made to the auto-pruning feature. + +[id="tag-specification-patterns"] +==== Tag specification patterns in auto-pruning policies + +Previously, the {productname} auto-pruning feature could not target or exclude specific image tags. With the release of {productname} {producty}, it is now possible to specify a _regular expression_, or _regex_ to match a subset of tags for both organization- and repository-level auto-pruning policies. This allows {productname} administrators more granular auto-pruning policies to target only certain image tags for removal. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#autopruning-regular-expressions[Using regular expressions with auto-pruning]. + +[id="multiple-auto-pruning-policies"] +==== Multiple auto-pruning policies + +Previously, {productname} only supported a single auto-pruning policy per organization and repository. With the release of {productname} {producty}, multiple auto-pruning policies can now be applied to an organization or a repository. 
These auto-pruning policies can be based on different tag naming (regex) patterns to cater for the different life cycles of images in the same repository or organization. This feature provides more flexibility when automating the image life cycle in your repository. + +Additional auto-pruning policies can be added on the {productname} v2 UI by clicking *Add Policy* on the *Auto-Pruning Policies* page. They can also be added by using the API. + +For more information about setting auto-prune policies, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="keyless-auth-robot-accounts"] +=== Keyless authentication with robot accounts + +In previous versions of {productname}, robot account tokens were valid for the lifetime of the token unless deleted or regenerated. Tokens that do not expire have security implications for users who do not want to store long-term passwords or manage the deletion, or regeneration, or new authentication tokens. + +With {productname} {producty}, {productname} administrators are provided the ability to exchange {productname} robot account tokens for an external OIDC token. This allows robot accounts to leverage short-lived, or _ephemeral tokens_, that last one hour. Ephemeral tokens are refreshed regularly and can be used to authenticate individual transactions. + +This feature greatly enhances the security of your {productname} registry by mitigating the possibility of robot token exposure by removing the tokens after one hour. + +For more information, see https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts]. + +[id="quay-operator-updates-313"] +== {productname-ocp} new features and enhancements + +The following updates have been made to {productname-ocp}. 
+ +[id="certificate-based-auth-quay-postgresql"] +=== Support for certificate-based authentication between {productname} and PostgreSQL + +With this release, support for certificate-based authentication between {productname} and PostgreSQL has been added. This allows {productname} administrators to supply their own SSL/TLS certificates that can be used for client-side authentication with PostgreSQL or CloudSQL. This provides enhanced security and allows for easier automation for your {productname} registry. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#cert-based-auth-quay-sql[Certificate-based authentication between {productname} and SQL]. + +[id="v2-ui-enhancement"] +=== {productname} v2 UI enhancements + +The following enhancements have been made to the {productname} v2 UI. + +[id="robot-federation-v2-ui-enhancement"] +==== Robot federation selection + +A new configuration page, *Set robot federation*, has been added to the {productname} v2 UI. This can be found by navigating to your organization or repository's robot account, clicking the menu kebab, and then clicking *Set robot federation*. This page is used when configuring keyless authentication with robot accounts, and allows you to add multiple OIDC providers to a single robot account. + +For more information, see https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts]. + +[id="new-quay-config-fields-313"] +== New {productname} configuration fields + +The following configuration fields have been added to {productname} {producty}. 
+ +[id="disable-pushes-configuration-field"] +=== Disabling pushes to the {productname} registry configuration field + +In some cases, a read-only option for {productname} is not possible since it requires inserting a service key and other manual configuration changes. With the release of {productname} 3.13, a new configuration field has been added: `DISABLE_PUSHES`. + +When `DISABLE_PUSHES` is set to `true`, users are unable to push images or image tags to the registry when using the CLI. Most other registry operations continue as normal when this feature is enabled by using the {productname} UI. For example, changing tags, editing a repository, robot account creation and deletion, user creation, and so on are all possible by using the UI. + +When `DISABLE_PUSHES` is set to `true`, the {productname} garbage collector is disabled. As a result, when `PERMANENTLY_DELETE_TAGS` is enabled, using the {productname} UI to permanently delete a tag does not result in the immediate deletion of a tag. Instead, the tag stays in the repository until `DISABLE_PUSHES` is set to `false`, which re-enables the garbage collector. {productname} administrators should be aware of this caveat when using `DISABLE_PUSHES` and `PERMANENTLY_DELETE_TAGS` together. + +This field might be useful in some situations such as when {productname} administrators want to calculate their registry's quota and disable image pushing until after calculation has completed. With this method, administrators can avoid putting putting the whole registry in `read-only` mode, which affects the database, so that most operations can still be done. + +|=== +| Field | Type | Description + +|*DISABLE_PUSHES* |Boolean | Disables pushes of new content to the registry while retaining all other functionality. Differs from `read-only` mode because database is not set as `read-only`. Defaults to `false`. +|=== + +.Example DISABLE_PUSHES configuration field +[source,yaml] +---- +# ... +DISABLE_PUSHES: true +# ... 
+---- + +[id="new-api-endpoints-313"] +== API endpoint enhancements + +[id="new-auto-prune-policy-endpoints"] +=== New autoPrunePolicy endpoints + +`tagPattern` and `tagPatternMatches` API parameters have been added to the following API endpoints: + +* `createOrganizationAutoPrunePolicy` +* `updateOrganizationAutoPrunePolicy` +* `createRepositoryAutoPrunePolicy` +* `updateRepositoryAutoPrunePolicy` +* `createUserAutoPrunePolicy` +* `updateUserAutoPrunePolicy` + +These fields enhance the auto-pruning feature by allowing {productname} administrators more control over what images are pruned. The following table provides descriptions of these fields: + +|=== +|Name|Description|Schema + +|**tagPattern** + +_optional_|Tags only matching this pattern (regex) will be pruned. |string + +|**tagPatternMatches** + +_optional_|Determine whether pruned tags should or should not match the tagPattern. |boolean +|=== + +For example API commands, see link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/manage_red_hat_quay/index#red-hat-quay-namespace-auto-pruning-overview[{productname} auto-pruning overview]. + +[id="federated-robot-token-api-endpoints"] +=== New federated robot token API endpoints + +The following API endpoints have been added for the keyless authentication with robot accounts feature: + +* `GET oauth2/federation/robot/token`. Use this API endpoint to return an expiring robot token using the robot identity federation mechanism. + +* `POST /api/v1/organization/{orgname}/robots/{robot_shortname}/federation`. Use this API endpoint to create a federation configuration for the specified organization robot. + +[id="notable-technical-changes-313"] +== {productname} 3.13 notable technical changes + +Clair now requires its PostgreSQL database to be version 15. For standalone {productname} deployments, administrators must manually migrate their database over from PostgreSQL version 13 to version 15. 
For more information about this procedure, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/upgrade_red_hat_quay/index#upgrading-clair-postgresql-database[Upgrading the Clair PostgreSQL database]. + +For {productname-ocp} deployments, this update is automatically handled by the Operator so long as your Clair PostgreSQL database is currently using version 13. + +[id="known-issues-and-limitations-313"] +== {productname} 3.13 known issues and limitations + +The following sections note known issues and limitations for {productname} {producty}. + +[id="clair-suse-enterprise-known-issue"] +=== Clair vulnerability report known issue + +When pushing Suse Enterprise Linux Images with *HIGH* image vulnerabilities, Clair 4.8.0 does not report these vulnerabilities. This is a known issue and will be fixed in a future version of {productname}. + +[id="fips-mode-known-issue"] +=== FIPS mode known issue + +If FIPS mode is enabled for your {ocp} cluster and you use Clair, you must not upgrade the {productname} Operator to version {producty}. If you upgrade, Clair will not function in your cluster. (link:https://issues.redhat.com/browse/PROJQUAY-8185[*PROJQUAY-8185*]) + +[id="registry-auto-pruning-known-issue"] +=== Registry auto-pruning known issues + +The following known issues apply to the auto-pruning feature. + +[id="policy-prioritization-known-issue"] +==== Policy prioritization known issue + +Currently, the auto-pruning feature prioritizes the following order when configured: + +. Method: `creation_date` + `organization wide` +. Method: `creation_date` + `repository wide` +. Method: `number_of_tags` + `organization wide` +. Method: `number_of_tags` + `repository wide` + +This means that the auto-pruner first prioritizes, for example, an organization-wide policy set to expire tags by their creation date before it prunes images by the number of tags that it has. + +There is a known issue when configuring a registry-wide auto-pruning policy. 
If {productname} administrators configure a `number_of_tags` policy before a `creation_date` policy, it is possible to prune more than the intended set for the `number_of_tags` policy. This might lead to situations where a repository removes certain image tags unexpectedly. + +This is not an issue for organization or repository-wide auto-prune policies. This known issue only exists at the registry level. It will be fixed in a future version of {productname}. + +[id="unrecognizable-auto-prune-tag-patterns"] +==== Unrecognizable auto-prune tag patterns + +When creating an auto-prune policy, the pruner cannot recognize `\b` and `\B` patterns. This is a common behavior with regular expression patterns, wherein `\b` and `\B` match empty strings. {productname} administrators should avoid using _regex_ patterns that use `\B` and `\b` to avoid this issue. (link:https://issues.redhat.com/browse/PROJQUAY-8089[*PROJQUAY-8089*]) + +[id="v2-ui-known-issues-313"] +=== {productname} v2 UI known issues + +The {productname} team is aware of the following known issues on the v2 UI: + +* link:https://issues.redhat.com/browse/PROJQUAY-6910[*PROJQUAY-6910*]. The new UI can't group and stack the chart on usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6909[*PROJQUAY-6909*]. The new UI can't toggle the visibility of the chart on usage log +* link:https://issues.redhat.com/browse/PROJQUAY-6904[*PROJQUAY-6904*]. "Permanently delete" tag should not be restored on new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6899[*PROJQUAY-6899*]. The normal user can not delete organization in new UI when enable FEATURE_SUPERUSERS_FULL_ACCESS +* link:https://issues.redhat.com/browse/PROJQUAY-6892[*PROJQUAY-6892*]. The new UI should not invoke not required stripe and status page +* link:https://issues.redhat.com/browse/PROJQUAY-6884[*PROJQUAY-6884*]. 
The new UI should show the tip of slack Webhook URL when creating slack notification +* link:https://issues.redhat.com/browse/PROJQUAY-6882[*PROJQUAY-6882*]. The new UI global readonly super user can't see all organizations and image repos +* link:https://issues.redhat.com/browse/PROJQUAY-6881[*PROJQUAY-6881*]. The new UI can't show all operation types in the logs chart +* link:https://issues.redhat.com/browse/PROJQUAY-6861[*PROJQUAY-6861*]. The new UI "Last Modified" of organization always show N/A after target organization's setting is updated +* link:https://issues.redhat.com/browse/PROJQUAY-6860[*PROJQUAY-6860*]. The new UI update the time machine configuration of organization show NULL in usage logs +* link:https://issues.redhat.com/browse/PROJQUAY-6859[*PROJQUAY-6859*]. Thenew UI remove image repo permission show "undefined" for organization name in audit logs +* link:https://issues.redhat.com/browse/PROJQUAY-6852[*PROJQUAY-6852*]. "Tag manifest with the branch or tag name" option in build trigger setup wizard should be checked by default. +* link:https://issues.redhat.com/browse/PROJQUAY-6832[*PROJQUAY-6832*]. The new UI should validate the OIDC group name when enable OIDC Directory Sync +* link:https://issues.redhat.com/browse/PROJQUAY-6830[*PROJQUAY-6830*]. The new UI should show the sync icon when the team is configured sync team members from OIDC Group +* link:https://issues.redhat.com/browse/PROJQUAY-6829[*PROJQUAY-6829*]. The new UI team member added to team sync from OIDC group should be audited in Organization logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6825[*PROJQUAY-6825*]. Build cancel operation log can not be displayed correctly in new UI +* link:https://issues.redhat.com/browse/PROJQUAY-6812[*PROJQUAY-6812*]. The new UI the "performer by" is NULL of build image in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6810[*PROJQUAY-6810*]. 
The new UI should highlight the tag name with tag icon in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6808[*PROJQUAY-6808*]. The new UI can't click the robot account to show credentials in logs page +* link:https://issues.redhat.com/browse/PROJQUAY-6807[*PROJQUAY-6807*]. The new UI can't see the operations types in log page when quay is in dark mode +* link:https://issues.redhat.com/browse/PROJQUAY-6770[*PROJQUAY-6770*]. The new UI build image by uploading Docker file should support .tar.gz or .zip +* link:https://issues.redhat.com/browse/PROJQUAY-6769[*PROJQUAY-6769*]. The new UI should not display message "Trigger setup has already been completed" after build trigger setup completed +* link:https://issues.redhat.com/browse/PROJQUAY-6768[*PROJQUAY-6768*]. The new UI can't navigate back to current image repo from image build +* link:https://issues.redhat.com/browse/PROJQUAY-6767[*PROJQUAY-6767*]. The new UI can't download build logs +* link:https://issues.redhat.com/browse/PROJQUAY-6758[*PROJQUAY-6758*]. The new UI should display correct operation number when hover over different operation type +* link:https://issues.redhat.com/browse/PROJQUAY-6757[*PROJQUAY-6757*]. The new UI usage log should display the tag expiration time as date format + +[id="bug-fixes-313"] +== {productname} bug fixes + +The following issues were fixed with {productname} {producty}: + +* link:https://issues.redhat.com/browse/PROJQUAY-5681[*PROJQUAY-5681*]. Previously, when configuring an image repository with *Events and Notifications* to receive a Slack notification for *Push to Repository* and *Package Vulnerability Found*, no notification was returned of *new critical image vulnerability found*. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-7244[*PROJQUAY-7244*]. Previously, it was not possible to filter for repositories under specific organizations. 
This issue has been resolved, and you can now filter for repositories under specific organizations. +* link:https://issues.redhat.com/browse/PROJQUAY-7388[*PROJQUAY-7388*]. Previously, when {productname} was configured with OIDC authentication using Microsoft Azure Entra ID and team sync was enabled, removing the team sync resulted in the usage logs chart displaying *Undefined*. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-7430[*PROJQUAY-7430*]. Some public container image registries, for example, Google Cloud Registry, generate longer passwords for the login. When this happens, {productname} could not mirror images from those registries because the password length exceeded the maximum allowed in the {productname} database. ++ +The actual length limit imposed by the encryption mechanism is lower than `9000`. This implies that while the database can hold up to `9000` characters, the effective limit during encryption is actually `6000`, and be calculated as follows: {Max Password Length} = {field\_max\_length} - {_RESERVED\_FIELD\_SPACE}. A password length of `6000` ensures compatibility with AWS ECR and most registries. + +* link:https://issues.redhat.com/browse/PROJQUAY-7599[*PROJQUAY-7599*]. Previously, attempting to delete a manifest using a tag name and the {productname} v2 API resulted in a 405 error code. This was because there was no `delete_manifest_by_tagname` operation in the API. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-7606[*PROJQUAY-7606*]. Users can now create a new team using the dashes (`-`) via the v2 UI. Previously, this could only be done using the API. +* link:https://issues.redhat.com/browse/PROJQUAY-7686[*PROJQUAY-7686*]. Previously, the vulnerability page showed vertical scroll bars when provided URLs in the advisories were too big, which caused difficulties in reading information from the page. This issue has been resolved. 
+* link:https://issues.redhat.com/browse/PROJQUAY-7982[*PROJQUAY-7982*]. There was a bug in the console service when using {quayio} for the first time. When attempting to create a user correlated with the console's user, clicking *Confirm username* refreshed the page and opened the same modal. This issue has been resolved. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. 
+ +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.13 | Quay 3.12 | Quay 3.11 + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#cert-based-auth-quay-sql[Certificate-based authentication between {productname} and SQL] +|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk HTTP Event Collector (HEC)] support +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#oci-intro[Open Container Initiative 1.1 support] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#oidc-team-sync[Team synchronization for {productname} OIDC deployments] +|General Availability +|General Availability +|General Availability + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#configuring-resources-managed-components[Configuring resources for managed components on {ocp}] +|General 
Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#configuring-aws-sts-quay[Configuring AWS STS for {productname}], link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_operator_features/index#configuring-aws-sts-quay[Configuring AWS STS for {productname-ocp}] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/manage_red_hat_quay/red-hat-quay-namespace-auto-pruning-overview[{productname} repository auto-pruning] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment +|Supported +|Supported + +|Clair Disconnected +|Supported +|Supported + +|Geo-Replication (Standalone) +|Supported +|Supported + +|Geo-Replication (Operator) +|Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator deployment +|Supported +|Supported + +|Mirror registry +|Supported +|Supported + +|PostgreSQL connection pooling via pgBouncer +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface 
+|Supported +|Supported + +|Quay Disconnected +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_14_0.adoc b/modules/rn_3_14_0.adoc new file mode 100644 index 000000000..e5a9c552e --- /dev/null +++ b/modules/rn_3_14_0.adoc @@ -0,0 +1,287 @@ +:_content-type: CONCEPT +[id="release-notes-314"] += {productname} release notes + +The following sections detail _y_ and _z_ stream release information. + +[id="rn-3-14-0"] +== RHBA-2024:8408 - {productname} 3.14.0 release + +Issued 2025-04-02 + +{productname} release {producty} is now available with Clair {clairproductminv}. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2025:2467[RHBA-2025:2467] advisory. For the most recent compatibility matrix, see link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Tested Integrations]. For information the release cadence of {productname}, see the link:https://access.redhat.com/support/policy/updates/rhquay/[{productname} Life Cycle Policy]. + +[id="documentation-changes-314"] +== {productname} documentation changes + +The following documentation changes have been made with the {productname} 3.14 release: + +* The {productname} API guide has been updated and split into two books: + +** link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_guide/index[{productname} API guide]. This book contains an overview of the {productname} API, an overview of token types (OAuth 2.0 access tokens, robot account tokens, and OCI referrers OAuth access tokens), how to enable and use the {productname} API, suggestions for token management, and example commands for leveraging API endpoints to execute commands. This book is useful if you are new to the {productname} API or want information about its token types and how to leverage the API. 
+ +** link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API reference]. This book contains all API reference endpoints and accompanying example commands for those endpoints. This book is useful if you are already familiar with using the {productname} API. + +[id="new-features-and-enhancements-314"] +== {productname} new features and enhancements + +The following updates have been made to {productname}. + +[id="clair-enhancements"] +=== Clair enhancements + +With this release, Clair indexer data is now included with downstream builds. This allows {productname} administrators to more easily reference indexers in the `clair-config.yaml` file when running Clair in an air-gapped or disconnected environment. + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/vulnerability_reporting_with_clair_on_red_hat_quay/index#clair-disconnected-environments[Clair in disconnected environments]. + +[id="model-card-rendering"] +=== Model card rendering on the v2 UI + +With the release of {productname} 3.14, the v2 UI now renders model card information for machine learning models that include a model card. When a manifest has a certain annotation (for example, `application/x-mlmodel`) and a model card stored as a layer in the manifest, a *Model Card* tab is displayed on the tag's information page. The information on the *Model Card* page provides users with comprehensive insights into each model, and can help enhance a user's understanding of models stored within their registry. + +[NOTE] +==== +The *Model Card* rendering page is only available on the {productname} v2 UI. +==== + +To view model card information, {productname} users or administrators must push an artifact to a repository. The artifact must have have an accompanying model card. This information renders under *Repository* -> ** -> *Model Card*. 
+ +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[Viewing model card information by using the v2 UI]. + +[id="tag-expiration-enhancement"] +=== Tag expiration enhancement + +Previously, when configuring tag expiration for {productname}, the `yearly` option was unavailable on the {productname} v2 UI. With this update, users can now configure default tag expiration to occur yearly on the {productname} v2 UI. This can be set by using the {productname} UI or in your `config.yaml` file. For example: + +[source,yaml] +---- +DEFAULT_TAG_EXPIRATION: 1y +TAG_EXPIRATION_OPTIONS: + - 1y +---- + +[id="new-quay-config-fields-314"] +== {productname} configuration fields updates and changes + +The following configuration fields have been added to {productname} 3.14. + +[id="model-card-rendering-configuration-field"] +=== Model card rendering configuration fields + +The following configuration fields have been added for the model card rendering feature on the {productname} v2 UI: + +|=== +| Field | Type | Description + +|*FEATURE_UI_MODELCARD* |Boolean | Enables *Modelcard* image tab in UI. Defaults to `true`. +|*UI_MODELCARD_ARTIFACT_TYPE* | String | Defines the modelcard artifact type. +|*UI_MODELCARD_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. +|*UI_MODELCARD_LAYER_ANNOTATION* |Object | This optional field defines the layer annotation of the model card stored in an OCI image. 
+|=== + +These configuration fields are enabled and set by default in your `config.yaml` file: + +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel +UI_MODELCARD_ANNOTATION: + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: + org.opencontainers.image.title: README.md +---- + +For more information, see link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[Viewing model card information by using the v2 UI]. + +[id="ignore-unknown-mediatype-removal"] +=== IGNORE_UNKNOWN_MEDIATYPES configuration field removal + +The `IGNORE_UNKNOWN_MEDIATYPES` configuration field has been removed. By default, {productname} accepts all artifact types. + +[id="new-quay-footer-fields"] +=== New {productname} footer fields + +The following configuration fields have been added to the original (v1) UI. You can use these fields to customize the footer of your on-prem v1 UI. + +[NOTE] +==== +These fields are currently unavailable on the {productname} v2 UI. +==== + +|=== +| Field | Type | Description + +|*FOOTER_LINKS* |Object | Enable customization of footer links in {productname}'s UI for on-prem installations. + +|*.TERMS_OF_SERVICE_URL* | String | Custom terms of service for on-prem installations. + + + +**Example:** + +`https://index.hr` + +|*.PRIVACY_POLICY_URL* | String | Custom privacy policy for on-prem installations. + + + +**Example:** + +`https://index.hr` +|*.SECURITY_URL* | String | Custom security page for on-prem installations. + + + +**Example:** + +`https://index.hr` + +| **.ABOUT_URL** | String | Custom about page for on-prem installations. 
+ + + +**Example:** + +`https://index.hr` +|=== + +.Example footer links YAML +[source,yaml] +---- +FOOTER_LINKS: + "TERMS_OF_SERVICE_URL": "https://www.index.hr" + "PRIVACY_POLICY_URL": "https://www.example.hr" + "SECURITY_URL": "https://www.example.hr" + "ABOUT_URL": "https://www.example.hr" +---- + +[id="new-api-endpoints-314"] +== API endpoint enhancements + +No new API endpoints were added in {productname} 3.14. + +[id="known-issues-and-limitations-314"] +== {productname} {producty} known issues and limitations + +The following sections note known issues and limitations for {productname} 3.14. + +[id="unsupported-image-types-stuck"] +=== Unsupported image types stuck in querying status + +When pushing an unsupported image type, for example, an AI model, to a {productname} registry, the *Security Report* and *Packages* pages on the UI fail to load. This occurs because these image types are stuck in a `Querying` status and, as a result, the pages of these tabs are left blank. This is a known issue and will be fixed in a future version of {productname}. + +[id="bug-fixes-314"] +== {productname} bug fixes + +The following issues were fixed with {productname} 3.14: + +* link:https://issues.redhat.com/browse/PROJQUAY-8532[*PROJQUAY-8532*]. Previously, there was an issue when updating Clair when deployed with Amazon Web Services (AWS) Relational Database Service (RDS) from version 12.19 to 15.7. After upgrading, scanning new images would result images being stuck in a `Queued` state and be unable to procedure a bug report. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-8131[*PROJQUAY-8131*]. Previously, users could receive an unknown exception when trying to serialize manifest type for caching on a referrer's endpoint. . This resulted in the following error: `Object of type Manifest is not JSON serializable`. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-8272[*PROJQUAY-8272*]. 
Previously, nested indexes, or indexes referring to another index, were broken in {productname}. This could result in the following response when pushing to a registry: `Error response from registry: recognizable error message not found: PUT "https://quay.io/v2/arewm/oci-spec-1217/manifests/nested-index": response status code 500: Internal Server Error`. This issue has been resolved. +* link:https://issues.redhat.com/browse/PROJQUAY-8559[*PROJQUAY-8559*]. Previously, a password field in NGINX logs was not obfuscated. This issue has been resolved, and the `repeatPassword` value is hidden. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries with the same status older than the latest three releases. + +.New features tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.14 | Quay 3.13 | Quay 3.12 + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#viewing-model-card-information[Viewing model card information by using the v2 UI]. 
+|General Availability +|- +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#keyless-authentication-robot-accounts[Keyless authentication with robot accounts] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/securing_red_hat_quay/index#cert-based-auth-quay-sql[Certificate-based authentication between {productname} and SQL] +|General Availability +|General Availability +|- + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk HTTP Event Collector (HEC)] support +|General Availability +|General Availability +|General Availability + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#oci-intro[Open Container Initiative 1.1 support] +|General Availability +|General Availability +|General Availability + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#reassigning-oauth-access-token[Reassigning an OAuth access token] +|General Availability +|General Availability +|General Availability + +|link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html/use_red_hat_quay/index#creating-image-expiration-notification[Creating an image expiration notification] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +[id="ibm-power-z-linuxone-support-matrix"] +=== IBM Power, IBM Z, and IBM® LinuxONE support matrix + +.list of supported and unsupported features +[cols="3,1,1",options="header"] +|=== +|Feature |IBM Power |IBM Z and IBM(R) LinuxONE + +|Allow team synchronization via 
OIDC on Azure +|Not Supported +|Not Supported + +|Backing up and restoring on a standalone deployment +|Supported +|Supported + +|Clair Disconnected +|Supported +|Supported + +|Geo-Replication (Standalone) +|Supported +|Supported + +|Geo-Replication (Operator) +|Supported +|Not Supported + +|IPv6 +|Not Supported +|Not Supported + +|Migrating a standalone to operator deployment +|Supported +|Supported + +|Mirror registry +|Supported +|Supported + +|Quay config editor - mirror, OIDC +|Supported +|Supported + +|Quay config editor - MAG, Kinesis, Keystone, GitHub Enterprise +|Not Supported +|Not Supported + +|Quay config editor - Red Hat Quay V2 User Interface +|Supported +|Supported + +|Quay Disconnected +|Supported +|Supported + +|Repo Mirroring +|Supported +|Supported +|=== \ No newline at end of file diff --git a/modules/rn_3_20.adoc b/modules/rn_3_20.adoc new file mode 100644 index 000000000..9b0f91b05 --- /dev/null +++ b/modules/rn_3_20.adoc @@ -0,0 +1,69 @@ +[[rn-3-202]] +== Version 3.2.2 +Release Date: April 27, 2020 + +Fixed: + +* Clair correctly downloads vulnerabilities even if one fails +(see link:https://issues.redhat.com/browse/PROJQUAY-567[PROJQUAY-567]). + + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-202[Link to this Release] + +[[rn-3-201]] +== Version 3.2.1 +Release Date: February 10, 2020 + +Fixed: + +* git: Remote code execution in recursive clones with nested submodules Security. +(See link:https://access.redhat.com/security/cve/CVE-2019-1387[CVE-2019-1387].) + +* yarn: nodejs-yarn: Install functionality can be abused to generate arbitrary symlinks. +(See link:https://access.redhat.com/security/cve/CVE-2019-10773[CVE-2019-10773].) 
+ +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-201[Link to this Release] + +[[rn-3-200]] +== Version 3.2.0 +Release Date: December 17, 2019 + +Added: + +* New required manual config.yaml entry “DATABASE_SECRET_KEY” will be used to encrypt all robot tokens in the database (CVE-2019-10205) +* New Container Security Operator integrating security scanning into OpenShift Container Platform. +* Quay Setup Operator is now generally available (GA). +* Repository mirroring is now generally available (GA). +* Support for OpenShift Container Storage 4 leveraging NooBaa Multi-Cloud Gateway. +* Improved repository mirror logging. +* Notifications enabled for repository mirror start, finish, and error. +* Remove validation from repository mirror proxy config. +* Two guides were added to {productname} documentation: +Deploy {productname} on OpenShift (Setup Operator) and {productname} API Guide. + +Fixed: + +* Fixed for broken scrollbars in UI on pages such as repository tags. +* Fix inability to star a repository + +Deprecated: + +* "rkt" conversion: This feature is now marked as deprecated in the {productname} UI. +Expect the feature to be removed completely in the near future. + +* Bittorrent: This feature is deprecated and will not appear in the +{productname} UI unless it is already configured in an existing {productname} `config.yaml`. +Expect the feature to be removed completely in the near future. + +* V1 Push Support: This feature is deprecated. For {productname} v3.1, the config UI marked +this feature as follows: + ++ +``` +Docker V1 protocol support has been officially deprecated by +Quay and support will be removed in the next major version. +It is strongly suggested to have this flag enabled and to +restrict access to V1 push. 
+``` + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-200[Link to this Release] diff --git a/modules/rn_3_30.adoc b/modules/rn_3_30.adoc new file mode 100644 index 000000000..12b329c77 --- /dev/null +++ b/modules/rn_3_30.adoc @@ -0,0 +1,171 @@ +[[rn-3-304]] +== Version 3.3.4 + +Fixed: + +* quay-bridge-operator references correct version + +[[rn-3-303]] +== Version 3.3.3 + +Fixed: + +* clair-jwt: fixed NVD streams +* CVE-2020-27831 quay: email notifications authorization bypass +* CVE-2020-27832 quay: persistent XSS in repository notification display + +[[rn-3-302]] +== Version 3.3.2 +* Version unreleased due to internal tooling issues + +[[rn-3-301]] +== Version 3.3.1 +Release Date: August 20, 2020 + +Fixed: + +* Config app installs supplied TLS certs at startup. This fix allows services that require certs to be configured properly (such as LDAP and storage). +* Tech preview clair-v4 correctly reindexes manifests. +* Build triggers can disclose robot account names and existence of private repos within namespaces (CVE-2020-14313) + + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-301[Link to this Release] + +[[rn-3-300]] +== Version 3.3.0 + +=== quay / clair-jwt / quay-builder / clair + +Added: + +* (Tech Preview) New clair image available for non-production use (see docs) +* Quay now runs as the default user inside the container instead of as root. +* New configurable tagging options for builds, including tagging templates and ability to disable default “latest” and tag/branch behavior +* Configuration UI editing after validating through the “Save Configuration” button. +* Configuration app now supports configuring Elasticsearch for usage logs (and optionally via Kinesis). 
+* Ability to configure how long between “fresh login” checks +* Ability to add an additional filter for LDAP users on lookup +* Manifest labels displayed in the UI with links in them are now clickable to go to the URL +* The environment variable CONFIG_READ_ONLY_FIELDS can be specified to mark redis or the hostname configuration as read-only in the Quay Configuration Application’s UI. #310 +* (Tech Preview) Support for OCI indexes and manifests. Add the following to your config.yaml: + ++ +``` +# Feature Flag: Whether OCI manifest support should be enabled generally. +FEATURE_GENERAL_OCI_SUPPORT = True +``` + +* (Experimental) Support for pushing and pulling charts via Helm V3’s experimental system. Requires that OCI manifest support is enabled. Add the following to your config.yaml: + ++ +``` +# Feature Flag: Whether OCI manifest support should be enabled generally. +FEATURE_GENERAL_OCI_SUPPORT = True +# Feature Flag: Whether to allow Helm OCI content types. +# See: https://helm.sh/docs/topics/registries/ +FEATURE_EXPERIMENTAL_HELM_OCI_SUPPORT = True +``` + + +Fixed: + +* Repository mirror tag patterns handle whitespace between comma separated values. +* Fresh login checks were being used when unnecessary +* Georeplication from one Azure region to the other now uses the correct bucket and credentials +* Auth token handling to match recent GitHub API change +* Repository and namespace deletion now occurs in the background, ensuring they don’t fail +* No longer return “down converted” manifests on pull-by-digest +* Tags expiring in the future are now marked correctly as such in the tag history panel +* A number of performance improvements around various database queries +* Status codes of various Docker V2 APIs to conform with the spec +* Repository names now conform to the standard. Only lowercase letters, numbers, underscores, and hyphens are valid. + +Deprecated: + +* "rkt" conversion: This feature is now marked as deprecated in the {productname} UI. 
Expect the feature to be removed completely in the near future. +* Bittorrent: This feature is deprecated and will not appear in the {productname} UI unless it is already configured in an existing {productname} config.yaml. This feature will be removed in the next version of Quay. +* V1 Push Support: Docker V1 protocol support has been officially deprecated. Expect this feature to be removed in the near future. +* Squashed image support: This feature is deprecated. This feature will be removed in the next version of Quay. +* images API: This API is deprecated and replaced by the manifest APIs. Expect this API to be removed completely in the near future. + +Note: + +* Do not use "Locally mounted directory" Storage Engine for any production configurations. Mounted NFS volumes are not supported. Local storage is meant for test-only installations. + +Known Issues: + +* Containers running as repository mirrors may lock under certain conditions; restart the containers as needed. + + +=== quay-operator + +Note: + +* Only supported on OCP-4.2 or newer +* UI supported on OCP-4.3 or newer + +Added: + +* Enhanced logic for Quay Configuration route +* Quay SSL Certificate uses TLS secret type +* Updated example Quay Ecosystem Custom Resource examples +* Retrofitted how external access is specified and managed +* New Schema for defining externalAccess as a field in QuayEcosystem +* Support for additional external access types (LoadBalancer and Ingress) +* Add additional roles to CSV to manage ingresses. +* Always use Port 8443 for Quay Config App's health probes. +* The Quay Config App now continues running by default. +* The Redis and Hostname configuration are marked "Read Only" in the Quay Configuration App. +* Support for managing superusers. +* Add ability to inject certificates, and any other file, into the Quay and Clair secrets. +* (OpenShift) SCC management refinement. Removal of SCCs when QuayEcosystem is deleted through the use of finalizers. 
+* Certificates and other secrets are now mounted in a way that is compatible with Quay and Quay's Config App. +* The operator now verifies the configuration for the Hostname, Redis, and Postgres when Quay's configuration secret is changed. + +Fixed: + +* Resolved issues with GitHub Actions CI/CD pipeline +* Resolved issue when specifying multiple replicas of a given component +* The "Repo Mirror" pod is now health-checked using the correct port. + +Known Issues: + +* Configuring Storage Geo-Replication for Azure in the CR causes the deployment to fail. +* The Hostname is set to an IP Address when using Load Balancers on GCP which causes the self-signed certificate validation to fail in Quay’s Config Application. +* Using the Postgres or Redis images from Dockerhub will fail. +* For advanced persistence configurations, Quay's PROXY_STORAGE feature is not exposed through the CR and can only be managed through Quay's Config app. +* Quay's Config App will always use TLS; it is not possible to configure it as HTTP-only in the CR. +* Node Ports do not currently work. +* Cloudfront cannot be properly configured using the CR. It can be managed using Quay's configuration app. +* This version of the operator cannot be used for an automatic upgrade due to schema changes in the CR. 
+ + +=== quay-container-security-operator + +Note: + +* Only supported on OCP-4.2 or newer + +Added: + +* View Quay Security Scanner image vulnerability information for images running in a cluster using the OpenShift UI + + +=== quay-openshift-bridge-operator + +Note: + +* Only supported on OCP-4.2 or newer + +Added: + +* Synchronization of OpenShift namespaces as Quay organizations, including managing robot account credentials +* Synchronization of OpenShift ImageStreams as Quay repositories +* Automatically rewrite new Builds making use of ImageStreams to output to Quay +* Automatically import ImageStream tag once build completes + + + + + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-300[Link to this Release] diff --git a/modules/rn_3_40.adoc b/modules/rn_3_40.adoc new file mode 100644 index 000000000..b5e16b85a --- /dev/null +++ b/modules/rn_3_40.adoc @@ -0,0 +1,259 @@ +[[rn-3-407]] +== Version 3.4.7 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2479[PROJQUAY-2479]. Update downstream Operator extensions API to "v1" for 3.4. + + +=== quay-operator + +Known issues: + +* link:https://issues.redhat.com/browse/PROJQUAY-2921[PROJQUAY-2921]. Quay App route hostname is changed when upgrade from 3.4.7 to 3.6.2. As a result, you should avoid upgrading from v3.4.7 to v3.5.* or to v3.6.*. 
+ +[[rn-3-406]] +== Version 3.4.6 + +=== quay / clair / quay-builder + +Fixed: + +* Quay config validation fails on PostgreSQL 11 backed by SSL +* Quay config validation fails on SSL database connection on PostgreSQL 12 +with SCRAM password authentication +* Quay config validation fails on Azure PostgreSQL DB with SSL + + +=== quay-operator + +* Quay operator upgrade pods running all workers instead of just database +upgrade + + + + +[[rn-3-405]] +== Version 3.4.5 + +Fixed: + +* Remove requirement to include Kubernetes internal service hostnames as SAN entries in user-provided TLS to fix upgrade from v3.3 + + +[[rn-3-404]] +== Version 3.4.4 + +=== quay / clair / quay-builder + +Fixed: + +* Fix Clair python recognize known vulnerabilities link:https://issues.redhat.com/browse/PROJQUAY-1775[PROJQUAY-1775] + +[[rn-3-403]] +== Version 3.4.3 + +=== quay / clair / quay-builder + +Fixed: + +* Fix Quay security scanning backfill API link:https://issues.redhat.com/browse/PROJQUAY-1613[PROJQUAY-1613] +* Fix Clair python language matching link:https://issues.redhat.com/browse/PROJQUAY-1692[PROJQUAY-1692] + +=== quay-operator + +Fixed: + +* Fix Quay Operator handling of provided certificates related to BUILDMAN_HOSTNAME link:https://issues.redhat.com/browse/PROJQUAY-1577[PROJQUAY-1577] + + +[[rn-3-402]] +== Version 3.4.2 + +=== quay / clair / quay-builder + +Fixed: + +* Fix clair crash downloading RHEL content mapping +* Quay config-tool validates SMTP +* Quay config-tool now prevents SECRET_KEY from changing on config updates + +=== quay-operator + +Fixed: + +* Fix Quay Operator reconciler loop resulting in failed mirror configurations + + + +[[rn-3-401]] +== Version 3.4.1 + +=== quay / clair / quay-builder + +Fixed: + +* Quay config editor validates OIDC provider +* Quay config editor correctly validates MySQL database with SSL +* Quay config editor no longer requires Time Machine expiration when feature not enabled + +=== quay-operator + +Fixed: + +* Quay Operator generates 
correct cert for build manager +* Quay Operator documentation link corrected to 3.4 + +=== quay-container-security-operator + +Fixed: + +* `Quay` container Security Operator upgrade to 3.4.0 + +=== quay-openshift-bridge-operator + +Fixed: + +* Quay Bridge Operator upgrade to 3.4.0 + + + +[[rn-3-400]] +== Version 3.4.0 + +=== quay / clair / quay-builder + +Added/Changed: + +* Clair V4 now GA and the default security scanner for Quay 3.4.0. New features include support for notifications and disconnected deployments. +* New ConfigTool replaces the older Config App, providing better configuration validation and integration with the new Quay Operator. Quay now uses same validator as the ConfigTool at start time to ensure its configuration is correct. You will see a table of configuration validation status (pass/fail) now when Quay boots up. +* Quay codebase now completely migrated to python 3 with numerous dependency updates. +* (Tech Preview) Support for Helm V3 is no longer considered experimental. It can be enabled as follows: ++ +``` +# Enable Helm support- requires that general OCI support (Tech Preview) is enabled. +FEATURE_GENERAL_OCI_SUPPORT: True +FEATURE_HELM_OCI_SUPPORT: True +``` +* (Tech Preview) Due to necessary changes, the existing {productname} builders had to be removed and entirely rewritten. This has resulted in a loss of functionality so the new builders are being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. Currently, builds are only available on OpenShift/Kubernetes utilizing Red Hat CoreOS for the sandbox VMs. The internal build manager has also been completely re-written to use gRPC and numerous core issues have been addressed. Please follow the provided documentation carefully when setting up. +* NooBaa has graduated from Technical Preview (TP) and now has General Availability (GA) status. 
+ +Fixed: + +* PROJQUAY-121 Build manager scheduling too many builds +* PROJQUAY-139 Quay starts unreasonable number of workers when running in a container +* PROJQUAY-206 Repo mirroring sometimes locks up +* PROJQUAY-357 Properly escape arguments in entrypoint config +* PROJQUAY-381 Existing tags get deleted when mirroring fails +* PROJQUAY-399 Cannot setup mysql 8 for Quay via config tool +* PROJQUAY-480 Defunct Gunicorn Processes +* PROJQUAY-551 LDAP_USER_FILTER causes errors when not quoted +* PROJQUAY-575 Broken link for webhook POST in the webhook notifications page +* PROJQUAY-607 Changing SERVER_HOSTNAME triggers storage replication and 100% database CPU +* PROJQUAY-632 Lost usage logs when set kinesis as the logs producer +* PROJQUAY-635 Error 500 on Applications tab with naboo +* PROJQUAY-659 Creating new tags via the UI on a schema 2 manifest creates a schema 1 manifest +* PROJQUAY-675 Quay export logs select date range less than a month redirect to 500 error page +* PROJQUAY-676 Wrong image vulnerabilities link in OCP4.4 Overview page +* PROJQUAY-742 `Quay` container crashes when no user exists in database +* PROJQUAY-796 Mirrored images have new digest +* PROJQUAY-797 Config app does not copy database SSL file to correct place +* PROJQUAY-808 Dockerfile upload failure (LocalStorage) +* PROJQUAY-813 Quay cannot connect to mysql db when SSL/TLS is required +* PROJQUAY-822 Quay App POD log should not print out LDAP user's password as plaintext +* PROJQUAY-850 Config app fails to generate clair security.pem +* PROJQUAY-861 Deploy Quay is failed with AWS S3 as backend storage registry +* PROJQUAY-866 Possible name collisions when deplying multiple `QuayRegistries` +* PROJQUAY-867 Restrict Quay Operator to Single Namespace +* PROJQUAY-871 Kustomize secrets broken with prefixed resource names +* PROJQUAY-884 Add support for tar.gz config bundles +* PROJQUAY-887 Error when controller processes existing QuayRegistry +* PROJQUAY-907 Repo mirror start date not 
calculated correctly +* PROJQUAY-915 Simultaneously pushing the same manifest can result in a manifest error +* PROJQUAY-917 Incorrect encoding of CSRF token in UI +* PROJQUAY-923 Failed to set GCS as the storage backend for Quay via config tool +* PROJQUAY-930 Config bundle contains fields for unmanaged components +* PROJQUAY-933 Quay config app failed to validate Noobaa SSL configurations +* PROJQUAY-934 Quay edit permissions of robot account redirect to quay 500 error page +* PROJQUAY-935 Quay Image Repository Mirror was stuck +* PROJQUAY-940 Quay delete in use robot account get 500 error page +* PROJQUAY-942 Quay push image was failed when backend storage is Azure Blob Storage +* PROJQUAY-948 list_manifest_layers should not fail on shared blobs +* PROJQUAY-949 Have Clair V4 indexing handle manifest layer error +* PROJQUAY-953 Quay image repository Tags page can't display existing image tags +* PROJQUAY-958 Unhandled date token outside the given date range used for elasticsearch pagination +* PROJQUAY-973 Transaction error if the same repository is created twice during auth flow +* PROJQUAY-988 Quay update tag expiration does not work +* PROJQUAY-1002 Helm 3 OCI Support Push Fails due to invalid MIME type +* PROJQUAY-1011 Accessing build logs from super user panel doesn't work +* PROJQUAY-1015 RPM command error when getting rpm packages from layer database +* PROJQUAY-1023 oraclelinux:7 causes matcher bug +* PROJQUAY-1035 Unable to override gunicorn worker count in k8s +* PROJQUAY-1087 Fail to pull from managed objectstorage +* PROJQUAY-1101 Typo in /tools/generatekeypair.py +* PROJQUAY-1103 Remove need to modify SCC +* PROJQUAY-1112 Quay database reaches connection limit +* PROJQUAY-1122 Specify pull secret for component images +* PROJQUAY-1132 Running as config should not try to set httppasswd + +Deprecated: + +* Clair V2 (clair-jwt): With the GA of Clair V4, this version of Clair is now marked as deprecated. 
Users are encouraged to migrate to Clair V4 with this release. Clair V2 will be removed completely in the near future. +* App Registry: Customers using the App Registry feature should begin migrating to another application storage solution such as Helm V3 which uses the OCI standard container format. App Registry will be completely removed in the near future. + +Note: + +* Upgrading to Quay 3.4 will require a database migration which does not support downgrading back to a prior version of Quay. Please back up your database before performing a migration. + +Known Issues: + +* PROJQUAY-649 "openssl passwd" incorrect on OCP4 with FIPS mode enabled +* PROJQUAY-841 Provide and document an egress firewall whitelist +* PROJQUAY-888 Config App cannot connect to Postgres RDS instance via SSL +* PROJQUAY-960 Bucket addressing with Ceph in Quay +* PROJQUAY-1056 Quay deployment was failed at setup DB on GCP when use GCP SQL Postgresql +* PROJQUAY-1181 Quay config editor doesn't validate SMTP +* PROJQUAY-1390 Quay login with Openstack Keystone user was failed +* Official Red Hat repositories may now contain "source" images which will be included in Mirrored repositories. See link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#getting-ubi-container-image-source-code_adding-software-to-a-running-ubi-container[Getting UBI Container Image Source Code] for an example of a source image tag. There is no simple way to exclude these source containers using Quay's current tag patterns. This will be addressed in future Quay versions. + + + +=== quay-operator + +* Only supported on OCP-4.5 or newer + +Added: + +* Completely redesigned Quay Operator with fully supported default storage configuration using RHOCS. +* Works in conjunction with new Config Tool to reconcile configuration updates made to a running Quay cluster. 
+* Handles migration from older `QuayEcosystem` Custom Resource to new `QuayRegistry` Custom Resource. + +Known Issues: + +* PROJQUAY-1056 Quay deployment was failed at setup DB on GCP when use GCP SQL Postgresql +* PROJQUAY-1394 Quay TNG Operator was failed to start managed postgresql database POD + (operator upgrades may encounter this issue, recreating your QuayRegistry CR should resolve the issue) + + +=== quay-container-security-operator + + +* Only supported on OCP-4.5 or newer + +Fixed: + +* PROJQUAY-676 Wrong image vulnerabilities link in OCP4.4 Overview page + + +=== quay-openshift-bridge-operator + +* Only supported on OCP-4.5 or newer + +Fixed: + +* PROJQUAY-1225 bridge-operator update to go-1.15 + + +link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_release_notes#rn-3-400[Link to this Release] diff --git a/modules/rn_3_50.adoc b/modules/rn_3_50.adoc new file mode 100644 index 000000000..5c800fdf2 --- /dev/null +++ b/modules/rn_3_50.adoc @@ -0,0 +1,155 @@ +[[rn-3-507]] +== Version 3.5.7 + +=== quay / clair / quay-builders + +Fixed: + +* CVE-2021-3762 quay-clair-container: quay/claircore: directory traversal when scanning crafted container image layer allows for arbitrary file write link:https://issues.redhat.com/browse/PROJQUAY-2486[PROJQUAY-2486] + +=== quay-operator / quay-container-security-operator / quay-openshift-bridge-operator + +* Update downstream operator extensions api to "v1" for 3.5 link:https://issues.redhat.com/browse/PROJQUAY-2480[PROJQUAY-2480] + + +[[rn-3-506]] +== Version 3.5.6 + +=== quay / clair / quay-builders + +Fixed: + +* rpm: package scanner leaks extracted layers link:https://issues.redhat.com/browse/PROJQUAY-2315[PROJQUAY-2315] + +[[rn-3-505]] +== Version 3.5.5 + +=== quay / clair / quay-builders + +Fixed: + +* Disable storing signatures during repo mirroring link:https://issues.redhat.com/browse/PROJQUAY-2312[PROJQUAY-2312] +* SecurityWorker fails when loading information 
when a V2 scanner is not configured link:https://issues.redhat.com/browse/PROJQUAY-2290[PROJQUAY-2290] +* SecurityWorker fails when indexing a manifest layer's location is remote link:https://issues.redhat.com/browse/PROJQUAY-2285[PROJQUAY-2285] +* Fixed backfill replication script relies on Image table link:https://issues.redhat.com/browse/PROJQUAY-2273[PROJQUAY-2273] +* Quay builders honor proxy environment variables link:https://issues.redhat.com/browse/PROJQUAY-2147[PROJQUAY-2147] + + +[[rn-3-504]] +== Version 3.5.4 + +=== quay / clair / quay-builders + +Fixed: + +* Clair scan throwing 400 bad request + +[[rn-3-503]] +== Version 3.5.3 + +=== quay / clair / quay-builder + +Fixed: + +* Quay config validation fails on PostgreSQL 11 backed by SSL +* Quay config validation fails on SSL database connection on PostgreSQL 12 +with SCRAM password authentication +* Quay config validation fails on Azure PostgreSQL DB with SSL +* Quay repository mirroring fixed +* Quay config validation crash on startup + +=== quay-operator + +* Quay operator upgrade pods running all workers instead of just database +upgrade + +[[rn-3-502]] +== Version 3.5.2 + +=== quay / clair / quay-builder + +Fixed: + +* Fix config validation of LDAP server to prevent the server from timing out on large LDAP requests. +* Fix quay-operator Service Account permissions to allow Quay Registry deletion. +* Fix clair's encoding of time in configuration. +* Enhance clair to discard unfixed and unaffected vulnerabilities in Red Hat OVAL v2 feed. +* Fix quay to prevent creation of empty files in storage during multi-part upload. +* Fix clair to properly start in a disconnected environment. + + +=== quay-operator + +Known issues: + +Geo-replication does not work when Quay is deployed on OpenShift using the Operator. 
+ +[[rn-3-501]] +== Version 3.5.1 + +=== quay / clair / quay-builder + +Fixed: + +Fix Clair "duplicate key value violates unique constraint" after upgrade link:https://issues.redhat.com/browse/PROJQUAY-1889[PROJQUAY-1889] + +[[rn-3-500]] +== Version 3.5.0 + +=== quay / clair / quay-builder + +Note: + +Some features of Quay are not currently available when running on a FIPS-enabled OCP cluster or RHEL system: + +* FEATURE_MAILING will not work for user create validation, vulnerability notifications, and export logs +* Azure object storage is not available due to hashing +* Deprecated app-registry will not function + +Tech Preview + +* Due to necessary changes, the existing {productname} builders had to be removed and entirely rewritten. This has resulted in a loss of functionality so the new builders are being released as link:https://access.redhat.com/support/offerings/techpreview[Technology Preview]. Currently, builds are only available on OpenShift/Kubernetes utilizing Red Hat CoreOS for the sandbox VMs. The internal build manager has also been completely re-written to use gRPC and numerous core issues have been addressed. Please follow the provided documentation carefully when setting up. + +Deprecated: + +* Clair V2 (clair-jwt): With the GA of Clair V4, this version of Clair is now marked as deprecated. Users are encouraged to migrate to Clair V4 with this release. Clair V2 will be removed completely in the next release. +* App Registry: Customers using the App Registry feature should begin migrating to another application storage solution such as Helm V3 which uses the OCI standard container format. App Registry will be completely removed in the next release. 
+ + +Fixed: + +* Fix quay running on a FIPS-enabled OCP cluster +* Fix validation of LDAP_USER_FILTER when missing from config bundle +* Upgrade internally-used jQuery +* Remove usage of TLS1.0 and TLS1.1 ciphers +* Fix build of uploaded Dockerfile when object storage is Swift +* Fix whitespace error in UI for repository count checker +* (CVE-2020-1747) Update PyYAML +* Fix quay.expires-after label for all linked images +* Helm chart support now generally available +* Fix validation of SMTP in config bundle +* Fix gitlab trigger build images now honor configured storage +* Fix OIDC session sends invalid state value in URL +* Fix custom OIDC external authentication ignores PREFERRED_URL_SCHEME configuration +* Fix config editor opening links in same page +* Fix setting USERFILES_LOCATION to valid storage if not default +* Fix typo in user confirmation screen +* Remove unused nodejs from container +* Fix default MAIL_DEFAULT_SENDER config value +* Fix config editor default tag expiration display +* (CVE-2020-13757) Remove usage of python-rsa package in favor of python-cryptography +* Added support of github action to publish to a repository +* Document clair updater URLs + +=== quay-operator + +Note: The new quay-operator OCP monitor dashboard requires that the operator be installed in all namespaces (the default). If installed in a single namespace, the "monitoring" component will be unmanaged and not installed. 
+ +* Document using disconnected clair with quay-operator +* Fix quay-operator version displayed in OCP console +* Fix BUILDMAN_HOSTNAME in config bundle with managed route component +* Added OCP monitoring integration + +=== quay-container-security-operator + +* Fix reading security metadata when FEATURE_ANONYMOUS_ACCESS is set to false diff --git a/modules/rn_3_60.adoc b/modules/rn_3_60.adoc new file mode 100644 index 000000000..a6c43ddf3 --- /dev/null +++ b/modules/rn_3_60.adoc @@ -0,0 +1,388 @@ +[[rns-3-607]] + +== Version 3.6.7 + +=== quay / clair / quay-builder + +* link:https://issues.redhat.com/browse/PROJQUAY-3812[PROJQUAY-3812]. [3.6] Failed to create non-existing repository in user account namespace by image pushing + +[[rns-3-606]] + +== Version 3.6.6 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3146[PROJQUAY-3146]. Strange partial deletion of mirrored tags. + +* link:https://issues.redhat.com/browse/PROJQUAY-3404[PROJQUAY-3404]. Build logs page is blank on Super User Admin panel. + +* link:https://issues.redhat.com/browse/PROJQUAY-3405[PROJQUAY-3405]. Build "copy Logs" doesn't work. + +* link:https://issues.redhat.com/browse/PROJQUAY-3638[PROJQUAY-3638]. Quay config validator crashes on 3.6.5 startup. + +[[rns-3-605]] + +== Version 3.6.5 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2983[PROJQUAY-2983]. Config validation fails if no AWS access keys are provided ver. 2. + +* link:https://issues.redhat.com/browse/PROJQUAY-3437[PROJQUAY-3437]. CVE-2022-24761 quay-registry-container: waitress: Inconsistent Interpretation of HTTP Requests ('HTTP Request Smuggling'). + +Added/Changed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3421[PROJQUAY-3421]. Bump Clair to 4.4. + +=== quay-operator + +Added/Changed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3444[PROJQUAY-3444]. Adds subscription annotation to CSVs. 
+ +[[rn-3-604]] + +== Version 3.6.4 + +=== quay-operator + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3317[PROJQUAY-3317]. Quay 3.6.3 APP POD was crashed when use unmanaged tls component. + + +[[rn-3-603]] + +== Version 3.6.3 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2080[PROJQUAY-2080]. Quay failed to delete new team with 400 error code when the team role is Admin. + +* link:https://issues.redhat.com/browse/PROJQUAY-2941[PROJQUAY-2941]. Add aws-ip-ranges.json to downstream build. + +* link:https://issues.redhat.com/browse/PROJQUAY-2343[PROJQUAY-2343]. LDAP validation broken in Quay 3.4.z and 3.5.z. + +* link:https://issues.redhat.com/browse/PROJQUAY-3106[PROJQUAY-3106]. Issue while mirroring the images in Quay Operator v3.6.2. + +* link:https://issues.redhat.com/browse/PROJQUAY-3119[PROJQUAY-3119]. Quay is not garbage collecting blobs correctly (v3.6.3). + +* link:https://issues.redhat.com/browse/PROJQUAY-3179[PROJQUAY-3179]. Executor exception when username and password not specified to pull quay-builder. + +Added/Changed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2989[PROJQUAY-2989]. Bump LDAP 3.2.0 to 3.4.0. + + +=== quay-operator + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2049[PROJQUAY-2049]. When routes are not managed a config editor endpoint is still propagated in status. + +* link:https://issues.redhat.com/browse/PROJQUAY-1812[PROJQUAY-1812]. Quay config app changes are not rolled out if QuayRegistry status is in `MigrationInProgress`. + +* link:https://issues.redhat.com/browse/PROJQUAY-1624[PROJQUAY-1624]. BITTORRENT_FILENAME_PEPPER has been removed from the config.yaml. + +* link:https://issues.redhat.com/browse/PROJQUAY-2696[PROJQUAY-2696]. Quay 3.6.0 Operator should block the deployment when route is managed, TLS is unmanaged without providing TLS Cert/Key pair. + +* link:https://issues.redhat.com/browse/PROJQUAY-2335[PROJQUAY-2335].
Quay Operator should block the deployment when Route is managed, TLS is unmanaged without providing TLS Cert/key pairs. + +* link:https://issues.redhat.com/browse/PROJQUAY-2067[PROJQUAY-2067]. Operator 3.5.1 fails to check Route API on OpenShift Container Platform 4.8. + +* link:https://issues.redhat.com/browse/PROJQUAY-2869[PROJQUAY-2869]. Quay Operator on OpenShift 4.6 with `huge_pages` cannot deploy. + +* link:https://issues.redhat.com/browse/PROJQUAY-2409[PROJQUAY-2409]. Incorrect parsing of extraneous zero characters at the beginning of an IP address octet. + +* link:https://issues.redhat.com/browse/PROJQUAY-2432[PROJQUAY-2432]. Panic due to racy read of persistConn after handler panic. + +* link:https://issues.redhat.com/browse/PROJQUAY-2593[PROJQUAY-2593]. Malformed archive may cause panic or memory exhaustion. + +* link:https://issues.redhat.com/browse/PROJQUAY-3169[PROJQUAY-3169]. Kubernetes executor doesn't filter completed jobs when counting running jobs. + +* link:https://issues.redhat.com/browse/PROJQUAY-3238[PROJQUAY-3238]. APP POD was failed to be ready with /health/instance check keeping report 499 Error Code. + +Added/Changed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2973[PROJQUAY-2973]. Bump github.com/ulikunitz/xz dependency. + + +=== quay-openshift-bridge-operators + +* link:https://issues.redhat.com/browse/PROJQUAY-2732[PROJQUAY-2732]. Faster creation of resources and permissions. + +* link:https://issues.redhat.com/browse/PROJQUAY-2898[PROJQUAY-2898]. Review QBO - Issue with BuildConfig being mutated incorrectly. + +* link:https://issues.redhat.com/browse/PROJQUAY-2984[PROJQUAY-2984]. Change label/selector on QBO pod and service. + + +[[rn-3-602]] + +== Version 3.6.2 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2416[PROJQUAY-2416]. Builder jobs not completing and timing out after 3 minutes of inactivity. +* link:https://issues.redhat.com/browse/PROJQUAY-2313[PROJQUAY-2313]. 
Quay is using more storage than other registries on s3. +* link:https://issues.redhat.com/browse/PROJQUAY-2681[PROJQUAY-2681]. Quay 3.6.0 registry title was not changed after changes with the config editor. + +=== quay-operator + +Added/Changed: + +* As of {productname} v3.6.2, you can specify the desired size of storage resources provisioned for managed components. link:https://issues.redhat.com/browse/PROJQUAY-1090[PROJQUAY-1090]. + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2930[PROJQUAY-2930]. Quay Operator unable to reconcile when specified the PVC volume size of Clair PostgreSQL DB. +* link:https://issues.redhat.com/browse/PROJQUAY-2824[PROJQUAY-2824]. Upgrades to 3.6.1 are broken in OpenShift 4.6 + +=== quay-container-security-operator + +* link:https://issues.redhat.com/browse/PROJQUAY-2928[PROJQUAY-2928]. CSO shows the wrong title in Operator Hub. + + +=== quay-openshift-bridge-operators + +* link:https://issues.redhat.com/browse/PROJQUAY-2797[PROJQUAY-2797]. Quay Bridge Operator prevents deletion of builds. + + +[[rn-3-601]] +== Version 3.6.1 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-1936[PROJQUAY-1936]. Quay Operator reports wrong hostname in registryEndpoint status field for custom hostnames. +* link:https://issues.redhat.com/browse/PROJQUAY-2122[PROJQUAY-2122]. Use Postgres image from registry.redhat.io. +* link:https://issues.redhat.com/browse/PROJQUAY-2435[PROJQUAY-2435]. Quay should not create HPA for Clair APP and Mirror when horizontalpodautoscaler component is unmanaged. +* link:https://issues.redhat.com/browse/PROJQUAY-2563[PROJQUAY-2563]. Quay stops indexing after Clair failure. +* link:https://issues.redhat.com/browse/PROJQUAY-2603[PROJQUAY-2603]. Quay Operator should not recreate managed Postgresql DB POD when no config change happened to database. +* link:https://issues.redhat.com/browse/PROJQUAY-2653[PROJQUAY-2653]. Add standard Helm layer type to default types. 
+* link:https://issues.redhat.com/browse/PROJQUAY-2691[PROJQUAY-2691]. Reclassified CVE ratings show source as unknown. +* link:https://issues.redhat.com/browse/PROJQUAY-2334[PROJQUAY-2334]. Deprecate FEATURE_HELM_OCI_SUPPORT in favor of OCI artifacts config. +* link:https://issues.redhat.com/browse/PROJQUAY-2541[PROJQUAY-2541]. Enrichment data visibility fix on Quay UI. +* link:https://issues.redhat.com/browse/PROJQUAY-2636[PROJQUAY-2636]. Operator communicates healthy status per managed component. + +[[rn-3-600]] +== Version 3.6.0 + +=== quay / clair / quay-builder + +Added/Changed: + +* {productname} 3.6 now includes support for the following Open Container Initiative (OCI) image media types by default: CLI cosigning, Helm, and the zstd compression scheme. Other OCI media types can be configured by the user in their config.yaml file, for example: ++ +.config.yaml +[source,yaml] +---- +... +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.dev.cosign.simplesigning.v1+json + application/vnd.cncf.helm.config.v1+json: + - application/tar+gzip + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +... +---- ++ +[NOTE] +==== +When adding OCI media types that are not configured by default, users will also need to manually add support for cosign and Helm if desired. The zstd compression scheme is supported by default, so users will not need to add that OCI media type to their config.yaml to enable support. +==== ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-1417[PROJQUAY-1417] and link:https://issues.redhat.com/browse/PROJQUAY-1032[PROJQUAY-1032]. + +* You can now use the API to create a first user. (link:https://issues.redhat.com/browse/PROJQUAY-1926[PROJQUAY-1926]) + +* Support for nested repositories and extended repository names has been added. This change allows the use of `/` in repository names needed for certain {ocp} use cases.
(link:https://issues.redhat.com/browse/PROJQUAY-1535[PROJQUAY-1535]) + +* Registry users now have the option to set `CREATE_PRIVATE_REPO_ON_PUSH` in their config.yaml to `True` or `False` depending on their security needs. (link:https://issues.redhat.com/browse/PROJQUAY-1929[PROJQUAY-1929]) + +* Pushing to a non-existent organization can now be configured to automatically create the organization. (link:https://issues.redhat.com/browse/PROJQUAY-1928[PROJQUAY-1928]) + +* Users are now required to enter namespace and repository names when deleting a repository. (link:https://issues.redhat.com/browse/PROJQUAY-763[PROJQUAY-763]) + +* Support for Ceph virtual-hosted-style bucket addressing has been added. (link:https://issues.redhat.com/browse/PROJQUAY-922[PROJQUAY-922]) + + +* With Clair v4.2, enrichment data is now viewable in the Quay UI. +Additionally, Clair v4.2 adds CVSS scores from the National Vulnerability Database for detected vulnerabilities. ++ +With this change, if the vulnerability has a CVSS score that is within 2 levels of the distro's score, the Quay UI presents the distro's score by default. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-2102?filter=12382147[PROJQUAY-2102] and link:https://issues.redhat.com/browse/PROJQUAY-1724?filter=12382147[PROJQUAY-1724]. + +* The Quay Repository now shows *Repository Status* when repository mirroring is enabled. (link:https://issues.redhat.com/browse/PROJQUAY-591[PROJQUAY-591]) + +* Memory usage across Clair, notably around the `affected_manifests` call, has been improved.
These changesets include: + +** `io.Pipe` is used to cross-wire JSON encoding and API requests in order to avoid buffering the entire body request in memory; +** `encoding/JSON` has been replaced with `github.com/ugorji/go/codec` configured for JSON in order to allow streaming the JSON encoding; +** `affected_manifests` calls in the notifier, which should prevent large vulnerability turnovers from causing extremely large API calls. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-1693[PROJQUAY-1693]. + +* Red Hat Enterprise Linux (RHEL) 8 is strongly recommended for highly available, production quality deployments of {productname} 3.6. RHEL 7 has not been tested with {productname} 3.6, and will be deprecated in a future release. + +* Podman is strongly recommended for highly available, production quality deployments of {productname} 3.6. Docker has not been tested with {productname} 3.6, and will be deprecated in a future release. + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-2047[PROJQUAY-2047]. Clair database keeps on growing. + +* link:https://issues.redhat.com/browse/PROJQUAY-1918[PROJQUAY-1918]. Clair v4.1.0.alpha2 indexer now works in {productname} 3.6. + +* link:https://issues.redhat.com/browse/PROJQUAY-1610[PROJQUAY-1610]. The `initContainer` from the Quay migration pod has been removed, which blocked the deployment process until Clair responded. As a result, Quay deployments now progress without waiting on the Clair deployment to finish. + +* link:https://issues.redhat.com/browse/PROJQUAY-1857[PROJQUAY-1857]. NamespaceGCWorker and RepositoryGCWorker shut down when unable to acquire lock + +* link:https://issues.redhat.com/browse/PROJQUAY-1872[PROJQUAY-1872]. GC workers will sometimes fail to grab a lock due to Redis running out of connections + +* link:https://issues.redhat.com/browse/PROJQUAY-2414[PROJQUAY-2414].
Quay config editor was failed to validate AWS RDS TLS Cert + +* link:https://issues.redhat.com/browse/PROJQUAY-1626[PROJQUAY-1626]. Config validation fails if no AWS access keys are provided + +* link:https://issues.redhat.com/browse/PROJQUAY-1710[PROJQUAY-1710]. Notifications are getting lost + +* link:https://issues.redhat.com/browse/PROJQUAY-1813[PROJQUAY-1813]. Need ratelimiter for updaters + +* link:https://issues.redhat.com/browse/PROJQUAY-1815[PROJQUAY-1815]. Quay config editor can't validate the expire time of uploaded LDAPS CA Cert + +* link:https://issues.redhat.com/browse/PROJQUAY-1816[PROJQUAY-1816]. Quay export logs API return 200 when export logs mail not delivered to target address + +* link:https://issues.redhat.com/browse/PROJQUAY-1912[PROJQUAY-1912]. Internal notifier queue clogging with events + +* link:https://issues.redhat.com/browse/PROJQUAY-2119[PROJQUAY-2119]. Quay config validation fails on PostgreSQL 11 backed by SSL + +* link:https://issues.redhat.com/browse/PROJQUAY-2167[PROJQUAY-2167]. Mirroring stopped working in 3.5.2 + +* link:https://issues.redhat.com/browse/PROJQUAY-2269[PROJQUAY-2269]. SecurityWorker fails when indexing a manifest layer's location is remote + +* link:https://issues.redhat.com/browse/PROJQUAY-2200[PROJQUAY-2200]. Quay Config editor need to support sslmode=verify-full in config.yaml after uploading database SSL Cert + +* link:https://issues.redhat.com/browse/PROJQUAY-2185[PROJQUAY-2185]. Quay CR modified after making changes via the config tool + +=== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. 
For the most recent list of deprecated and removed functionality in {productname}, refer to the table below. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//This will eventually expand to cover the latest three releases. Since this is the first TP tracker, it will include only 3.6. + +.Technology Preview tracker +[cols="2a,2a",options="header"] +|=== +|Feature |Quay 3.6 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-helm-oci[FEATURE_HELM_OCI_SUPPORT] +|Deprecated + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-ui-database[MySQL and MariaDB database support] +|Deprecated + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#oci-intro[Open Container Initiative (OCI) Media types] +|General Availability + +|Java scanning with Clair +|Technology Preview +|=== + +==== Deprecated features + +* *FEATURE_HELM_OCI_SUPPORT*: This option has been deprecated and will be removed in a future version of {productname}. In {productname} 3.6, Helm artifacts are supported by default and included under the `FEATURE_GENERAL_OCI_SUPPORT` property. Users are no longer required to update their config.yaml files to enable support. (link:https://issues.redhat.com/browse/PROJQUAY-2334[PROJQUAY-2334]) + +* *MySQL and MariaDB database support*: The MySQL and mariaDB databases have been deprecated as of {productname} 3.6. Support for these databases will be removed in a future version of {productname}. If starting a new {productname} installation, it is strongly recommended to use PostgreSQL. (link:https://issues.redhat.com/browse/PROJQUAY-1998[PROJQUAY-1998]) + +==== Technology preview features + +* *Java scanning with Clair*: With {productname} 3.6, Clair 4.2 includes support for Java scanning. 
Java scanning is dependent on an external service (CRDA) to gather vulnerability data. Because Clair is using a shared default token to access the CRDA service, it might encounter rate limiting if too many requests are made in a short period of time. Because of this, Clair might miss certain vulnerabilities, for example, log4j. ++ +Customers can obtain and use their own token for CRDA which might help avoid the occurrence of rate limiting by submitting link:https://developers.redhat.com/content-gateway/link/3872178[the API key request form]. Because of these issues, Java scanning for Clair is considered Technical Preview and will be enhanced in future Quay updates. + +=== quay-operator + +[[operator-added-changed]] +Added/Changed: + +* {productname} 3.6 adds a `disconnected` annotation to Operators. For example: ++ +[source,yaml] +---- +metadata: + annotations: + operators.openshift.io/infrastructure-features: '["disconnected"]' +---- ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-1583[PROJQUAY-1583]. + +* In order to properly support Github actions, `RELATED_IMAGE` values can now be referenced by tag name (`name:tag`) or by digest (`name@sha256:123`). (link:https://issues.redhat.com/browse/PROJQUAY-1887[PROJQUAY-1887]), (link:https://issues.redhat.com/browse/PROJQUAY-1890[PROJQUAY-1890]) + +* `HorizontalPodAutoscalers` have been added to the Clair, Quay, and Mirror pods, so that they now automatically scale during load spikes. (link:https://issues.redhat.com/browse/PROJQUAY-1449[PROJQUAY-1449]) + +* The Quay Operator now reports the status of each managed component in a separate index inside of the same status property so that users can see the progress of a deployment or update. (link:https://issues.redhat.com/browse/PROJQUAY-1609[PROJQUAY-1609]) + +* `ssl.cert` and `ssl.key` are now moved to a separate, persistent Secret, which ensures that the cert/key pair is not re-generated upon every reconcile. 
These are now formatted as `edge` routes and mounted to the same directory in the Quay container. (link:https://issues.redhat.com/browse/PROJQUAY-1883[PROJQUAY-1883]) + +* Support for OpenShift Container Platform Edge-Termination Routes has been added by way of a new managed component, `tls`. This separates the `Route` component from TLS and allows users to configure both separately. `EXTERNAL_TLS_TERMINATION: true` is the opinionated setting. Managed `tls` means that the default cluster wildcard cert is used. Unmanaged `tls` means that the user provided cert/key pair will be injected into the `Route`. (link:https://issues.redhat.com/browse/PROJQUAY-2050[PROJQUAY-2050]) + +* The {productname} Operator can now be directly upgraded from 3.3 to 3.6 without regressions in `Route` handling, rollout speed, stability, and reconciliation robustness. (link:https://issues.redhat.com/browse/PROJQUAY-2100[PROJQUAY-2100]) + +* The Quay Operator now allows for more than one Mirroring pod. Users are also no longer required to manually adjust the Mirroring Pod deployment. (link:https://issues.redhat.com/browse/PROJQUAY-1327[PROJQUAY-1327]) + +* Previously, when running a 3.3.x version of {productname} with edge routing enabled, users were unable to upgrade to 3.4.x versions of {productname}. This has been resolved with the release of {productname} 3.6. (link:https://issues.redhat.com/browse/PROJQUAY-1694[PROJQUAY-1694]) + +* Users now have the option to set a minimum number of replica Quay pods when `HorizontalPodAutoscaler` is set. This reduces downtime when updating or reconfiguring Quay via the Operator during rescheduling events. (link:https://issues.redhat.com/browse/PROJQUAY-1763[PROJQUAY-1763]) + +[[operator-known-issues]] +Known issues: + +* link:https://issues.redhat.com/browse/PROJQUAY-2335[PROJQUAY-2335]. `Quay` Operator deployment should be blocked when TLS cert/key pairs are unprovided. Instead, the `Quay` Operator continues to deploy.
+ +* link:https://issues.redhat.com/browse/PROJQUAY-2389[PROJQUAY-2389]. Customer provided TLS certificates are lost after {productname} 3.6 Operator reconcile. + +* link:https://issues.redhat.com/browse/PROJQUAY-2545[PROJQUAY-2545]. Builders are only supported when TLS is unmanaged + +[[operator-fixed-issues]] +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-1709[PROJQUAY-1709]. Upgrading from an older operator with edge route breaks Quay + +* link:https://issues.redhat.com/browse/PROJQUAY-1974[PROJQUAY-1974]. Quay operator doesnt reconciles changes made by config app + +* link:https://issues.redhat.com/browse/PROJQUAY-1838[PROJQUAY-1838]. Quay Operator creates with every restart a new root ca + +* link:https://issues.redhat.com/browse/PROJQUAY-2068[PROJQUAY-2068]. Operator doesn't check for deployment failures + +* link:https://issues.redhat.com/browse/PROJQUAY-2121[PROJQUAY-2121]. Quay upgrade pods running all workers instead of just database upgrade + + + + +=== quay-container-security-operator + +* The Operator Lifecycle Manager now supports the new v1 CRD API, `apiextensions.k8s.io.v1.CustomResourceDefinition` for the Container Security Operator. This CRD should be used instead of the `v1beta1` CRD, which has been deprecated as of OpenShift Container Platform 4.9. (link:https://issues.redhat.com/browse/PROJQUAY-613[PROJQUAY-613]), (link:https://issues.redhat.com/browse/PROJQUAY-1791[PROJQUAY-1791]) + + +=== quay-openshift-bridge-operators + +* The installation experience for the Quay Bridge Operator (QBO) has been improved. Enhancements include the following: + +** `MutatingAdmissionWebhook` is created automatically during install. +** The QBO leverages the Operator Lifecycle Manager feature of auto-generating certificates and webhook configurations. +** The number of manual steps required to get the Quay Bridge Operator running has been decreased. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-672[PROJQUAY-672]. 
+ +* The certificate manager is now delegated by the Operator Lifecycle Manager. Certificates can now be valid for more than 65 days. (link:https://issues.redhat.com/browse/PROJQUAY-1062[PROJQUAY-1062]) diff --git a/modules/rn_3_70.adoc b/modules/rn_3_70.adoc new file mode 100644 index 000000000..07ce6e1e3 --- /dev/null +++ b/modules/rn_3_70.adoc @@ -0,0 +1,281 @@ +[[rn-3-708]] +== RHBA-2022:6353 - {productname} 3.7.8 bug fix update + +Issued: 2022-09-12 + +{productname} release 3.7.8 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:6353[RHBA-2022:6353] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4222[PROJQUAY-4222]. Quay can't connect to MySQL backed by SSL certificate. + +* link:https://issues.redhat.com/browse/PROJQUAY-4362[PROJQUAY-4362]. Proxy authentication fails when the upstream registry doesn't return the correct www-authenticate header. + +[[rn-3-707]] +== RHBA-2022:6154 - {productname} 3.7.7 bug fix update + +Issued: 2022-08-31 + +{productname} release 3.7.7 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:6154[RHBA-2022:6154] advisory. + +=== New features + +* With this update, the `REPO_MIRROR_ROLLBACK` configuration field has been added. When this field is set to `true`, the repository rolls back after a failed mirror attempt. By default, this field is set to `false`. + +=== Bug fixes + +* Previously, users could only mirror and replicate the entirety of their upstream repository. When complex expressions for tag discovery were used, a list of several tags to be mirrored was created. If the mirroring process failed for any tag at any point during the replication procedure, {productname} would revert the repository to its previous state. If the mirrored repository was empty, all tags that were correctly mirrored were deleted.
For example, if you mirrored 10 tags, and 8 tags were mirrored successfully, but 2 failed, all of the successful tags would be deleted from the repository because of the 2 that failed. ++ +With this update, if a mirroring operation fails, it will no longer roll back the state of the repository. Instead, it will log the images that failed to properly mirror. ++ +For users who want their repository rolled back upon failure, the `REPO_MIRROR_ROLLBACK` feature has been added. When the feature is set to `true`, the repository rolls back after a failed mirror attempt. By default, the feature is set to `false`. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4296[PROJQUAY-4296] and link:https://issues.redhat.com/browse/PROJQUAY-4357[PROJQUAY-4357]. + +* link:https://issues.redhat.com/browse/PROJQUAY-4322[PROJQUAY-4322]. The image mirrored unsuccessfully can be pulled successfully. + +* link:https://issues.redhat.com/browse/PROJQUAY-3976[PROJQUAY-3976]. Pull-thru gives 500 when pulling certain images. + +[[rn-3-706]] +== RHBA-2022:5999 - {productname} 3.7.6 bug fix update + +Issued: 2022-08-15 + +{productname} release 3.7.6 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:5999[RHBA-2022:5999] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4227[PROJQUAY-4227]. Supported NGINX version in Quay's container. + +* link:https://issues.redhat.com/browse/PROJQUAY-2897[PROJQUAY-2897]. Ability to add annotations and labels to Quay deployment when using the Operator. + +* link:https://issues.redhat.com/browse/PROJQUAY-3743[PROJQUAY-3743]. Pull-thru proxy repository auto-creation should respect CREATE_PRIVATE_REPO_ON_PUSH config. + +* link:https://issues.redhat.com/browse/PROJQUAY-4229[PROJQUAY-4229]. Quay 3.7.5 images high vulnerability reported by Redhat ACS. + +* link:https://issues.redhat.com/browse/PROJQUAY-4254[PROJQUAY-4254].
Cannot cache (pull-thru) OCI image index. + + + +[[rn-3-705]] +== RHBA-2022:5727 - {productname} 3.7.5 bug fix update + +Issued: 2022-08-02 + +{productname} release 3.7.5 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:5727[RHBA-2022:5727] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-3982[PROJQUAY-3982]. Tags reverted after mirroring. + +* link:https://issues.redhat.com/browse/PROJQUAY-1569[PROJQUAY-1569]. Provide support for pod anti affinity for Quay Operator. + +* link:https://issues.redhat.com/browse/PROJQUAY-4148[PROJQUAY-4148]. Add RS384 support for OIDC flow. + +* link:https://issues.redhat.com/browse/PROJQUAY-1603[PROJQUAY-1603]. Container-security-operator does not take pull secrets of OpenShift into account. + +* link:https://issues.redhat.com/browse/PROJQUAY-2153[PROJQUAY-2153]. Allow CSO to define proxy variables. + + +[[rn-3-704]] +== RHBA-2022:5559 - {productname} 3.7.4 bug fix update + +Issued: 2022-07-18 + +{productname} release 3.7.4 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:5559[RHBA-2022:5559] advisory. + +=== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-3145[PROJQUAY-3145]. Usage logs error out with a 500 when repo mirroring is run with DEBUGLOG=true. + +* link:https://issues.redhat.com/browse/PROJQUAY-3819[PROJQUAY-3819]. Allow Builders to Use Self Signed Certificates. + +* link:https://issues.redhat.com/browse/PROJQUAY-4016[PROJQUAY-4016]. PrometheusRule is not being parsed correctly. + +* link:https://issues.redhat.com/browse/PROJQUAY-2659[PROJQUAY-2659]. Quay 3.6.0 Clair APP POD was failed to rolling update caused by PSQL error "FATAL: sorry, too many clients already". + +[[rn-3-703]] +== Version 3.7.3 + +=== quay / clair / quay-builder + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3965[PROJQUAY-3965].
Basic cosign signature visualization. + +* link:https://issues.redhat.com/browse/PROJQUAY-3981[PROJQUAY-3981]. Unable to navigate on level up in repo-view. + +* link:https://issues.redhat.com/browse/PROJQUAY-3999[PROJQUAY-3999]. Pushing big layers to Quay deployed on Azure OpenShift Cluster results in a 413. + +* link:https://issues.redhat.com/browse/PROJQUAY-3979[PROJQUAY-3979]. Quay 3.7.2 Postgres image vulnerability reported by Redhat ACS. + + +[[rn-3-702]] +== Version 3.7.2 + +=== quay / clair / quay-builder + +* link:https://issues.redhat.com/browse/PROJQUAY-3901[PROJQUAY-3901]. Clair 4.4.2 failed to fetch image layer from quay when image was from dockerhub. + +* link:https://issues.redhat.com/browse/PROJQUAY-3905[PROJQUAY-3905]. Quay 3.7.1 can't reconfig quota to replace system quota for super user account. + +* link:https://issues.redhat.com/browse/PROJQUAY-3802[PROJQUAY-3802]. Quay 3.7.0 image vulnerability reported by Redhat ACS. + +* link:https://issues.redhat.com/browse/PROJQUAY-1605[PROJQUAY-1605]. Quay 3.4 SMTP validation fails. + +* link:https://issues.redhat.com/browse/PROJQUAY-3879[PROJQUAY-3879]. The Quay Config Tool is not validating configurations for Github Enterprise Login. + +* link:https://issues.redhat.com/browse/PROJQUAY-3948[PROJQUAY-3948]. Show how to pull an image with podman. + +* link:https://issues.redhat.com/browse/PROJQUAY-3767[PROJQUAY-3767]. Quay 3.7.0 can't reconfig Quota to replace system default quota for user account. + +* link:https://issues.redhat.com/browse/PROJQUAY-3806[PROJQUAY-3806]. Cannot pull from proxy org as non-admin member. + +* link:https://issues.redhat.com/browse/PROJQUAY-3889[PROJQUAY-3889]. Quay quota consumption is not decreased in org level and image repo level after deleted image tags. + +* link:https://issues.redhat.com/browse/PROJQUAY-3920[PROJQUAY-3920]. Quay 3.7.1 can't config quota for normal user accounts by super user. + +* link:https://issues.redhat.com/browse/PROJQUAY-3614[PROJQUAY-3614]. 
The 'build successfully completed' does not send out notification by email, slack and UI notification. + + +[[rn-3-701]] +== Version 3.7.1 + +=== quay / clair / quay-builder + +* link:https://issues.redhat.com/browse/PROJQUAY-3841[PROJQUAY-3841]. Standalone UI Version is incorrect. + +* link:https://issues.redhat.com/browse/PROJQUAY-2346[PROJQUAY-2346]. Pushing failure of first attempt to create non-existing org or repository by skopeo and podman. + +* link:https://issues.redhat.com/browse/PROJQUAY-3701[PROJQUAY-3701]. Quay 3.7.0 API update default quota should not return 500 internal error. + +* link:https://issues.redhat.com/browse/PROJQUAY-3815[PROJQUAY-3815]. Custom Quota Warning Notification. + +* link:https://issues.redhat.com/browse/PROJQUAY-3818[PROJQUAY-3818]. pull-thru gives 500 when manifest list's sub-manifest is already proxied under different tag in same repo. + +* link:https://issues.redhat.com/browse/PROJQUAY-3828[PROJQUAY-3828]. Quay 3.7.0 quota consumption is not correct in image repo level when removed all tags. + +* link:https://issues.redhat.com/browse/PROJQUAY-3881[PROJQUAY-3881]. cert_install.sh script incorrectly parses certificates in certain situations. + + +[[rn-3-700]] +== Version 3.7.0 + +=== quay / clair / quay-builder + +Added/Changed: + +* Image APIs are now deprecated. Users should move to manifest-based APIs. (link:https://issues.redhat.com/browse/PROJQUAY-3418[PROJQUAY-3418]) + +* With {productname} 3.7, users have the ability to report storage consumption and to contain registry growth by establishing configured storage quota limits. With this feature, organizations can easily avoid exceeding storage limitations by rejecting pulls at a specified limit. 
(link:https://issues.redhat.com/browse/PROJQUAY-302[PROJQUAY-302], link:https://issues.redhat.com/browse/PROJQUAY-253[PROJQUAY-253]) + +* The bare-metal constraint required to run builds has been removed by adding an additional build option which does not contain the virtual machine layer. As a result, builds can be run on virtualized platforms. Backwards compatibility to run previous build configurations are also available. (link:https://issues.redhat.com/browse/PROJQUAY-295[PROJQUAY-295]) + +* {productname} can now act as a proxy cache to mitigate pull-rate limitations from upstream registries. This feature also accelerates pull performance, because images are pulled from the cache rather than upstream dependencies. Cached images are only updated when the upstream image digest differs from the cached image, reducing rate limitations and potential throttling. (link:https://issues.redhat.com/browse/PROJQUAY-465[PROJQUAY-465]) + +* Support for Microsoft Azure Government (MAG) has been added. This optional feature allows government agencies and public sector customers to select and specify a MAG endpoint in their Azure storage yaml. (link:https://issues.redhat.com/browse/PROJQUAY-891[PROJQUAY-891]) + +* Introduced in {productname} 3.6, Java scanning for Clair 4.2, which requires CRDA, included a default shared CRDA key and was enabled by default. Additionally, the default CRDA configuration supported low RPS. With {productname} 3.7, Java scanning no longer includes a default CRDA shared key, and is no longer enabled by default. Users must now manually enable CRDA for scan results, and enable it in Clair's configuration. To enable CRDA, see https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#clair_crda_configuration[Clair CRDA configuration]. ++ +[NOTE] +==== +This feature is currently denoted as `Technology Preview`. +==== + +* {productname} now accepts unsigned images. 
This feature can be enabled under an organization's *Repository Mirroring* page. (link:https://issues.redhat.com/browse/PROJQUAY-3106[PROJQUAY-3106]) + +Known issues: + +* link:https://issues.redhat.com/browse/PROJQUAY-3590[PROJQUAY-3590]. Quay 3.7.0 pull from cache should return quota exceeded error rather than general 403 error code. + +* link:https://issues.redhat.com/browse/PROJQUAY-3767[PROJQUAY-3767]. Quota for _user_ accounts cannot be reconfigured using the {productname} UI. + +Fixed: + +* link:https://issues.redhat.com/browse/PROJQUAY-3648[PROJQUAY-3648]. OAuth2 code flow: Missing state parameters when user is asked to authorize. + +* link:https://issues.redhat.com/browse/PROJQUAY-2495[PROJQUAY-2495]. Gitlab validation fails on Quay 3.5.6. + +* link:https://issues.redhat.com/browse/PROJQUAY-2560[PROJQUAY-2560]. The Quay Config Tool is not validating configurations for Github Enterprise Login. + +* link:https://issues.redhat.com/browse/PROJQUAY-3656[PROJQUAY-3656]. Could not verify GitHub OAuth credentials. + +=== quay-operator + +Added/Changed: + +* Advanced Clair configuration is now available for {productname} 3.7. The following features are now available to Quay administrators on the Quay Operator: + +** Configuration of Clair's updater set through the Quay Operator. +** Configuration of the database connection string through the Quay Operator. +** Configuration of custom certificates into the Clair deployment, which allows support of internal HTTPS proxies. +** Support for alternative fully qualified domain names (FQDN) for Clair that can leverage a global load balancing mechanism fronting different clusters running Clair. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-2110[PROJQUAY-2110]. + +* With advanced Clair configuration, users can also provide a custom Clair configuration for an unmanaged Clair database on the {productname} Operator. 
An unmanaged Clair database allows the {productname} Operator to work in a Geo-Replicated environment, where multiple instances of the Operator must communicate with the same database. An unmanaged Clair database can also be used when a user requires a highly-available (HA) Clair database that exists outside of a cluster. (link:https://issues.redhat.com/browse/PROJQUAY-1696[PROJQUAY-1696]) + +* Geo-replication is now available with the {productname} Operator. This feature allows multiple, geographically distributed Quay deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed Quay setup. Image data is asynchronously replicated in the background with transparent failover / redirect for clients. (link:https://issues.redhat.com/browse/PROJQUAY-2504[PROJQUAY-2504]) + +* With {productname} 3.7, reconfiguring Quay through the UI no longer generates a new login password. The password now generates only once, and remains the same after reconciling `QuayRegistry` objects. (link:https://issues.redhat.com/browse/PROJQUAY-3318[PROJQUAY-3318]) + + +=== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to the table below. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//This will eventually expand to cover the latest three releases. Since this is the first TP tracker, it will include only 3.6. 
+ +.Technology Preview tracker +[cols="4,1,1",options="header"] +|=== +|Feature | Quay 3.7 |Quay 3.6 + +|link:https://access.redhat.com//documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[Quota management and enforcement] +|General Availability +|- + + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-builders-enhancement[{productname} build enhancements] +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries] +|Technology Preview +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index[Geo-replication - {productname} Operator] +|General Availability +|- + + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#unmanaged_clair_configuration[Advanced Clair configuration] +|General Availability +|- + +|Support for Microsoft Azure Government (MAG) +|General Availability +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-fields-helm-oci[FEATURE_HELM_OCI_SUPPORT] +|Deprecated +|Deprecated + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/configure_red_hat_quay/index#config-ui-database[MySQL and MariaDB database support] +|Deprecated +|Deprecated + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#oci-intro[Open Container Initiative (OCI) Media types] +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#clair_crda_configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview 
+ +|Image APIs +|Deprecated +|General Availability +|=== diff --git a/modules/rn_3_80.adoc b/modules/rn_3_80.adoc new file mode 100644 index 000000000..0aa37fd61 --- /dev/null +++ b/modules/rn_3_80.adoc @@ -0,0 +1,451 @@ +:_content-type: CONCEPT + +[id="rn-3-804"] += RHBA-2023:1188 - {productname} 3.8.4 bug fix update + +Issued 2023-3-14 + +{productname} release 3.8.4 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:1188[RHBA-2023:1188] advisory. + +[id="bug-fixes-384"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-5074[PROJQUAY-5074]. Health checks should check storage engines. +* link:https://issues.redhat.com/browse/PROJQUAY-5117[PROJQUAY-5117]. Quay calls LDAP on robot account login. + +[id="rn-3-803"] += RHBA-2023:0906 - {productname} 3.8.3 bug fix update + +Issued 2023-2-27 + +{productname} release 3.8.3 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:0906[RHBA-2023:0906] advisory. + +[id="bug-fixes-383"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-3643[PROJQUAY-3643]. CVE-2022-24863 quay-registry-container: http-swagger: a denial of service attack consisting of memory exhaustion on the host system [quay-3.7] + +[id="rn-3-802"] += RHBA-2023:0789 - {productname} 3.8.2 bug fix update + +Issued 2023-2-15 + +{productname} release 3.8.2 is now available with Clair 4.6.0. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:0789[RHBA-2023:0789] advisory. + +[id="bug-fixes-382"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-4395[PROJQUAY-4395]. Default value of `false` for `CLEAN_BLOB_UPLOAD_FOLDER` does not make sense. +* link:https://issues.redhat.com/browse/PROJQUAY-4726[PROJQUAY-4726]. 
No audit logs when superuser trigger and cancel build under normal user's namespace with superuser full access enabled. +* link:https://issues.redhat.com/browse/PROJQUAY-4992[PROJQUAY-4992]. Cleanup deprecated appr code. + +[id="rn-3-801"] += RHBA-2023:0044 - {productname} 3.8.1 bug fix update + +Issued 2023-1-24 + +{productname} release 3.8.1 is now available. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2023:0044[RHBA-2023:0044] advisory. + +[id="bug-fixes-381"] +== Bug fixes + +* link:https://issues.redhat.com/browse/PROJQUAY-2164[PROJQUAY-2164]. Combined URLs in security scan report (pointing to errata URL). +* link:https://issues.redhat.com/browse/PROJQUAY-4667[PROJQUAY-4667]. Web UI - viewing account results in error. +* link:https://issues.redhat.com/browse/PROJQUAY-4800[PROJQUAY-4800]. Add PUT method to CORS method list. +* link:https://issues.redhat.com/browse/PROJQUAY-4827[PROJQUAY-4827]. Add tracking and cookie content when domain contains Quay.io. +* link:https://issues.redhat.com/browse/PROJQUAY-4527[PROJQUAY-4527]. New UI toggle cannot switch back from new UI to current UI on Apple Safari. +* link:https://issues.redhat.com/browse/PROJQUAY-4663[PROJQUAY-4663]. Pagination for delete repository modal not showing correct values. +* link:https://issues.redhat.com/browse/PROJQUAY-4765[PROJQUAY-4765]. Quay 3.8.0 superuser does not have permission to add new team member to normal user's team when enabled superuser full access. + +[id="rn-3-800"] += RHBA-2022:6976 - {productname} 3.8.0 release + +Issued 2022-12-6 + +{productname} release 3.8.0 is now available with Clair 4.5.1. The bug fixes that are included in the update are listed in the link:https://access.redhat.com/errata/RHBA-2022:6976[RHBA-2022:6976] advisory. 
+ +[id="new-features-and-enhancements-38"] +== {productname}, Clair, and Quay Builder new features and enhancements + +The following updates have been made to {productname}, Clair, and Quay Builders: + +* Previously, {productname} only supported the IPv4 protocol family. IPv6 support is now available in {productname} {producty} standalone deployments. Additionally, dual-stack (IPv4/IPv6) support is available. ++ +.Network protocol support +[cols="2,1,1",options="header"] +|=============================================================== +| Protocol family | {productname} 3.7 | {productname} 3.8 +| IPv4 | ✓ | ✓ +| IPv6 | | ✓ +| Dual-stack (IPv4/IPv6) | | ✓ + +|=============================================================== ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-272[PROJQUAY-272]. ++ +For a list of known limitations, see xref:ipv6-limitations-38[IPv6 and dual-stack limitations]. + +* Previously, {productname} did not require self-signed certificates to use Subject Alternative Names (SANs). {productname} users could temporarily enable Common Name matching with `GODEBUG=x509ignoreCN=0` to bypass the required certificate. ++ +With {productname} 3.8, {productname} has been upgraded to use Go version 1.17. As a result, setting `GODEBUG=x509ignoreCN=0` no longer works. Users must include self-signed certificates to use SAN. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-1605[PROJQUAY-1605]. + +* The following enhancements have been made to the {productname} proxy cache feature: + +** Previously, the cache of a proxy organization with quota management enabled could reach full capacity. As a result, pulls for new images could be prevented until an administrator cleaned up the cached images. ++ +With this update, {productname} administrators can now use the storage quota of an organization to limit the cache size. 
Limiting the cache size ensures that backend storage consumption remains predictable by discarding images from the cache according to the pull frequency or overall usage of an image. As a result, the storage size allotted by quota management always stays within its limits. ++ +For more information, see https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/use_red_hat_quay#proxy-cache-leveraging-storage-quota-limits[Leveraging storage quota limits in proxy organizations]. + +** Previously, when mirroring a repository, an image with the `latest` tag had to exist in the remote repository. This requirement has been removed. Now, an image with the `latest` tag is no longer required, and you do not need to specify an existing tag explicitly. ++ +For more information on this update, see link:https://issues.redhat.com/browse/PROJQUAY-2179[PROJQUAY-2179]. ++ +For more information on tag patterns, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html/manage_red_hat_quay/repo-mirroring-in-red-hat-quay#mirroring-tag-patterns[Mirroring tag patterns]. + +* {productname} 3.8 now includes support for the following Open Container Initiative (OCI) image media types: +** Software Package Data Exchange (SPDX) +** Syft +** CycloneDX ++ +These can be configured by the users in their `config.yaml` file, for example: ++ +.config.yaml +[source,yaml] +---- +... +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.syft+json + application/vnd.cyclonedx + application/vnd.cyclonedx+xml + application/vnd.cyclonedx+json + application/vnd.in-toto+json +... +---- ++ +[NOTE] +==== +When adding OCI media types that are not configured by default, users will also need to manually add support for cosign and Helm if desired. The zstd compression scheme is supported by default, so users will not need to add that OCI media type to their config.yaml to enable support. 
+==== + +== New {productname} configuration fields + +* The following configuration field has been added to test {productname}'s new user interface: + +** **FEATURE_UI_V2**: With this configuration field, users can test the beta UI environment. ++ +*Default*: `False` ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[v2 user interface configuration]. + +* The following configuration fields have been added to enhance the {productname} registry: + +** **FEATURE_LISTEN_IP_VERSION**: This configuration field allows users to set the protocol family to IPv4, IPv6, or dual-stack. This configuration field must be properly set, otherwise {productname} fails to start. ++ +*Default*: `IPv4` ++ +*Additional configurations*: `IPv6`, `dual-stack` ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#config-fields-ipv6[IPv6 configuration field]. + +* The following configuration fields have been added to enhance Lightweight Directory Access Protocol (LDAP) deployments: + +** **LDAP_SUPERUSER_FILTER**: This configuration field is a subset of the `LDAP_USER_FILTER` configuration field. It allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as superusers when {productname} users select LDAP as their authentication provider. ++ +With this field, administrators can add or remove superusers without having to update the {productname} configuration file and restart their deployment. ++ +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-ldap-super-user[LDAP superuser configuration reference]. 
+ +** **LDAP_RESTRICTED_USER_FILTER**: This configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. ++ +This field requires that your `AUTHENTICATION_TYPE` is set to `LDAP`. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-ldap-restricted-user[LDAP restricted user configuration]. + +* The following configuration fields have been added to enhance the superuser role: + +** **FEATURE_SUPERUSERS_FULL_ACCESS**: This configuration field grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-superusers-full-access[FEATURE_SUPERUSERS_FULL_ACCESS configuration reference]. + +** **GLOBAL_READONLY_SUPER_USERS**: This configuration field grants users of this list read access to all repositories, regardless of whether they are public repositories. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-global-readonly-super-users[GLOBAL_READONLY_SUPER_USERS configuration reference]. ++ +[NOTE] +==== +In its current state, this feature only allows designated users to pull content from all repositories. Administrative restrictions will be added in a future version of {productname}. 
+==== + +* The following configuration fields have been added to enhance user permissions: + +** **FEATURE_RESTRICTED_USERS**: When set with `RESTRICTED_USERS_WHITELIST`, restricted users cannot create organizations or content in their own namespace. Normal permissions apply for an organization's membership, for example, a restricted user will still have normal permissions in organizations based on the teams that they are members of. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-feature-restricted-users[ FEATURE_RESTRICTED_USERS configuration reference]. + +** **RESTRICTED_USERS_WHITELIST**: When set with `FEATURE_RESTRICTED_USERS: true`, administrators can exclude users from the `FEATURE_RESTRICTED_USERS` setting. ++ +For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-restricted-users-whitelist[RESTRICTED_USERS_WHITELIST configuration reference]. + +[id="quay-operator-updates"] +== {productname} Operator + +The following updates have been made to the {productname} Operator: + +* Previously, the {productname} Operator only supported the IPv4 protocol family. IPv6 support is now available in {productname} {producty} Operator deployments. ++ +.Network protocol support +[cols="1,1,1",options="header"] +|=============================================================== +| Protocol family | {productname} 3.7 Operator | {productname} 3.8 Operator +| IPv4 | ✓ | ✓ +| IPv6 | | ✓ +| Dual-stack (IPv4/IPv6) | | + +|=============================================================== ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-272[PROJQUAY-272]. ++ +For a list of known limitations, see xref:ipv6-limitations-38[IPv6 and dual-stack limitations]. 
+ +[id="known-issues-and-limitations-38"] +== {productname} 3.8 known issues and limitations + +[id="known-issues-38"] +=== Known issues: + +* The `metadata_json` column in the `logentry3` table on MySQL deployments has a limited size of `TEXT`. Currently, the default size of the column set to be `TEXT` is 65535 bytes. 65535 bytes is not big enough for some mirror logs when debugging is turned `off`. When a statement containing `TEXT` larger than 65535 bytes is sent to MySQL, the data sent is truncated to fit into the 65535 boundary. Consequently, this creates issues when the `metadata_json` object is decoded, and the decode fails because the string is not terminated properly. As a result, {productname} returns a 500 error. ++ +There is currently no workaround for this issue, and it will be addressed in a future version of {productname}. For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4305[PROJQUAY-4305]. + +* There is a known issue when using the `--sign-by-sigstore-private-key` flag with some versions of Podman v4.y.z or greater. When the flag is used, the following error is returned: `Error: writing signatures: writing sigstore attachments is disabled by configuration`. To use this flag with Podman v4, your version must be v4.2.1; versions prior to 4.2.1 return the aforementioned error. There is currently no workaround for this issue, and it will be addressed in a future version of Podman. + +* Currently, when pushing images with the Cosign private key `sigstore` with Podman 4, the following error is returned: `Error: received unexpected HTTP status: 500 Internal Server Error`. This is a known issue and will be fixed in a future version of Podman. ++ +For more information, see link:https://issues.redhat.com/browse/PROJQUAY-4588[PROJQUAY-4588]. + +* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} v2 UI. 
When this field is set, all superuser actions on tenant content should be audited. Currently, when a superuser deletes an existing organization that is owned by a normal user, there is no way to audit that operation. This will be fixed in a future version of {productname}. + +* There is a known issue when using the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field with the {productname} v2 UI. When setting this field to `true` in your config.yaml file, {productname} superusers can view organizations created by normal users, but cannot see the image repository. As a temporary workaround, superusers can view those repositories by navigating to them from the *Organizations* page. This will be fixed in a future version of {productname}. + +* When setting the `FEATURE_SUPERUSERS_FULL_ACCESS` configuration field to `true`, superusers do not have permission to create a new image repository under a normal user's organization. This is a known issue and will be fixed in a future version of {productname}. + +* When running {productname} in the old UI, timed-out sessions would require that a superuser input their password again in the pop-up window. With the new UI, superusers are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. + +* When `FEATURE_RESTRICTED_USERS` is set to `true`, superusers are unable to create new organizations. This is a known issue and will be fixed in a future version of {productname}. + +* If `FEATURE_RESTRICTED_USERS` or `LDAP_RESTRICTED_USER_FILTER` are set with a user, for example, `user1`, and the same user is also a superuser, they will not be able to create new organizations. This is a known issue. The superuser configuration field should take precedence over the restricted user configuration, however this is also an invalid configuration. 
{productname} administrators should not set the same user as both a restricted user and a superuser. This will be fixed in a future version of {productname} so that the superuser configuration field takes precedence over the restricted user field. + +* After selecting *Enable Storage Replication* in the {productname} configuration editor and reconfiguring your {productname} deployment, the new `Quay` and `Mirror` pods fail to start. This error occurs because the `Quay` and `Mirror` pods rely on the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable, which is now unsupported in {productname} {producty}. ++ +As a temporary workaround, you must update the `QuayRegistry` `config.yaml` file manually to include the `QUAY_DISTRIBUTED_STORAGE_PREFERENCE` environment variable, for example: ++ +[source,yaml] +---- + spec: + components: + - kind: clair + managed: true + - kind: postgres + managed: true + - kind: objectstorage + managed: false + - kind: redis + managed: true + - kind: horizontalpodautoscaler + managed: true + - kind: route + managed: true + - kind: mirror + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: local_us + - kind: monitoring + managed: false + - kind: tls + managed: true + - kind: quay + managed: true + overrides: + env: + - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE + value: local_us + - kind: clairpostgres + managed: true +---- ++ +This is a known issue and will be fixed in a future version of {productname}. + +* When configuring {productname} AWS S3 Cloudfront, a new parameter, `s3_region` is required. Currently, the {productname} config editor does not include this field. 
As a temporary workaround, you must manually insert the `s3_region` parameter in your `config.yaml` file, for example: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + default: + - CloudFrontedS3Storage + - cloudfront_distribution_domain: + cloudfront_distribution_org_overrides: {} + cloudfront_key_id: 3.9: + +** **POSTGRES_UPGRADE_DELETE_BACKUP**: When set to `True`, removes old persistent volume claims (PVCs) after upgrading. ++ +**Default**: `False` + +* The following configuration field has been added to track various events: + +** **ACTION_LOG_AUDIT_LOGINS**: When set to `True`, tracks advanced events such as logging into, and out of, the UI, and logging in using Docker for regular users, robot accounts, and for application-specific token accounts. ++ +**Default**: `True` + +[id="quay-operator-updates"] +== {productname} Operator + +The following updates have been made to the {productname} Operator: + +* Currently, the {productname} Operator and Clair use PostgreSQL 10. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. ++ +With this release, if your database is managed by the {productname} Operator, updating from {productname} 3.8 -> 3.9 automatically handles upgrading PostgreSQL 10 to PostgreSQL 13. ++ +[IMPORTANT] +==== +Users with a managed database will be required to upgrade their PostgreSQL database from 10 -> 13. +==== ++ +If you do not want the {productname} Operator to upgrade your PostgreSQL deployment from 10 -> 13, you must set the PostgreSQL parameter to `managed: false` in your `quayregistry.yaml` file. For more information about setting your database to unmanaged, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploying_the_red_hat_quay_operator_on_openshift_container_platform/index#operator-unmanaged-postgres[Using an existing Postgres database]. ++ +[IMPORTANT] +==== +* It is highly recommended that you upgrade to PostgreSQL 13. 
PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. +==== ++ +If you want your PostgreSQL database to match the same version as your {rhel} system, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/deploying_different_types_of_servers/using-databases#migrating-to-a-rhel-8-version-of-postgresql_using-postgresql[Migrating to a RHEL 8 version of PostgreSQL] for {rhel-short} 8 or link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/configuring_and_using_database_servers/using-postgresql_configuring-and-using-database-servers#migrating-to-a-rhel-9-version-of-postgresql_using-postgresql[Migrating to a RHEL 9 version of PostgreSQL] for {rhel-short} 9. + +For more information about the {productname} 3.8 -> 3.9 procedure, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/upgrade_red_hat_quay/index#operator-upgrade[Upgrading the {productname} Operator overview]. + + +[id="known-issues-and-limitations-39"] +== {productname} 3.9 known issues and limitations + +The following sections note known issues and limitations for {productname} 3.9. + +[id="known-issues-39"] +=== Known issues: + +[id="upgrading-known-issues"] +==== Upgrading known issues + +There are two known issues when upgrading your {productname} deployment: + +* If your {productname} deployment is upgrading from one y-stream to the next, for example, from 3.8.10 -> 3.8.11, you must not switch the upgrade channel from `stable-3.8` to `stable-3.9`. Changing the upgrade channel in the middle of a y-stream upgrade will disallow {productname} from upgrading to 3.9. This is a known issue and will be fixed in a future version of {productname}. 
+ +* When upgrading from {productname} 3.7 to 3.9, you might receive the following error: `pg_dumpall: error: query failed: ERROR: xlog flush request 1/B446CCD8 is not satisfied --- flushed only to 1/B0013858`. As a workaround to this issue, you can delete the `quayregistry-clair-postgres-upgrade` job on your {ocp} deployment, which should resolve the issue. + +[id="other-known-issues"] +==== Other known issues + +* Using `conftest pull` commands to obtain policies might return the following error: `Error: download policies: client get: stat /policy/quayregistry-quay-quay-enterprise-847.apps.quaytest-847.qe.devcluster.openshift.com/conftest/policy:latest: no such file or directory`. As a workaround, you can add the `oci://` prefix on your registry host. For example: ++ +[source,terminal] +---- +$ conftest pull oci://mkoktest.quaydev.org/admin/conftest:v1 +---- ++ +This is a known issue and will be fixed in a future version of {productname}. (link:https://issues.redhat.com/browse/PROJQUAY-5573[*PROJQUAY-5573*]) + +* {productname} 3.9 introduced changes to the quota management feature. One of these changes is that tags in the time machine window now count towards the quota total of your organization. ++ +There is a known issue when the proxy cache feature is enabled and configured in a new organization with a link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#quota-management-arch[hard quota check] and time machine settings set to longer than *a few seconds* under their organization settings. In sum, tags in a proxy organization are all given a tag expiration that defaults to 1 day. If your proxy organization has a time machine policy set to longer than *a few seconds* under your organization settings, and the tag expires, it is not immediately available for garbage collection; it must wait to be outside of the time machine window before it can be garbage collected. 
Because subtraction happens upon garbage collection, and pruned tags are kept within the time frame allotted by your organization's settings, image tags are not immediately garbage collected. This results in the quota consumption metric not being updated, and runs the risk of your proxy organization going over the allotted quota. ++ +When a hard quota check is configured for a proxy organization, {productname} administrators will want to reclaim the space taken by tags within the time machine window to prevent organizations from hitting their allotted quota. As a temporary workaround, you can set the time machine expiration for proxy organizations to *a few seconds* under *Organizations* -> *Settings* on the {productname} UI. This immediately removes image tags and allows for more accurate quota consumption metrics. ++ +This is a non-issue for proxy organizations employing a soft quota check and can be ignored. + +* When removing a site from your geo-replicated {productname} deployment, you might receive the following error when running `python -m util.removelocation`: `/app/lib/python3.9/site-packages/tzlocal/unix.py:141: SyntaxWarning: "is not" with a literal. Did you mean "!="? while start is not 0: /app/lib/python3.9/site-packages/netaddr/strategy/{}init{}.py:189: SyntaxWarning: "is not" with a literal. Did you mean "!="? if word_sep is not ''`. You can confirm the deletion of your site by entering `y`. The error is a known issue and will be removed in a future version of {productname}. + +[id="limitations-39"] +=== {productname} 3.9 limitations + +* You must use the Splunk UI to view {productname} action logs. At this time, viewing Splunk action logs on the {productname} *Usage Logs* page is unsupported, and returns the following message: `Method not implemented. Splunk does not support log lookups`. 
+ +[id="bug-fixes-39"] +== {productname} bug fixes + +* Previously, on {productname} Lightweight Directory Access Protocol (LDAP) deployments, there was a bug that disallowed referrals from being used with team synchronization and in other circumstances. With this update, referrals can be turned off globally for {productname} to ensure proper behavior across all components. + +* Previously, only last access timestamps were recorded in {productname}. This issue has been fixed, and now the following timestamps are recorded: ++ +** Login to the {productname} UI. +** Logout of the {productname} UI. +** Login via Docker CLI (registry API) for regular users. +** Login via Docker CLI (Registry API) for robot accounts. +** Login via Docker CLI (Registry API) for app-specific tokens accounts. ++ +You can disable this timestamp feature by setting `ACTION_LOG_AUDIT_LOGINS` to `false` in your `config.yaml` file. This field is set to `true` by default. ++ +[NOTE] +==== +Logout events from the client side (Docker or Podman) are not causing requests to the registry API and are therefore not trackable. +==== + +* link:https://issues.redhat.com/browse/PROJQUAY-4614[PROJQUAY-4614]. Add conftest mediatypes to default Quay configuration. +* link:https://issues.redhat.com/browse/PROJQUAY-4865[PROJQUAY-4865]. Remove unused dependencies. +* link:https://issues.redhat.com/browse/PROJQUAY-4957[PROJQUAY-4957]. Limit indexing of manifests that continuously fail. +* link:https://issues.redhat.com/browse/PROJQUAY-5009[PROJQUAY-5009]. secscan: add api client timeout. +* link:https://issues.redhat.com/browse/PROJQUAY-5018[PROJQUAY-5018]. Ignore unknown media types in manifests. +* link:https://issues.redhat.com/browse/PROJQUAY-5237[PROJQUAY-5237]. The number of repositories in organization is incorrect in new UI. +* link:https://issues.redhat.com/browse/PROJQUAY-4993[PROJQUAY-4993]. Support Action Log Forward to Splunk. +* link:https://issues.redhat.com/browse/PROJQUAY-4567[PROJQUAY-4567]. 
Robot Tokens. +* link:https://issues.redhat.com/browse/PROJQUAY-5289[PROJQUAY-5289]. Create a new username for accounts that login via SSO in the new UI. +* link:https://issues.redhat.com/browse/PROJQUAY-5362[PROJQUAY-5362]. API: Add filtering to Tags API. +* link:https://issues.redhat.com/browse/PROJQUAY-5207[PROJQUAY-5207]. Phase 3: Quay.io Summit Deliverables. +* link:https://issues.redhat.com/browse/PROJQUAY-4608[PROJQUAY-4608]. Quay Operator should install a fully supported version of Postgres for Quay and Clair. +* link:https://issues.redhat.com/browse/PROJQUAY-5050[PROJQUAY-5050]. Can't provide a link to quay directly to an image that works in both old UI and new UI. +* link:https://issues.redhat.com/browse/PROJQUAY-5253[PROJQUAY-5253]. Don't convert dashes to underscores during first login. +* link:https://issues.redhat.com/browse/PROJQUAY-4303[PROJQUAY-4303]. Multi-arch images are ignored in storage consumption calculation. +* link:https://issues.redhat.com/browse/PROJQUAY-4304[PROJQUAY-4304]. Empty repositories are reporting storage consumption. +* link:https://issues.redhat.com/browse/PROJQUAY-5634[PROJQUAY-5634]. oci: Allow optional components in the image config to be set to "null". +* link:https://issues.redhat.com/browse/PROJQUAY-5639[PROJQUAY-5639]. Quay 3.9.0 delete organization under normal user by superuser was failed with unauthorized error. +* link:https://issues.redhat.com/browse/PROJQUAY-5642[PROJQUAY-5642]. Quay 3.9.0 image High Vulnerability reported by Redhat ACS. +* link:https://issues.redhat.com/browse/PROJQUAY-5630[PROJQUAY-5630]. Quay 3.9.0 Quay image High vulnerability issue CVE-2022-28948. + +[id="quay-feature-tracker"] +== {productname} feature tracker + +New features have been added to {productname}, some of which are currently in Technology Preview. Technology Preview features are experimental features and are not intended for production use. + +Some features available in previous releases have been deprecated or removed. 
Deprecated functionality is still included in {productname}, but is planned for removal in a future release and is not recommended for new deployments. For the most recent list of deprecated and removed functionality in {productname}, refer to Table 1.1. Additional details for more fine-grained functionality that has been deprecated and removed are listed after the table. + +//Remove entries older than the latest three releases. + +.Technology Preview tracker +[cols="4,1,1,1",options="header"] +|=== +|Feature | Quay 3.9 | Quay 3.8 | Quay 3.7 + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#operator-georepl-site-removal[Single site geo-replication removal] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/manage_red_hat_quay/index#proc_manage-log-storage-splunk[Splunk log forwarding] +|General Availability +|- +|- + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.9/html-single/configure_red_hat_quay/index#config-fields-nutanix[Nutanix Object Storage] +|General Availability +|- +|- + +|Docker v1 support +|Deprecated +|Deprecated +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#reference-miscellaneous-v2-ui[FEATURE_UI_V2] +|Technology Preview +|Technology Preview +| - + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#proc_manage-ipv6-dual-stack[FEATURE_LISTEN_IP_VERSION] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-super-users-enabling[LDAP_SUPERUSER_FILTER] +|General Availability +|General Availability +|- + +| 
link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#ldap-restricted-users-enabling[LDAP_RESTRICTED_USER_FILTER] +|General Availability +|General Availability +| - + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-superusers-full-access[FEATURE_SUPERUSERS_FULL_ACCESS] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-global-readonly-super-users[GLOBAL_READONLY_SUPER_USERS] +|General Availability +|General Availability +| - + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-feature-restricted-users[FEATURE_RESTRICTED_USERS] +|General Availability +|General Availability +|- + +| link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/configure_red_hat_quay/index#configuring-restricted-users-whitelist[RESTRICTED_USERS_WHITELIST] +|General Availability +|General Availability +|- + +|link:https://access.redhat.com//documentation/en-us/red_hat_quay/{producty}/html-single/use_red_hat_quay#red-hat-quay-quota-management-and-enforcement[Quota management and enforcement] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#red-hat-quay-builders-enhancement[{productname} build enhancements] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/use_red_hat_quay#quay-as-cache-proxy[{productname} as proxy cache for upstream registries] +|General Availability +|General Availability +|Technology Preview + 
+|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index[Geo-replication - {productname} Operator] +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.7/html-single/manage_red_hat_quay#unmanaged_clair_configuration[Advanced Clair configuration] +|General Availability +|General Availability +|General Availability + +|Support for Microsoft Azure Government (MAG) +|General Availability +|General Availability +|General Availability + +|link:https://access.redhat.com/documentation/en-us/red_hat_quay/3.8/html-single/manage_red_hat_quay/index#clair-crda-configuration[Java scanning with Clair] +|Technology Preview +|Technology Preview +|Technology Preview + +|=== + +//// +[id="deprecated-features"] +=== Deprecated features +//// diff --git a/modules/robot-account-manage-api.adoc b/modules/robot-account-manage-api.adoc new file mode 100644 index 000000000..1546017d1 --- /dev/null +++ b/modules/robot-account-manage-api.adoc @@ -0,0 +1,4 @@ +[id="robot-account-manage-api"] += Creating and configuring robot accounts by using the {productname} API + +Robot accounts can be created, retrieved, changed, and deleted for both organizations and users by using the {productname} API. 
\ No newline at end of file diff --git a/modules/robot-account-overview.adoc b/modules/robot-account-overview.adoc new file mode 100644 index 000000000..dbd3c02bf --- /dev/null +++ b/modules/robot-account-overview.adoc @@ -0,0 +1,44 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="allow-robot-access-user-repo"] += {productname} Robot Account overview + +Robot Accounts are used to set up automated access to the repositories in +your +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +ifeval::["{context}" == "quay-security"] +{productname}. +endif::[] +registry. They are similar to {ocp} service accounts. + +Setting up a Robot Account results in the following: + +* Credentials are generated that are associated with the Robot Account. + +* Repositories and images that the Robot Account can push and pull images from are identified. + +* Generated credentials can be copied and pasted to use with different container clients, such as Docker, Podman, Kubernetes, Mesos, and so on, to access each defined repository. + +ifeval::["{context}" == "quay-security"] +Robot Accounts can help secure your {productname} registry by offering various security advantages, such as the following: + +* Specifying repository access. +* Granular permissions, such as `Read` (pull) or `Write` (push) access. They can also be equipped with `Admin` permissions if warranted. +* Designed for CI/CD pipelines, system integrations, and other automation tasks, helping avoid credential exposure in scripts, pipelines, or other environment variables. +* Robot Accounts use tokens instead of passwords, which provides the ability for an administrator to revoke the token in the event that it is compromised. + +endif::[] + +Each Robot Account is limited to a single user namespace or Organization. 
For example, the Robot Account could provide access to all repositories for the user `quayadmin`. However, it cannot provide access to repositories that are not in the user's list of repositories. + +Robot Accounts can be created using the {productname} UI, or through the CLI using the {productname} API. After creation, {productname} administrators can leverage more advanced features with Robot Accounts, such as keyless authentication. \ No newline at end of file diff --git a/modules/robot-account-permissions-api.adoc b/modules/robot-account-permissions-api.adoc new file mode 100644 index 000000000..71ccf8f1c --- /dev/null +++ b/modules/robot-account-permissions-api.adoc @@ -0,0 +1,76 @@ +:_content-type: CONCEPT +[id="robot-account-permissions-api"] += Obtaining robot account information by using the {productname} API + +Robot account information, such as permissions, can be obtained for both organizations and users by using the {productname} API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. 
+ +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorgrobot[`GET /api/v1/organization/{orgname}/robots/{robot_shortname}`] API endpoint to return information for a robot for an organization: ++ +[source,terminal] +---- +curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "test+example", "created": "Mon, 25 Nov 2024 16:25:16 -0000", "last_accessed": null, "description": "", "token": "BILZ6YTVAZAKOGMD9270OKN3SOD9KPB7OLKEJQOJE38NBBRUJTIH7T5859DJL31Q", "unstructured_metadata": {}} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getorgrobotpermissions[`GET /api/v1/organization/{orgname}/robots/{robot_shortname}/permissions`] endpoint to return the list of permissions for a specific organization robot: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/organization//robots//permissions" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"repository": {"name": "testrepo", "is_public": true}, "role": "admin"}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserrobot[`GET /api/v1/user/robots/{robot_shortname}`] API endpoint to return the user's robot with the specified name: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/user/robots/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "quayadmin+mirror_robot", "created": "Wed, 15 Jan 2025 17:22:09 -0000", "last_accessed": null, "description": "", "token": "QBFYWIWZOS1I0P0R9N1JRNP1UZAOPUIR3EB4ASPZKK9IA1SFC12LTEF7OJHB05Z8", "unstructured_metadata": {}} +---- + +* Use 
the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserrobotpermissions[`GET /api/v1/user/robots/{robot_shortname}/permissions`] API endpoint to return a list of permissions for the user robot: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/user/robots//permissions" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"repository": {"name": "busybox", "is_public": false}, "role": "write"}]} +---- \ No newline at end of file diff --git a/modules/robot-account-tokens.adoc b/modules/robot-account-tokens.adoc new file mode 100644 index 000000000..21fb10b49 --- /dev/null +++ b/modules/robot-account-tokens.adoc @@ -0,0 +1,17 @@ +:_content-type: REFERENCE +[id="robot-account-tokens"] += Robot account tokens + +Robot account _tokens_ are _password-type_ credentials used to access a {productname} registry via normal Docker v2 endpoints; these are defined as _tokens_ on the UI because the password itself is encrypted. + +Robot account tokens are persistent tokens designed for automation and continuous integration workflows. By default, {productname}'s robot account tokens do not expire and do not require user interaction, which makes robot accounts ideal for non-interactive use cases. + +Robot account tokens are automatically generated at the time of a robot's creation and are non-user specific; that is, they are connected to the user and organization namespace where they are created. For example, a robot named `project_tools+` is associated with the `project_tools` namespace. + +Robot account tokens provide access without needing a user's personal credentials. How the robot account is configured, for example, with one of `READ`, `WRITE`, or `ADMIN` permissions, ultimately defines the actions that the robot account can take. 
+ +Because robot account tokens are persistent and do not expire by default, they are ideal for automated workflows that require consistent access to {productname} without manual renewal. Despite this, robot account tokens can be easily re-generated by using the UI. They can also be regenerated by using the proper API endpoint via the CLI. To enhance the security of your {productname} deployment, administrators should regularly refresh robot account tokens. Additionally, with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/use_red_hat_quay/index#keyless-authentication-robot-accounts[_keyless authentication with robot accounts_] feature, robot account tokens can be exchanged for external OIDC tokens and leveraged so that they only last one hour, enhancing the security of your registry. + +When a namespace gets deleted, or when the robot account is deleted itself, the associated robot account tokens are garbage collected when the collector is scheduled to run. + +The following section shows you how to use the API to re-generate a robot account token for organization robots and user robots. \ No newline at end of file diff --git a/modules/role-based-access-control-intro.adoc b/modules/role-based-access-control-intro.adoc new file mode 100644 index 000000000..73a888800 --- /dev/null +++ b/modules/role-based-access-control-intro.adoc @@ -0,0 +1,15 @@ +[id="role-based-access-control"] += {productname} permissions model + +{productname}'s permission model provides fine-grained access control over repositories and the content of those repositories, helping ensure secure collaboration and automation. {productname} administrators can grant users and robot accounts one of the following levels of access: + +* *Read*: Allows users, robots, and teams to pull images. +* *Write*: Allows users, robots, and teams to push images. +* *Admin*: Provides users, robots, and teams administrative privileges. 
+ +[NOTE] +==== +Administrative users can delegate new permissions for existing users and teams, change existing permissions, and revoke permissions when necessary. +==== + +Collectively, these levels of access provide users or robot accounts the ability to perform specific tasks, like pulling images, pushing new versions of an image into the registry, or managing the settings of a repository. These permissions can be delegated across the entire organization and on specific repositories. For example, *Read* permissions can be set to a specific team within the organization, while *Admin* permissions can be given to all users across all repositories within the organization. diff --git a/modules/root-rule-config-api-example.adoc b/modules/root-rule-config-api-example.adoc new file mode 100644 index 000000000..7da3016c6 --- /dev/null +++ b/modules/root-rule-config-api-example.adoc @@ -0,0 +1,11 @@ +:_content-type: CONCEPT +[id="root-rule-config-api-example"] + += root_rule object reference + +[source,yaml] +---- + { + "root_rule": {"rule_kind": "tag_glob_csv", "rule_value": ["latest", "foo", "bar"]}, + } +---- \ No newline at end of file diff --git a/modules/rotating-log-files.adoc b/modules/rotating-log-files.adoc new file mode 100644 index 000000000..b7c79256b --- /dev/null +++ b/modules/rotating-log-files.adoc @@ -0,0 +1,14 @@ +:_content-type: CONCEPT +[id="rotating-log-files"] += Rotating log files of {productname} containers + +In some cases, users have found that the log rotation configurations for Nginx within the {productname} image do not function as intended. This leads to the logs not being rotated properly. + +{productname} containers do not have a `logrotate` binary. Nginx logs are forwarded to `/dev/stdout` by default. The log rotation for these logs is governed by the container runtime that you are using. + +To address the log rotation for {productname} container logs, you must configure log rotation at the container runtime level. 
Refer to the documentation or configuration options of your container runtime to set up log rotation for {productname} container logs accordingly. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6974691[How to rotate log files of Quay containers?]. \ No newline at end of file diff --git a/modules/running-ldap-debug-mode.adoc b/modules/running-ldap-debug-mode.adoc new file mode 100644 index 000000000..2e7407a43 --- /dev/null +++ b/modules/running-ldap-debug-mode.adoc @@ -0,0 +1,26 @@ +:_content-type: PROCEDURE +[id="running-ldap-debug-mode"] += Running an LDAP {productname} deployment in debug mode + +Use the following procedure to run an LDAP deployment of {productname} in debug mode. + +.Procedure + +. Enter the following command to run your LDAP {productname} deployment in debug mode: ++ +[source,terminal] +---- +$ podman run -p 443:8443 -p 80:8080 -e DEBUGLOG=true -e USERS_DEBUG=1 -v /config:/conf/stack -v /storage:/datastorage -d {productrepo}/{quayimage}:{productminv} +---- + +. To view the debug logs, enter the following command: ++ +[source,terminal] +---- +$ podman logs +---- ++ +[IMPORTANT] +==== +Setting `USERS_DEBUG=1` exposes credentials in clear text. This variable should be removed from the {productname} deployment after debugging. The log file that is generated with this environment variable should be scrutinized, and passwords should be removed before sending to other users. Use with caution. +==== \ No newline at end of file diff --git a/modules/running-operator-debug-mode.adoc b/modules/running-operator-debug-mode.adoc new file mode 100644 index 000000000..e947ad7c7 --- /dev/null +++ b/modules/running-operator-debug-mode.adoc @@ -0,0 +1,29 @@ +:_content-type: PROCEDURE +[id="running-operator-debug-mode"] += Running the {productname} Operator in debug mode + +Use the following procedure to run the {productname} Operator in debug mode. + +.Procedure + +. 
Enter the following command to edit the `QuayRegistry` custom resource definition: ++ +[source,terminal] +---- +$ oc edit quayregistry -n +---- + +. Update the `QuayRegistry` to add the following parameters: ++ +[source,yaml] +---- +spec: + components: + - kind: quay + managed: true + overrides: + env: + - name: DEBUGLOG + value: "true" +---- + +. After the {productname} Operator has restarted with debugging enabled, try pulling an image from the registry. If it is still slow, dump all logs from all `Quay` pods to a file, and check the files for more information. \ No newline at end of file diff --git a/modules/running-quay-debug-mode-intro.adoc b/modules/running-quay-debug-mode-intro.adoc new file mode 100644 index 000000000..881395a2b --- /dev/null +++ b/modules/running-quay-debug-mode-intro.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="running-quay-debug-mode-intro"] += Running {productname} in debug mode + +Red Hat recommends gathering your debugging information when opening a support case. Running {productname} in debug mode provides verbose logging to help administrators find more information about various issues. Enabling debug mode can speed up the process to reproduce errors and validate a solution for things like geo-replication deployments, Operator deployments, standalone {productname} deployments, object storage issues, and so on. Additionally, it helps Red Hat Support perform a root cause analysis. + +[id="debug-configuration-fields"] +== {productname} debug variables + +{productname} offers two configuration fields that can be added to your `config.yaml` file to help diagnose issues or help obtain log information. + +.Debug configuration variables +[cols="3a,1a,2a",options="header"] +|=== +| Variable | Type | Description +| **DEBUGLOG** | Boolean | Whether to enable or disable debug logs. Must be `true` or `false`. +| **USERS_DEBUG** |Integer. Either `0` or `1`. | Used to debug LDAP operations in clear text, including passwords. 
Must be used with `DEBUGLOG=TRUE`. + +[IMPORTANT] +==== +Setting `USERS_DEBUG=1` exposes credentials in clear text. This variable should be removed from the {productname} deployment after debugging. The log file that is generated with this environment variable should be scrutinized, and passwords should be removed before sending to other users. Use with caution. +==== +|=== \ No newline at end of file diff --git a/modules/running-quay-debug-mode.adoc b/modules/running-quay-debug-mode.adoc new file mode 100644 index 000000000..fc8c380ea --- /dev/null +++ b/modules/running-quay-debug-mode.adoc @@ -0,0 +1,23 @@ +:_content-type: PROCEDURE +[id="running-standalone-debug-mode"] += Running a standalone {productname} deployment in debug mode + +Running {productname} in debug mode provides verbose logging to help administrators find more information about various issues. Enabling debug mode can speed up the process to reproduce errors and validate a solution. + +Use the following procedure to run a standalone deployment of {productname} in debug mode. + +.Procedure + +. Enter the following command to run your standalone {productname} deployment in debug mode: ++ +[source,terminal] +---- +$ podman run -p 443:8443 -p 80:8080 -e DEBUGLOG=true -v /config:/conf/stack -v /storage:/datastorage -d {productrepo}/{quayimage}:{productminv} +---- + +. 
To view the debug logs, enter the following command: ++ +[source,terminal] +---- +$ podman logs +---- \ No newline at end of file diff --git a/modules/scalability-intro.adoc b/modules/scalability-intro.adoc new file mode 100644 index 000000000..3c662dc8a --- /dev/null +++ b/modules/scalability-intro.adoc @@ -0,0 +1,7 @@ +[[scalability-intro]] += Scalability + + +* Massive scale testing Quay.io +* Real-time garbage collection +* Automated squashing \ No newline at end of file diff --git a/modules/scans-not-working-behind-proxy.adoc b/modules/scans-not-working-behind-proxy.adoc new file mode 100644 index 000000000..40fd525f1 --- /dev/null +++ b/modules/scans-not-working-behind-proxy.adoc @@ -0,0 +1,34 @@ +:_content-type: PROCEDURE +[id="scans-not-working-behind-proxy"] += Clair scans are not working behind proxy + +In some cases, {productname} debug logs return a `401` error when interacting with Clair through a proxy, which suggests that {productname} is unable to communicate with Clair. For example: `securityworker stdout | 2022-11-08 14:32:52,443 [106] [DEBUG] [urllib3.connectionpool] http://192.168.xx.xx:3128 "GET http://clairv4/indexer/api/v1/index_state HTTP/1.1" 401 843 securityworker stdout | 2022-11-08 14:32:52,474 [106] [ERROR] [util.secscan.v4.api] Security scanner endpoint responded with non-200 HTTP status code: 401`. + +This issue occurs because {productname} inherited the cluster proxy configuration from {ocp} and attempted to connect with Clair through the proxy, which results in the aforementioned error code. + +To resolve this issue, remove any proxy variables from the `QuayRegistry` custom resource definition (CRD) to keep {productname} unproxied. 
For example: + +[source,yaml] +---- +kind: QuayRegistry +components: + - kind: quay + managed: true +overrides: + env: + - name: DEBUGLOG + value: "true" + - name: NO_PROXY + value: svc.cluster.local,localhost,quay.example.com + - name: HTTP_PROXY + value: "" + - name: HTTPS_PROXY + value: "" +---- + +You must set the proxy variables for Clair. Proxy variables can be copied from {ocp}'s `cluster proxy` file. Add the full Clair service name to `NO_PROXY` in the `QuayRegistry` CRD. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6988319[Clair scans are not working behind proxy]. \ No newline at end of file diff --git a/modules/search-api.adoc b/modules/search-api.adoc new file mode 100644 index 000000000..475f547e8 --- /dev/null +++ b/modules/search-api.adoc @@ -0,0 +1,57 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc +:_content-type: CONCEPT +[id="search-api"] += Searching against registry context + +You can use `search` API endpoints to perform searches against all registry context. 
+ +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#conductreposearch[`GET /api/v1/find/repositories`] endpoint to get a list of apps and repositories that match the specified query: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/repositories?query=&page=1&includeUsage=true" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"results": [], "has_additional": false, "page": 2, "page_size": 10, "start_index": 10} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#conductsearch[`GET /api/v1/find/all`] endpoint to get a list of entities and resources that match the specified query: ++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/find/all?query=" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"results": [{"kind": "repository", "title": "repo", "namespace": {"title": "user", "kind": "user", "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "name": "quayadmin", "score": 1, "href": "/user/quayadmin"}, "name": "busybox", "description": null, "is_public": false, "score": 4.0, "href": "/repository/quayadmin/busybox"}]} +---- + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getmatchingentities[`GET /api/v1/entities/{prefix}`] endpoint to get a list of entities that match the specified prefix. 
++ +[source,terminal] +---- +$ curl -X GET "https://quay-server.example.com/api/v1/entities/?includeOrgs=&includeTeams=&namespace=" \ + -H "Authorization: Bearer " +---- ++ +.Example output ++ +[source,terminal] +---- +{"results": [{"name": "quayadmin", "kind": "user", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}}]} +---- \ No newline at end of file diff --git a/modules/secrets-garbage-collected.adoc b/modules/secrets-garbage-collected.adoc new file mode 100644 index 000000000..bc3fe5e42 --- /dev/null +++ b/modules/secrets-garbage-collected.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="secrets-garbage-collected"] += Garbage collecting secrets on the {productname} Operator + +When a change to your `config.yaml` file is made, some secrets are created, but the old ones might remain in the namespace. This happens because on every reconciliation, the {productname} Operator recreates all secrets, including the PostgreSQL password, the config editor password, and the {productname} configuration itself. After a few changes, the number of secrets in the namespace grows to the point where it is difficult to tell which secret is being used, and where. Consequently, it can complicate the debugging process and cause other issues with your deployment. + +There is no automatic mechanism to prune secrets in a {productname} Operator deployment. As a workaround, you can locate and manually delete secrets that are not in use by other resources by running the following command: + +[source,terminal] +---- +$ oc delete secret +---- + + +[role="_additional-resources"] +.Additional resources + +For more information, see the following resources: + +* link:https://issues.redhat.com/browse/PROJQUAY-5172[PROJQUAY-5172]. + +* link:https://access.redhat.com/solutions/6974476[When using the Quay Operator, Secrets don't get garbage collected]. 
\ No newline at end of file diff --git a/modules/security-intro.adoc b/modules/security-intro.adoc new file mode 100644 index 000000000..880c83127 --- /dev/null +++ b/modules/security-intro.adoc @@ -0,0 +1,29 @@ +[[security-intro]] += {productname} security overview + +{productname} is built for real enterprise use cases where content governance and security are two major focus areas. {productname} content governance and security includes a built-in vulnerability scanning via Clair. + +Clair is an open source tool developed by CoreOS for Quay that generates analyses of vulnerabilities in application containers, which currently includes Open Container Initiative (OCI) and Docker images. Clients that use the Clair API to index their container images can then match their images against known vulnerabilities. + +Clair supports the extraction of contents and assignment of vulnerabilities from the following official base containers: + +* Ubuntu Linux +* Debian Linux +* Red Hat Enterprise Linux +* SUSE +* Oracle Linux +* Alpine Linux +* Amazon Linux +* VMware Photon +* Python + + + + + + + + + + + diff --git a/modules/security-scanning-api.adoc b/modules/security-scanning-api.adoc new file mode 100644 index 000000000..caab437ba --- /dev/null +++ b/modules/security-scanning-api.adoc @@ -0,0 +1,24 @@ +:_content-type: CONCEPT +[id="security-scanning-api"] += View Clair security scans by using the API + +You can view Clair security scans by using the API. + +.Procedure + +* Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepomanifestsecurity[`GET /api/v1/repository/{repository}/manifest/{manifestref}/security`] endpoint to retrieve security information about a specific manifest in a repository. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https://quay-server.example.com/api/v1/repository///manifest//security?vulnerabilities=" +---- ++ +.Example output ++ +[source,terminal] +---- +{"status": "queued", "data": null} +---- \ No newline at end of file diff --git a/modules/security-scanning-ui.adoc b/modules/security-scanning-ui.adoc new file mode 100644 index 000000000..80fec1bc7 --- /dev/null +++ b/modules/security-scanning-ui.adoc @@ -0,0 +1,23 @@ +:_content-type: CONCEPT +[id="security-scanning-ui"] += Viewing Clair security scans by using the UI + +You can view Clair security scans on the UI. + +.Procedure + +. Navigate to a repository and click *Tags* in the navigation pane. This page shows the results of the security scan. + +. To reveal more information about multi-architecture images, click *See Child Manifests* to see the list of manifests in extended view. + +. Click a relevant link under *See Child Manifests*, for example, *1 Unknown* to be redirected to the *Security Scanner* page. + +. The *Security Scanner* page provides information for the tag, such as which CVEs the image is susceptible to, and what remediation options you might have available. + +[NOTE] +==== +Image scanning only lists vulnerabilities found by Clair security scanner. What users do about the vulnerabilities are uncovered is up to said user. +ifeval::["{context}" == "use-quay"] +{productname} superusers do not act on found vulnerabilities. +endif::[] +==== \ No newline at end of file diff --git a/modules/security-scanning.adoc b/modules/security-scanning.adoc new file mode 100644 index 000000000..3d380a316 --- /dev/null +++ b/modules/security-scanning.adoc @@ -0,0 +1,30 @@ +:_content-type: CONCEPT +[id="security-scanning"] += Clair security scans + +ifeval::["{context}" == "quay-io"] +{quayio} comes equipped with Clair security scanner. 
For more information about Clair on {quayio}, see "Clair security scanner." +endif::[] +ifeval::["{context}" == "use-quay"] +Clair security scanner is not enabled for {productname} by default. To enable Clair, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/testing-clair-with-quay[Clair on {productname}]. +endif::[] + +Clair security scans can be viewed on the UI, or by the API. + +.Procedure + +. Navigate to a repository and click *Tags* in the navigation pane. This page shows the results of the security scan. + +. To reveal more information about multi-architecture images, click *See Child Manifests* to see the list of manifests in extended view. + +. Click a relevant link under *See Child Manifests*, for example, *1 Unknown* to be redirected to the *Security Scanner* page. + +. The *Security Scanner* page provides information for the tag, such as which CVEs the image is susceptible to, and what remediation options you might have available. + +[NOTE] +==== +Image scanning only lists vulnerabilities found by Clair security scanner. What users do about the vulnerabilities are uncovered is up to said user. +ifeval::["{context}" == "use-quay"] +{productname} superusers do not act on found vulnerabilities. +endif::[] +==== \ No newline at end of file diff --git a/modules/set-team-role.adoc b/modules/set-team-role.adoc new file mode 100644 index 000000000..72f428d5d --- /dev/null +++ b/modules/set-team-role.adoc @@ -0,0 +1,32 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="set-team-role"] += Setting a team role by using the UI + +After you have created a team, you can set the role of that team within the +Organization. + +.Prerequisites + +* You have created a team. + +.Procedure + +. On the {productname} landing page, click the name of your Organization. + +. 
In the navigation pane, click *Teams and Membership*. + +. Select the *TEAM ROLE* drop-down menu, as shown in the following figure: ++ +image:set-team-role.png[Set the role that a team has within an organization] + +. For the selected team, choose one of the following roles: ++ +* *Admin*. Full administrative access to the organization, including the ability to create teams, add members, and set permissions. +* *Member*. Inherits all permissions set for the team. +* *Creator*. All member permissions, plus the ability to create new repositories. \ No newline at end of file diff --git a/modules/setting-default-quota.adoc b/modules/setting-default-quota.adoc new file mode 100644 index 000000000..71e341708 --- /dev/null +++ b/modules/setting-default-quota.adoc @@ -0,0 +1,13 @@ +:_content-type: CONCEPT +[id="default-quota"] += Setting default quota + +To specify a system-wide default storage quota that is applied to every organization and user, you can use the *DEFAULT_SYSTEM_REJECT_QUOTA_BYTES* configuration flag. + +If you configure a specific quota for an organization or user, and then delete that quota, the system-wide default quota will apply if one has been set. Similarly, if you have configured a specific quota for an organization or user, and then modify the system-wide default quota, the updated system-wide default will override any specific settings. 
+ +For more information about the `DEFAULT_SYSTEM_REJECT_QUOTA_BYTES` flag, + +//need link for 3.9 + +see link: diff --git a/modules/setting-role-of-team-within-organization-api.adoc b/modules/setting-role-of-team-within-organization-api.adoc new file mode 100644 index 000000000..ccb04b19b --- /dev/null +++ b/modules/setting-role-of-team-within-organization-api.adoc @@ -0,0 +1,54 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE + +[id="setting-role-of-team-within-organization-api"] += Setting the role of a team within an organization by using the API + +Use the following procedure to view and set the role a team within an organization using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/red_hat_quay_api_reference/index#getorganizationteampermissions[`GET /api/v1/organization/{orgname}/team/{teamname}/permissions`] command to return a list of repository permissions for the organization's team. Note that your team must have been added to a repository for this command to return information. ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "/api/v1/organization//team//permissions" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"repository": {"name": "api-repo", "is_public": true}, "role": "admin"}]} +---- + +. 
You can create or update a team within an organization to have a specified role of *admin*, *member*, or *creator* using the link:https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html-single/red_hat_quay_api_reference/index#updateorganizationteam[`PUT /api/v1/organization/{orgname}/team/{teamname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "role": "" + }' \ + "/api/v1/organization//team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"name": "testteam", "description": "", "can_view": true, "role": "creator", "avatar": {"name": "testteam", "hash": "827f8c5762148d7e85402495b126e0a18b9b168170416ed04b49aae551099dc8", "color": "#ff7f0e", "kind": "team"}, "new_team": false} +---- \ No newline at end of file diff --git a/modules/setting-tag-expiration-api.adoc b/modules/setting-tag-expiration-api.adoc new file mode 100644 index 000000000..389312d8c --- /dev/null +++ b/modules/setting-tag-expiration-api.adoc @@ -0,0 +1,32 @@ +:_content-type: CONCEPT +[id="setting-tag-expirations-api"] += Setting tag expirations by using the API + +Image tags can be set to expire by using the API. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. 
+ +.Procedure + +* You can set when an image a tag expires by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changetag[`PUT /api/v1/repository/{repository}/tag/{tag}`] command and passing in the expiration field: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + --data '{ + "expiration": "" + }' \ + https:///api/v1/repository///tag/ +---- ++ +.Example output ++ +[source,terminal] +---- +"Updated" +---- diff --git a/modules/setting-tag-expirations-v2-ui.adoc b/modules/setting-tag-expirations-v2-ui.adoc new file mode 100644 index 000000000..ae25965b5 --- /dev/null +++ b/modules/setting-tag-expirations-v2-ui.adoc @@ -0,0 +1,190 @@ +:_content-type: CONCEPT +[id="setting-tag-expirations-v2-ui"] += Setting tag expirations + +Image tags can be set to expire from a +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +repository at a chosen date and time using the _tag expiration_ feature. This feature includes the following characteristics: + +* When an image tag expires, it is deleted from the repository. If it is the last tag for a specific image, the image is also set to be deleted. + +* Expiration is set on a per-tag basis. It is not set for a repository as a whole. + +* After a tag is expired or deleted, it is not immediately removed from the registry. This is contingent upon the allotted time designed in the _time machine_ feature, which defines when the tag is permanently deleted, or garbage collected. By default, this value is set at _14 days_, however the administrator can adjust this time to one of multiple options. Up until the point that garbage collection occurs, tags changes can be reverted. + +ifeval::["{context}" == "use-quay"] +The {productname} superuser has no special privilege related to deleting expired images from user repositories. 
There is no central mechanism for the superuser to gather information and act on user repositories. It is up to the owners of each repository to manage expiration and the deletion of their images. +endif::[] + +Tag expiration can be set up in one of three ways: + +* By setting the `quay.expires-after=` label in the Dockerfile when the image is created. This sets a time to expire from when the image is built. This label only works for image manifests. + +* By setting the `quay.expires-after=` annotation label in the Dockerfile when the image is created. `--annotation` can be passed in for both image manifests and image indexes. + +* By selecting an expiration date on the +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +UI. For example: ++ +image:tag-expires-ui.png[Change tag expiration under the Options icon or from the EXPIRES column] + +Setting tag expirations can help automate the cleanup of older or unused tags, helping to reduce storage space. + +[id="setting-tag-expiration-using-ui"] +== Setting tag expiration from a repository + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Change expiration*. + +. Optional. Alternatively, you can bulk add expiration dates by clicking the box of multiple tags, and then select *Actions* -> *Set expiration*. + +. In the *Change Tags Expiration* window, set an expiration date, specifying the day of the week, month, day of the month, and year. For example, `Wednesday, November 15, 2023`. Alternatively, you can click the calendar button and manually select the date. + +. Set the time, for example, `2:30 PM`. + +. Click *Change Expiration* to confirm the date and time. The following notification is returned: `Successfully set expiration for tag test to Nov 15, 2023, 2:26 PM`. + +. 
On the {productname} v2 UI *Tags* page, you can see when the tag is set to expire. For example: ++ +image:tag-expiration-v2-ui.png[{productname} v2 UI tag expiration] + +[id="setting-tag-expiration-from-dockerfile"] +== Setting tag expiration from a Dockerfile + +You can add a label, for example, `quay.expires-after=20h` to an image tag by using the `docker label` command to cause the tag to automatically expire after the time that is indicated. The following values for hours, days, or weeks are accepted: + +* `1h` +* `2d` +* `3w` + +Expiration begins from the time that the image is pushed to the registry. + +.Procedure + +* Enter the following `docker label` command to add a label to the desired image tag. The label should be in the format `quay.expires-after=20h` to indicate that the tag should expire after 20 hours. Replace `20h` with the desired expiration time. For example: ++ +[source,terminal] +---- +$ docker label quay.expires-after=20h quay-server.example.com/quayadmin/: +---- + +[id="setting-tag-expiration-annotation"] +== Setting tag expiration using annotations + +You can add an annotation, for example, `quay.expires-after=20h`, to an image tag using the `--annotation` flag when pushing an image to the registry. This annotation causes the tag to automatically expire after the specified time. The annotation can be applies to both image manifests and image indexes. The following values for hours, days, or weeks are accepted: + +* `1h` +* `2d` +* `3w` + +Expiration begins from the time that the image is pushed to the registry. + +[NOTE] +==== +Using the `--annotation` flag is simplest using the `oras` CLI tool. +==== + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. + +.Procedure + +. Enter the following `oras push --annotation` command to add an annotation to the desired image tag. 
The annotation should be in the format `quay.expires-after=` to indicate that the tag should expire after the set time.
established expiration times. + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. +* You have pushed an image with the `quay.expires-after=` annotation. + +.Procedure + +. Enter the following `oras push --annotation` command to remove an annotation to the desired image tag. The annotation should be in the format `quay.expires-after=never`. For example: ++ +[source,terminal] +---- +$ oras push --annotation quay.expires-after=never \ + //: \ + : +---- ++ +.Example output ++ +[source,terminal] +---- +✓ Uploaded hello.txt 12/12 B 100.00% 321ms + └─ sha256:74b9e308133afb3bceae961097cb2aa481483869d695ce1414cd2bc7f046027c +✓ Uploaded application/vnd.oci.empty.v1+json 2/2 B 100.00% 328ms + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 620/620 B 100.00% 0s + └─ sha256:c370e931b5eca44fd753bd92e6991ed3be70008e8df15078083359409111f8c3 +Pushed [registry] quay-server.example.com/fortestuser/busybox:test2 +ArtifactType: application/vnd.unknown.artifact.v1 +---- + +. The latest manifest will no longer have an expiration time. Confirm that the expiration date has been removed by checking the {productname} UI, or by entering the following command: ++ +[source,terminal] +---- +{"tags": [{"name": "test2", "reversion": false, "start_ts": 1743708135, "manifest_digest": "sha256:19e3a3501b4125cce9cb6bb26ac9207c325259bef94dc66490b999f93c4c83a9", "is_manifest_list": false, "size": 12, "last_modified": "Thu, 03 Apr 2025 19:22:15 -0000"}, {"name": "test2", "reversion": false, "start_ts": 1743706344, "end_ts": 1743708135}]} +---- ++ +Note that no expiration time is listed. 
\ No newline at end of file diff --git a/modules/setting-up-builds-aws.adoc b/modules/setting-up-builds-aws.adoc new file mode 100644 index 000000000..7f3f4e05d --- /dev/null +++ b/modules/setting-up-builds-aws.adoc @@ -0,0 +1,41 @@ +:_content-type: PROCEDURE +[id="setting-up-builds-aws"] += {productname} Builder configuration with Amazon Elastic Compute Cloud + +{productname} can also be configured to use Amazon Elastic Compute Cloud (EC2) instances as _build worker_ nodes. This is useful for situations where you might want to have EC2 based _builds_ available as a backup solution in the event that your {ocp} _build workers_ are overloaded or unavailable. + +[NOTE] +==== +Amazon EC2 _builds_ are not supported by Red{nbsp}Hat. This is currently provided as an upstream feature only. +==== + +You can follow the steps in "Configuring bare metal builds for {productname-ocp}" and substitute the following changes in your configuration bundle to enable Amazon EC2. + +.Example configuration for bare metal builds with Amazon EC2 +[source,yaml] +---- + EXECUTORS: + - EXECUTOR: ec2 + QUAY_USERNAME: + QUAY_PASSWORD: + WORKER_IMAGE: quay.io/quay/quay-builder + WORKER_TAG: latest + EC2_REGION: us-east-1 + COREOS_AMI: ami-02545325b519192df # Fedora CoreOS <1> + AWS_ACCESS_KEY: ***** + AWS_SECRET_KEY: ***** + EC2_INSTANCE_TYPE: t2.large + EC2_VPC_SUBNET_ID: + EC2_SECURITY_GROUP_IDS: + - + EC2_KEY_NAME: + BLOCK_DEVICE_SIZE: 58 + SSH_AUTHORIZED_KEYS: <2> + - + - + HTTP_PROXY: + HTTPS_PROXY: + NO_PROXY: +---- +<1> Specifies an AMI name where _builds_ will be run. Unlike bare metal _builds_, these container _builds_ are done directly within an ephemeral EC2 instance. This AMI must utilize ignition and contain a docker. The AMI shown in this example is used by {quay.io} for its build system. +<2> Allows public SSH keys to be added to the build environment for remote troubleshooting access. 
This key, or keys, should correspond to the private key that an admin or developer will use to SSH into the build worker for debugging purposes. This key can be obtained by establishing an SSH connection to the remote host using a specific SSH key and port. For example: `$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost`. \ No newline at end of file diff --git a/modules/signature-does-not-exist.adoc b/modules/signature-does-not-exist.adoc new file mode 100644 index 000000000..b642dc750 --- /dev/null +++ b/modules/signature-does-not-exist.adoc @@ -0,0 +1,14 @@ +:_content-type: PROCEDURE +[id="signature-does-not-exist"] += Mirroring fails with "signature does not exist" error + +During the mirroring process in {productname}, users encounter a failure with the error message `signature does not exist`. This issue arises when attempting to mirror images and prevents their successful replication. + +The cause of this issue lies in the presence of old unsigned images on `registry.redhat.io`. Due to their lack of proper signatures, these images are unable to be mirrored successfully. + +This issue was addressed in {productname} version 3.6.4. The fix implemented in this version introduced a new checkbox named `Accept Unsigned Images` under the *Mirror Configuration* tab. Enabling this checkbox allows the mirroring process to proceed successfully, bypassing the `signature does not exist` error. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6804261[Red Hat Quay Image mirroring fails with error "signature does not exist"]. 
\ No newline at end of file diff --git a/modules/sizing-intro.adoc b/modules/sizing-intro.adoc new file mode 100644 index 000000000..aa3bcd25f --- /dev/null +++ b/modules/sizing-intro.adoc @@ -0,0 +1,22 @@ +:_content-type: CONCEPT +[id="sizing-intro"] += {productname} sizing and subscriptions + +Scalability of {productname} is one of its key strengths, with a single code base supporting a broad spectrum of deployment sizes, including the following: + +* Proof of Concept deployment on a single development machine +* Mid-size deployment of approximately 2,000 users that can serve content to dozens of Kubernetes clusters +* High-end deployment such as link:https://quay.io[Quay.io] that can serve thousands of Kubernetes clusters world-wide + +Since sizing heavily depends on a multitude of factors, such as the number of users, images, concurrent pulls and pushes, there are no standard sizing recommendations. + +The following are the minimum requirements for systems running {productname} (per container/pod instance): + +* **Quay:** minimum 6 GB; recommended 8 GB, 2 more more vCPUs +* **Clair:** recommended 2 GB RAM and 2 or more vCPUs +* **Storage:**: recommended 30 GB +* **NooBaa:** minimum 2 GB, 1 vCPU (when `objectstorage` component is selected by the Operator) +* **Clair database:** minimum 5 GB required for security metadata + +Stateless components of {productname} can be scaled out, but this will cause a heavier load on stateful backend services. + diff --git a/modules/sizing-sample.adoc b/modules/sizing-sample.adoc new file mode 100644 index 000000000..3d568e02b --- /dev/null +++ b/modules/sizing-sample.adoc @@ -0,0 +1,47 @@ +:_content-type: CONCEPT +[id="sizing-sample"] += {productname} sample sizings + +The following table shows approximate sizing for Proof of Concept, mid-size, and high-end deployments. Whether a deployment runs appropriately with the same metrics depends on many factors not shown below. 
[width="100%",cols="2,^,^,^",options="header"]
\ No newline at end of file diff --git a/modules/ssl-config-cli.adoc b/modules/ssl-config-cli.adoc new file mode 100644 index 000000000..9514d7bf1 --- /dev/null +++ b/modules/ssl-config-cli.adoc @@ -0,0 +1,64 @@ +:_content-type: PROCEDURE +[id="configuring-ssl-using-cli"] += Configuring custom SSL/TLS certificates by using the command line interface + +SSL/TLS must be configured by using the command-line interface (CLI) and updating your `config.yaml` file manually. + +.Prerequisites + +* You have created a certificate authority and signed the certificate. + +.Procedure + +. Copy the certificate file and primary key file to your configuration directory, ensuring they are named `ssl.cert` and `ssl.key` respectively: ++ +[source,terminal] ++ +---- +cp ~/ssl.cert ~/ssl.key /path/to/configuration_directory +---- + +. Navigate to the configuration directory by entering the following command: ++ +[source,terminal] +---- +$ cd /path/to/configuration_directory +---- + +. Edit the `config.yaml` file and specify that you want {productname} to handle SSL/TLS: ++ +.Example `config.yaml` file +[source,yaml] +---- +# ... +SERVER_HOSTNAME: +... +PREFERRED_URL_SCHEME: https +# ... +---- + +. Optional: Append the contents of the `rootCA.pem` file to the end of the `ssl.cert` file by entering the following command: ++ +[source,terminal] +---- +$ cat rootCA.pem >> ssl.cert +---- + +. Stop the `Quay` container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman stop +---- + +. 
Restart the registry by entering the following command: ++ +[subs="verbatim,attributes"] +---- + +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ + --name=quay \ + -v $QUAY/config:/conf/stack:Z \ + -v $QUAY/storage:/datastorage:Z \ + {productrepo}/{quayimage}:{productminv} +---- \ No newline at end of file diff --git a/modules/ssl-config-ui.adoc b/modules/ssl-config-ui.adoc new file mode 100644 index 000000000..b73a20a05 --- /dev/null +++ b/modules/ssl-config-ui.adoc @@ -0,0 +1,36 @@ +:_content-type: PROCEDURE +[id="configuring-ssl-using-ui"] += Configuring SSL/TLS using the {productname} UI + +Use the following procedure to configure SSL/TLS using the {productname} UI. + +To configure SSL/TLS using the command line interface, see "Configuring SSL/TLS using the command line interface". + +.Prerequisites + +* You have created a certificate authority and signed a certificate. + +.Procedure + +. Start the `Quay` container in configuration mode: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman run --rm -it --name quay_config -p 80:8080 -p 443:8443 {productrepo}/{quayimage}:{productminv} config secret +---- + +. In the *Server Configuration* section, select *{productname} handles TLS* for SSL/TLS. Upload the certificate file and private key file created earlier, ensuring that the *Server Hostname* matches the value used when the certificates were created. + +. Validate and download the updated configuration. + +. 
Stop the `Quay` container and then restart the registry by entering the following command: ++ +[subs="verbatim,attributes"] +---- +$ sudo podman rm -f quay +$ sudo podman run -d --rm -p 80:8080 -p 443:8443 \ +--name=quay \ +-v $QUAY/config:/conf/stack:Z \ +-v $QUAY/storage:/datastorage:Z \ +{productrepo}/{quayimage}:{productminv} +---- diff --git a/modules/ssl-create-certs.adoc b/modules/ssl-create-certs.adoc new file mode 100644 index 000000000..a684579d0 --- /dev/null +++ b/modules/ssl-create-certs.adoc @@ -0,0 +1,100 @@ +:_content-type: PROCEDURE +[id="creating-a-certificate-authority"] += Creating a Certificate Authority + +Use the following procedure to set up your own CA and use it to issue a server certificate for your domain. This allows you to secure communications with SSL/TLS using your own certificates. + +.Procedure + +. Generate the root CA key by entering the following command: ++ +[source,terminal] +---- +$ openssl genrsa -out rootCA.key 2048 +---- + +. Generate the root CA certificate by entering the following command: ++ +[source,terminal] +---- +$ openssl req -x509 -new -nodes -key rootCA.key -sha256 -days 1024 -out rootCA.pem +---- + +. Enter the information that will be incorporated into your certificate request, including the server hostname, for example: ++ +[source,terminal] +---- +Country Name (2 letter code) [XX]:IE +State or Province Name (full name) []:GALWAY +Locality Name (eg, city) [Default City]:GALWAY +Organization Name (eg, company) [Default Company Ltd]:QUAY +Organizational Unit Name (eg, section) []:DOCS +Common Name (eg, your name or your server's hostname) []:quay-server.example.com +---- + +. Generate the server key by entering the following command: ++ +[source,terminal] +---- +$ openssl genrsa -out ssl.key 2048 +---- + +. Generate a signing request by entering the following command: ++ +[source,terminal] +---- +$ openssl req -new -key ssl.key -out ssl.csr +---- + +. 
Enter the information that will be incorporated into your certificate request, including the server hostname, for example: ++ +[source,terminal] +---- +Country Name (2 letter code) [XX]:IE +State or Province Name (full name) []:GALWAY +Locality Name (eg, city) [Default City]:GALWAY +Organization Name (eg, company) [Default Company Ltd]:QUAY +Organizational Unit Name (eg, section) []:DOCS +Common Name (eg, your name or your server's hostname) []:quay-server.example.com +Email Address []: +---- + +. Create a configuration file `openssl.cnf`, specifying the server hostname, for example: ++ +.Example `openssl.cnf` file +[source,terminal] +---- +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names +[alt_names] +DNS.1 = quay-server.example.com +IP.1 = 192.168.1.112 +---- + +. Use the configuration file to generate the certificate `ssl.cert`: ++ +[source,terminal] +---- +$ openssl x509 -req -in ssl.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out ssl.cert -days 365 -extensions v3_req -extfile openssl.cnf +---- + +. Confirm your created certificates and files by entering the following command: ++ +[source,terminal] +---- +$ ls /path/to/certificates +---- ++ +.Example output ++ +[source,terminal] +---- +rootCA.key ssl-bundle.cert ssl.key custom-ssl-config-bundle-secret.yaml rootCA.pem ssl.cert +openssl.cnf rootCA.srl ssl.csr +---- \ No newline at end of file diff --git a/modules/ssl-intro.adoc b/modules/ssl-intro.adoc new file mode 100644 index 000000000..8dff4cf0a --- /dev/null +++ b/modules/ssl-intro.adoc @@ -0,0 +1,4 @@ +[id="introduction-using-ssl"] += Using SSL/TLS + +Documentation for _Using SSL/TLS_ has been revised and moved to link:https://docs.redhat.com/en/documentation/red_hat_quay/3/html-single/securing_red_hat_quay/index[Securing {productname}]. 
This chapter will be removed in a future version of {productname}. \ No newline at end of file diff --git a/modules/ssl-testing-cli.adoc b/modules/ssl-testing-cli.adoc new file mode 100644 index 000000000..b196e3234 --- /dev/null +++ b/modules/ssl-testing-cli.adoc @@ -0,0 +1,39 @@ +:_content-type: PROCEDURE +[id="testing-ssl-tls-configuration-using-cli"] += Testing the SSL/TLS configuration using the CLI + +Your SSL/TLS configuration can be tested by using the command-line interface (CLI). + +Use the following procedure to test your SSL/TLS configuration using the CLI. + +.Procedure + +. Enter the following command to attempt to log in to the {productname} registry with SSL/TLS enabled: ++ +[source,terminal] +---- +$ sudo podman login quay-server.example.com +---- ++ +.Example output ++ +[source,terminal] +---- +Error: error authenticating creds for "quay-server.example.com": error pinging docker registry quay-server.example.com: Get "https://quay-server.example.com/v2/": x509: certificate signed by unknown authority +---- + +. Because Podman does not trust self-signed certificates, you must use the `--tls-verify=false` option: ++ +[source,terminal] +---- +$ sudo podman login --tls-verify=false quay-server.example.com +---- ++ +.Example output ++ +[source,terminal] +---- +Login Succeeded! +---- ++ +In a subsequent section, you will configure Podman to trust the root Certificate Authority. \ No newline at end of file diff --git a/modules/ssl-testing-ui.adoc b/modules/ssl-testing-ui.adoc new file mode 100644 index 000000000..22360312b --- /dev/null +++ b/modules/ssl-testing-ui.adoc @@ -0,0 +1,17 @@ +:_content-type: PROCEDURE +[id="testing-ssl-tls-using-browser"] += Testing the SSL/TLS configuration using a browser + +Use the following procedure to test your SSL/TLS configuration using a browser. + +.Procedure + +. 
Navigate to your {productname} registry endpoint, for example, `https://quay-server.example.com`. If configured correctly, the browser warns of the potential risk: ++ +image:ssl-connection-not-private.png[Potential risk] + +. Proceed to the log in screen. The browser notifies you that the connection is not secure. For example: ++ +image:ssl-connection-not-secure.png[Connection not secure] ++ +In the following section, you will configure Podman to trust the root Certificate Authority. \ No newline at end of file diff --git a/modules/ssl-tls-quay-overview.adoc b/modules/ssl-tls-quay-overview.adoc new file mode 100644 index 000000000..6fdaf4a6b --- /dev/null +++ b/modules/ssl-tls-quay-overview.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="ssl-tls-quay-overview"] += SSL and TLS for {productname} + +The Secure Sockets Layer (SSL) protocol was originally developed by Netscape Corporation to provide a mechanism for secure communication over the Internet. Subsequently, the protocol was adopted by the Internet Engineering Task Force (IETF) and renamed to Transport Layer Security (TLS). + +TLS (Transport Layer Security) is a cryptographic protocol used to secure network communications. When hardening system security settings by configuring preferred key-exchange protocols, authentication methods, and encryption algorithms, it is necessary to bear in mind that the broader the range of supported clients, the lower the resulting security. Conversely, strict security settings lead to limited compatibility with clients, which can result in some users being locked out of the system. Be sure to target the strictest available configuration and only relax it when it is required for compatibility reasons. + +{productname} can be configured to use SSL/TLS certificates to ensure secure communication between clients and the {productname} server. 
This configuration involves the use of valid SSL/TLS certificates, which can be obtained from a trusted Certificate Authority (CA) or generated as self-signed certificates for internal use. \ No newline at end of file diff --git a/modules/ssl-tls-sql.adoc b/modules/ssl-tls-sql.adoc new file mode 100644 index 000000000..2859ce59f --- /dev/null +++ b/modules/ssl-tls-sql.adoc @@ -0,0 +1,7 @@ +:_content-type: PROCEDURE +[id="cert-based-auth-quay-sql"] += Certificate-based authentication between {productname} and SQL + +{productname} administrators can configure certificate-based authentication between {productname} and SQL (PostgreSQL and GCP CloudSQL) by supplying their own SSL/TLS certificates for client-side authentication. This provides enhanced security and allows for easier automation for your {productname} registry. + +The following sections show you how to configure certificate-based authentication between {productname} and PostgreSQL, and {productname} and CloudSQL. \ No newline at end of file diff --git a/modules/ssl-trust-ca-podman.adoc b/modules/ssl-trust-ca-podman.adoc new file mode 100644 index 000000000..1c1714854 --- /dev/null +++ b/modules/ssl-trust-ca-podman.adoc @@ -0,0 +1,28 @@ +:_content-type: PROCEDURE +[id="configuring-podman-trust-ca"] += Configuring Podman to trust the Certificate Authority + +Podman uses two paths to locate the Certificate Authority (CA) file: `/etc/containers/certs.d/` and `/etc/docker/certs.d/`. Use the following procedure to configure Podman to trust the CA. + +.Procedure + +. Copy the root CA file to one of `/etc/containers/certs.d/` or `/etc/docker/certs.d/`. Use the exact path determined by the server hostname, and name the file `ca.crt`: ++ +[source,terminal] +---- +$ sudo cp rootCA.pem /etc/containers/certs.d/quay-server.example.com/ca.crt +---- + +. 
Verify that you no longer need to use the `--tls-verify=false` option when logging in to your {productname} registry: ++ +[source,terminal] +---- +$ sudo podman login quay-server.example.com +---- ++ +.Example output ++ +[source,terminal] +---- +Login Succeeded! +---- \ No newline at end of file diff --git a/modules/ssl-trust-ca-system.adoc b/modules/ssl-trust-ca-system.adoc new file mode 100644 index 000000000..dfcb82759 --- /dev/null +++ b/modules/ssl-trust-ca-system.adoc @@ -0,0 +1,52 @@ +:_content-type: PROCEDURE +[id="configuring-system-trust-ca"] += Configuring the system to trust the certificate authority + +Use the following procedure to configure your system to trust the certificate authority. + +.Procedure + +. Enter the following command to copy the `rootCA.pem` file to the consolidated system-wide trust store: ++ +[source,terminal] +---- +$ sudo cp rootCA.pem /etc/pki/ca-trust/source/anchors/ +---- + +. Enter the following command to update the system-wide trust store configuration: ++ +[source,terminal] +---- +$ sudo update-ca-trust extract +---- + +. Optional. You can use the `trust list` command to ensure that the `Quay` server has been configured: ++ +[source,terminal] +---- +$ trust list | grep quay + label: quay-server.example.com +---- ++ +Now, when you browse to the registry at `https://quay-server.example.com`, the lock icon shows that the connection is secure: ++ +image:ssl-connection-secure.png[Connection secure] + +. 
To remove the `rootCA.pem` file from system-wide trust, delete the file and update the configuration: ++ +[source,terminal] +---- +$ sudo rm /etc/pki/ca-trust/source/anchors/rootCA.pem +---- ++ +[source,terminal] +---- +$ sudo update-ca-trust extract +---- ++ +[source,terminal] +---- +$ trust list | grep quay +---- + +More information can be found in the RHEL 9 documentation in the chapter link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html-single/securing_networks/index#using-shared-system-certificates_securing-networks[Using shared system certificates]. diff --git a/modules/standalone-deployment-backup-restore.adoc b/modules/standalone-deployment-backup-restore.adoc new file mode 100644 index 000000000..de403a72e --- /dev/null +++ b/modules/standalone-deployment-backup-restore.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="standalone-deployment-backup-restore"] += Backing up and restoring {productname} on a standalone deployment + +Use the content within this section to back up and restore {productname} in standalone deployments. \ No newline at end of file diff --git a/modules/standalone-georepl-site-removal.adoc b/modules/standalone-georepl-site-removal.adoc new file mode 100644 index 000000000..6055f424d --- /dev/null +++ b/modules/standalone-georepl-site-removal.adoc @@ -0,0 +1,105 @@ +:_content-type: PROCEDURE +[id="standalone-georepl-site-removal"] += Removing a geo-replicated site from your standalone {productname} deployment + +By using the following procedure, {productname} administrators can remove sites in a geo-replicated setup. + +.Prerequisites + +* You have configured {productname} geo-replication with at least two sites, for example, `usstorage` and `eustorage`. +* Each site has its own Organization, Repository, and image tags. + +.Procedure + +. 
Sync the blobs between all of your defined sites by running the following command: ++ +[source,terminal] +---- +$ python -m util.backfillreplication +---- ++ +[WARNING] +==== +Prior to removing storage engines from your {productname} `config.yaml` file, you *must* ensure that all blobs are synced between all defined sites. Complete this step before proceeding. +==== + +. In your {productname} `config.yaml` file for site `usstorage`, remove the `DISTRIBUTED_STORAGE_CONFIG` entry for the `eustorage` site. + +. Enter the following command to obtain a list of running containers: ++ +[source,terminal] +---- +$ podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +92c5321cde38 registry.redhat.io/rhel8/redis-5:1 run-redis 11 days ago Up 11 days ago 0.0.0.0:6379->6379/tcp redis +4e6d1ecd3811 registry.redhat.io/rhel8/postgresql-13:1-109 run-postgresql 33 seconds ago Up 34 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +d2eadac74fda registry-proxy.engineering.redhat.com/rh-osbs/quay-quay-rhel8:v3.9.0-131 registry 4 seconds ago Up 4 seconds ago 0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp quay +---- + +. Enter the following command to execute a shell inside of the PostgreSQL container: ++ +[source,terminal] +---- +$ podman exec -it postgresql-quay -- /bin/bash +---- + +. Enter psql by running the following command: ++ +[source,terminal] +---- +bash-4.4$ psql +---- + +. Enter the following command to reveal a list of sites in your geo-replicated deployment: ++ +[source,terminal] +---- +quay=# select * from imagestoragelocation; +---- ++ +.Example output ++ +[source,terminal] +---- + id | name +----+------------------- + 1 | usstorage + 2 | eustorage +---- + +. Enter the following command to exit the postgres CLI to re-enter bash-4.4: ++ +[source,terminal] +---- +\q +---- + +. Enter the following command to permanently remove the `eustorage` site: ++ +[IMPORTANT] +==== +The following action cannot be undone. 
Use with caution. +==== ++ +[source,terminal] +---- +bash-4.4$ python -m util.removelocation eustorage +---- ++ +.Example output ++ +[source,terminal] +---- +WARNING: This is a destructive operation. Are you sure you want to remove eustorage from your storage locations? [y/n] y +Deleted placement 30 +Deleted placement 31 +Deleted placement 32 +Deleted placement 33 +Deleted location eustorage +---- \ No newline at end of file diff --git a/modules/standalone-to-operator-backup-restore.adoc b/modules/standalone-to-operator-backup-restore.adoc new file mode 100644 index 000000000..c6b81b42b --- /dev/null +++ b/modules/standalone-to-operator-backup-restore.adoc @@ -0,0 +1,376 @@ +:_content-type: REFERENCE +[id="migrating-standalone-quay-to-operator"] += Migrating a standalone {productname} deployment to a {productname} Operator deployment + +The following procedures allow you to back up a standalone {productname} deployment and migrate it to the {productname} Operator on OpenShift Container Platform. + +[id="backing-up-standalone-deployment"] +== Backing up a standalone deployment of {productname} + +.Procedure + +. Back up the `config.yaml` of your standalone {productname} deployment: ++ +[source,terminal] +---- +$ mkdir /tmp/quay-backup +$ cp /path/to/Quay/config/directory/config.yaml /tmp/quay-backup +---- + +. Create a backup of the database that your standalone {productname} deployment is using: ++ +[source,terminal] +---- +$ pg_dump -h DB_HOST -p 5432 -d QUAY_DATABASE_NAME -U QUAY_DATABASE_USER -W -O > /tmp/quay-backup/quay-database-backup.sql +---- + +. Install the link:https://docs.aws.amazon.com/cli/v1/userguide/install-linux.html#install-linux-bundled-sudo[AWS CLI] if you do not have it already. + +. Create an `~/.aws/` directory: ++ +[source,terminal] +---- +$ mkdir ~/.aws/ +---- + +. 
Obtain the `access_key` and `secret_key` from the `config.yaml` of your standalone deployment: ++ +[source,terminal] +---- +$ grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/config.yaml +---- ++ +Example output: ++ +[source,yaml] +---- +DISTRIBUTED_STORAGE_CONFIG: + minio-1: + - RadosGWStorage + - access_key: ########## + bucket_name: quay + hostname: 172.24.10.50 + is_secure: false + port: "9000" + secret_key: ########## + storage_path: /datastorage/registry +---- + +. Store the `access_key` and `secret_key` from the `config.yaml` file in your `~/.aws` directory: ++ +[source,terminal] +---- +$ touch ~/.aws/credentials +---- + +. Optional: Check that your `access_key` and `secret_key` are stored: ++ +[source,terminal] +---- +$ cat > ~/.aws/credentials << EOF +[default] +aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG +aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG +EOF +---- ++ +Example output: ++ +[source,terminal] +---- +aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG +aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG +---- ++ +[NOTE] +==== +If the `aws cli` does not automatically collect the `access_key` and `secret_key` from the `~/.aws/credentials` file, you can configure these by running `aws configure` and manually inputting the credentials. +==== + +. In your `quay-backup` directory, create a `bucket_backup` directory: ++ +[source,terminal] +---- +$ mkdir /tmp/quay-backup/bucket-backup +---- + +. Back up all blobs from the S3 storage: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint-url https://PUBLIC_S3_ENDPOINT:PORT s3://QUAY_BUCKET/ /tmp/quay-backup/bucket-backup/ +---- ++ +[NOTE] +==== +The `PUBLIC_S3_ENDPOINT` can be read from the {productname} `config.yaml` file under `hostname` in the `DISTRIBUTED_STORAGE_CONFIG`. If the endpoint is insecure, use `http` instead of `https` in the endpoint URL. 
+==== + +Up to this point, you should have a complete backup of all {productname} data, blobs, the database, and the `config.yaml` file stored locally. In the following section, you will migrate the standalone deployment backup to {productname} on OpenShift Container Platform. + +[id="using-standalone-content-migrate-ocp"] +== Using backed up standalone content to migrate to OpenShift Container Platform + +.Prerequisites + +* Your standalone {productname} data, blobs, database, and `config.yaml` have been backed up. +* {productname} is deployed on OpenShift Container Platform using the {productname} Operator. +* A `QuayRegistry` with all components set to `managed`. + +.Procedure + +[NOTE] +==== +The procedure in this document uses the following namespace: `quay-enterprise`. +==== + +. Scale down the {productname} Operator: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment quay-operator.v3.6.2 -n openshift-operators +---- + +. Scale down the application and mirror deployments: ++ +[source,terminal] +---- +$ oc scale --replicas=0 deployment QUAY_MAIN_APP_DEPLOYMENT QUAY_MIRROR_DEPLOYMENT +---- + +. Copy the database SQL backup to the `Quay` PostgreSQL database instance: ++ +[source,terminal] +---- +$ oc cp /tmp/user/quay-backup/quay-database-backup.sql quay-enterprise/quayregistry-quay-database-54956cdd54-p7b2w:/var/lib/pgsql/data/userdata +---- + + +. 
Obtain the database password from the Operator-created `config.yaml` file: ++ +[source,terminal] +---- +$ oc get deployment quay-quay-app -o json | jq '.spec.template.spec.volumes[].projected.sources' | grep -i config-secret +---- ++ +Example output: ++ +[source,yaml] +---- + "name": "QUAY_CONFIG_SECRET_NAME" +---- ++ +[source,terminal] +---- +$ oc get secret quay-quay-config-secret-9t77hb84tb -o json | jq '.data."config.yaml"' | cut -d '"' -f2 | base64 -d -w0 > /tmp/quay-backup/operator-quay-config-yaml-backup.yaml +---- ++ +[source,terminal] +---- +cat /tmp/quay-backup/operator-quay-config-yaml-backup.yaml | grep -i DB_URI +---- ++ +Example output: ++ +---- +postgresql://QUAY_DATABASE_OWNER:PASSWORD@DATABASE_HOST/QUAY_DATABASE_NAME +---- + +. Execute a shell inside of the database pod: ++ +[source,terminal] +---- +# oc exec -it quay-postgresql-database-pod -- /bin/bash +---- + +. Enter psql: ++ +[source,terminal] +---- +bash-4.4$ psql +---- + +. Drop the database: ++ +[source,terminal] +---- +postgres=# DROP DATABASE "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +---- +DROP DATABASE +---- + +. Create a new database and set the owner as the same name: ++ +[source,terminal] +---- +postgres=# CREATE DATABASE "example-restore-registry-quay-database" OWNER "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +---- +CREATE DATABASE +---- + +. Connect to the database: ++ +[source,terminal] +---- +postgres=# \c "example-restore-registry-quay-database"; +---- ++ +Example output: ++ +[source,terminal] +---- +You are now connected to database "example-restore-registry-quay-database" as user "postgres". +---- + +. Create a `pg_trgm` extension of your `Quay` database: ++ +[source,terminal] +---- +example-restore-registry-quay-database=# create extension pg_trgm ; +---- ++ +Example output: ++ +[source,terminal] +---- +CREATE EXTENSION +---- + +. Exit the postgres CLI to re-enter bash-4.4: ++ +[source,terminal] +---- +\q +---- + +. 
Set the password for your PostgreSQL deployment: ++ +[source,terminal] +---- +bash-4.4$ psql -h localhost -d "QUAY_DATABASE_NAME" -U QUAY_DATABASE_OWNER -W < /var/lib/pgsql/data/userdata/quay-database-backup.sql +---- ++ +Example output: ++ +---- +SET +SET +SET +SET +SET +---- + +. Exit bash mode: ++ +[source,terminal] +---- +bash-4.4$ exit +---- + +. Create a new configuration bundle for the {productname} Operator. ++ +[source,terminal] +---- +$ touch config-bundle.yaml +---- + +. In your new `config-bundle.yaml`, include all of the information that the registry requires, such as LDAP configuration, keys, and other modifications that your old registry had. Run the following command to move the `secret_key` to your `config-bundle.yaml`: ++ +[source,terminal] +---- +$ cat /tmp/quay-backup/config.yaml | grep SECRET_KEY > /tmp/quay-backup/config-bundle.yaml +---- ++ +[NOTE] +==== +You must manually copy all the LDAP, OIDC and other information and add it to the /tmp/quay-backup/config-bundle.yaml file. +==== + +. Create a configuration bundle secret inside of your OpenShift cluster: ++ +[source,terminal] +---- +$ oc create secret generic new-custom-config-bundle --from-file=config.yaml=/tmp/quay-backup/config-bundle.yaml +---- + +. Scale up the `Quay` pods: ++ +---- +$ oc scale --replicas=1 deployment quayregistry-quay-app +deployment.apps/quayregistry-quay-app scaled +---- + +. Scale up the mirror pods: ++ +---- +$ oc scale --replicas=1 deployment quayregistry-quay-mirror +deployment.apps/quayregistry-quay-mirror scaled +---- + +. Patch the `QuayRegistry` CRD so that it contains the reference to the new custom configuration bundle: ++ +---- +$ oc patch quayregistry QUAY_REGISTRY_NAME --type=merge -p '{"spec":{"configBundleSecret":"new-custom-config-bundle"}}' +---- ++ +[NOTE] +==== +If {productname} returns a `500` internal server error, you might have to update the `location` of your `DISTRIBUTED_STORAGE_CONFIG` to `default`. +==== + +. 
Create a new AWS `credentials.yaml` in your `/.aws/` directory and include the `access_key` and `secret_key` from the Operator-created `config.yaml` file: ++ +[source,terminal] +---- +$ touch credentials.yaml +---- ++ +[source,terminal] +---- +$ grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/operator-quay-config-yaml-backup.yaml +---- ++ +[source,terminal] +---- +$ cat > ~/.aws/credentials << EOF +[default] +aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG +aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG +EOF +---- ++ +[NOTE] +==== +If the `aws cli` does not automatically collect the `access_key` and `secret_key` from the `~/.aws/credentials` file, you can configure these by running `aws configure` and manually inputting the credentials. +==== + +. Record NooBaa's publicly available endpoint: ++ +[source,terminal] +---- +$ oc get route s3 -n openshift-storage -o yaml -o jsonpath="{.spec.host}{'\n'}" +---- + +. Sync the backup data to the NooBaa backend storage: ++ +[source,terminal] +---- +$ aws s3 sync --no-verify-ssl --endpoint-url https://NOOBAA_PUBLIC_S3_ROUTE /tmp/quay-backup/bucket-backup/* s3://QUAY_DATASTORE_BUCKET_NAME +---- + +. Scale the Operator back up to 1 pod: ++ +[source,terminal] +---- +$ oc scale --replicas=1 deployment quay-operator.v3.6.4 -n openshift-operators +---- + +The Operator uses the custom configuration bundle provided and reconciles all secrets and deployments. Your new {productname} deployment on {ocp} should contain all of the information that the old deployment had. You should be able to pull all images. \ No newline at end of file diff --git a/modules/starting-a-build.adoc b/modules/starting-a-build.adoc new file mode 100644 index 000000000..7c256269c --- /dev/null +++ b/modules/starting-a-build.adoc @@ -0,0 +1,68 @@ +:_content-type: CONCEPT +[id="starting-a-build"] += Starting a new build + +ifeval::["{context}" == "quay-io"] +By default, {quayio} users can start new _builds_ out-of-the-box. 
+endif::[] + +ifeval::["{context}" == "quay-builders-image-automation"] +After you have enabled the {productname} _builds_ feature by configuring your deployment, you can start a new build by invoking a _build trigger_ or by uploading a Dockerfile. +endif::[] + +Use the following procedure to start a new _build_ by uploading a Dockerfile. For information about creating a _build trigger_, see "Build triggers". + +.Prerequisites + +* You have navigated to the *Builds* page of your repository. +ifeval::["{context}" == "quay-builders-image-automation"] +* You have configured your environment to use the _build_ feature. +endif::[] + +.Procedure + +. On the *Builds* page, click *Start New Build*. + +. When prompted, click *Upload Dockerfile* to upload a Dockerfile or an archive that contains a Dockerfile at the root directory. + +. Click *Start Build*. ++ +[NOTE] +==== +* Currently, users cannot specify the Docker build context when manually starting a build. +* Currently, BitBucket is unsupported on the {productname} v2 UI. +==== + +. You are redirected to the _build_, which can be viewed in real-time. Wait for the Dockerfile _build_ to be completed and pushed. + +. Optional. you can click *Download Logs* to download the logs, or *Copy Logs* to copy the logs. + +. Click the back button to return to the *Repository Builds* page, where you can view the _build history_. ++ +image:build-history.png[Build history v2 UI] + +ifeval::["{context}" == "quay-builders-image-automation"] +. You can check the status of your _build_ by clicking the commit in the *Build History* page, or by running the following command: ++ +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +NAME READY STATUS RESTARTS AGE +f192fe4a-c802-4275-bcce-d2031e635126-9l2b5-25lg2 1/1 Running 0 7s +---- + +. 
After the _build_ has completed, the `oc get pods -n virtual-builders` command returns no resources: ++ +[source,terminal] +---- +$ oc get pods -n virtual-builders +---- ++ +.Example output +---- +No resources found in virtual-builders namespace. +---- +endif::[] \ No newline at end of file diff --git a/modules/storage-buckets-not-synced.adoc b/modules/storage-buckets-not-synced.adoc new file mode 100644 index 000000000..e4cb75b22 --- /dev/null +++ b/modules/storage-buckets-not-synced.adoc @@ -0,0 +1,14 @@ +:_content-type: PROCEDURE +[id="storage-buckets-not-synced"] += Unsynced storage buckets in a geo-replication environment + +In some cases, your s3 buckets might differ in size and the number of objects. This occurs because, over a period of time, {productname} registries are deleted. However, within {productname} there is no mechanism to ensure that a deleted image is entirely removed from the backing storage. Because of this, it is likely that many layers of such images are still in the backing storage and causing inconsistencies in all backing stores. + +`Replicationworkers` from the backfill script might take some time to catch up with the latest tasks, especially when images are consistently being pushed and new layers are being added to the registry. A difference in the size of backing s3 storage is common and not problematic. However, in rare cases, it might lead to failed pulls due to layers of an image not being present in the `imagestoragelocation` table. + +Currently, there is no workaround for this issue. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7010202[In Quay Geo-Replication, Storage Buckets not synced]. 
\ No newline at end of file diff --git a/modules/storage-health-check-geo-repl.adoc b/modules/storage-health-check-geo-repl.adoc new file mode 100644 index 000000000..c05156e47 --- /dev/null +++ b/modules/storage-health-check-geo-repl.adoc @@ -0,0 +1,86 @@ +:_content-type: PROCEDURE +[id="storage-health-check-geo-repl"] += Geo-replication storage health check issues + +There is a known issue when running a three-site geo-replication {productname} environment. When one of the three sites goes down due to storage failure, restarting the `Quay` pods in the remaining two sites causes {productname} to shut down. + +When checking the status of your geo-replication environment, the `GET /health/endtoend` health check endpoint does not check distributed storage engines. It only checks the preferred storage engine. + +This is the expected behavior, however here are two workarounds for this issue. + +[id="adding-overrides-to-quayregistry-crd"] +== Workaround 1: Adding overrides to the QuayRegistry CRD + +Use the following procedure to add overrides to the `QuayRegistry` CRD. Overriding the `QuayRegistry` custom resource definition (CRD) disables the initial validation. + +.Procedure + +[IMPORTANT] +==== +The overrides field is potentially destructive and should be removed from your `QuayRegistry` CRD as soon as possible. +==== + +* Update your `QuayRegistry` CRD to include the following information: ++ +[source,yaml] +---- +spec: +- kind: quay + managed: true + overrides: + env: + - name: IGNORE_VALIDATION + value: "true" <1> +---- +<1> `value` is a boolean and must be in quotation marks. This forces {productname} to restart. This restart also runs the config tool as the first process which does a health check on the configuration and ensures that all components that {productname} hooks to are available. + +[id="remove-offending-storage-engine"] +== Workaround 2: Removing the offending storage engine + +Another workaround is to remove the storage engine that is failing. 
To successfully remove a certain storage engine, you must remove the storage name, driver, and all related parameters for that driver from the {productname} `config.yaml` file. Also remove the storage driver name from the `DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS` and `DISTRIBUTED_STORAGE_PREFERENCE` fields. For example: + +[source,yaml] +---- +... +DISTRIBUTED_STORAGE_CONFIG: + default: # storage name + - RadosGWStorage # storage driver + - access_key: minioadmin # driver parameters + bucket_name: quay + hostname: 10.0.0.1 + is_secure: false + port: "9000" + secret_key: minioadmin + storage_path: /datastorage/registry + swift: # storage name + - SwiftStorage # storage driver + - auth_url: http://10.0.50.50/identity # driver parameters + auth_version: "3" + os_options: + tenant_id: + user_domain_name: +DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: + - default + - swift +DISTRIBUTED_STORAGE_PREFERENCE: + - default + - swift +... +---- + +Removing a faulty storage engine includes the following conditions: + +* This change must be done on all {productname} instances that you are running. The `Quay` pods should come online afterwards. +* Images that are completely stored in the failed data center will not be pullable. +* Geo-replication is an asynchronous operation; it happens in batches and after the image has been completely pushed to the registry. There is no guarantee that all blobs for all images pushed to the failed data center were transferred to other storage locations in time. If such an image is encountered, it should be re-pushed to {productname} again. +* After the failed storage engine has been restored, the configuration for that storage engine should be restored to the remaining two {productname} instances and {productname} should be restarted. One needs to enqueue blobs that are now in the remaining two data centers to be pushed to the failed data center. 
This can be done with the following script: ++ +[source,terminal] +---- +$ oc exec -it quay-pod-name -- python -m util.backfillreplication +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/7010204[Geo-replication storage health-check issue]. \ No newline at end of file diff --git a/modules/storage-troubleshooting-issues.adoc b/modules/storage-troubleshooting-issues.adoc new file mode 100644 index 000000000..6e838bdb1 --- /dev/null +++ b/modules/storage-troubleshooting-issues.adoc @@ -0,0 +1,31 @@ +:_content-type: PROCEDURE +[id="storage-troubleshooting-issues"] += Troubleshooting {productname} object storage issues + +Use the following options to troubleshoot {productname} object storage issues. + +.Procedure + +* Enter the following command to see what object storage is used: ++ +[source,terminal] +---- +$ oc get quayregistry quay-registry-name -o yaml +---- + +* Ensure that the object storage you are using is officially supported by {productname} by checking the link:https://access.redhat.com/articles/4067991[tested integrations] page. + +* Enable debug mode. For more information, see "Running {productname} in debug mode". + +* Check your object storage configuration in your `config.yaml` file. Ensure that it is accurate and matches the settings provided by your object storage provider. You can check information like access credentials, endpoint URLs, bucket and container names, and other relevant configuration parameters. + +* Ensure that {productname} has network connectivity to the object storage endpoint. Check the network configurations to ensure that there are no restrictions blocking the communication between {productname} and the object storage endpoint. + +* If `FEATURE_STORAGE_PROXY` is enabled in your `config.yaml` file, check to see if its download URL is accessible. This can be found in the {productname} debug logs. 
For example: ++ +[source,terminal] +---- +$ curl -vvv "https://QUAY_HOSTNAME/_storage_proxy/dhaWZKRjlyO......Kuhc=/https/quay.hostname.com/quay-test/datastorage/registry/sha256/0e/0e1d17a1687fa270ba4f52a85c0f0e7958e13d3ded5123c3851a8031a9e55681?AWSAccessKeyId=xxxx&Signature=xxxxxx4%3D&Expires=1676066703" +---- + +* Try access the object storage service outside of {productname} to determine if the issue is specific to your deployment, or the underlying object storage. You can use command line tools like `aws`, `gsutil`, or `s3cmd` provided by the object storage provider to perform basic operations like listing buckets, containers, or uploading and downloading objects. This might help you isolate the problem. diff --git a/modules/storage-troubleshooting.adoc b/modules/storage-troubleshooting.adoc new file mode 100644 index 000000000..108401786 --- /dev/null +++ b/modules/storage-troubleshooting.adoc @@ -0,0 +1,7 @@ +:_content-type: CONCEPT +[id="storage-troubleshooting"] += Troubleshooting {productname} object storage + +Object storage is a type of data storage architecture that manages data as discrete units called `objects`. Unlike traditional file systems that organize data into hierarchical directories and files, object storage treats data as independent entities with unique identifiers. Each object contains the data itself, along with metadata that describes the object and enables efficient retrieval. + +{productname} uses object storage as the underlying storage mechanism for storing and managing container images. It stores container images as individual objects. Each container image is treated as an object, with its own unique identifier and associated metadata. 
\ No newline at end of file diff --git a/modules/subscription-intro.adoc b/modules/subscription-intro.adoc new file mode 100644 index 000000000..4571a2daa --- /dev/null +++ b/modules/subscription-intro.adoc @@ -0,0 +1,27 @@ +:_content-type: CONCEPT +[id="subscription-intro"] += {productname} subscription information + +{productname} is available with Standard or Premium support, and subscriptions are based on deployments. + +[NOTE] +==== +Deployment means an installation of a single {productname} registry using a shared data backend. +==== + +With a {productname} subscription, the following options are available: + +* There is no limit on the number of pods, such as Quay, Clair, Builder, and so on, that you can deploy. +* {productname} pods can run in multiple data centers or availability zones. +* Storage and database backends can be deployed across multiple data centers or availability zones, but only as a single, shared storage backend and single, shared database backend. +* {productname} can manage content for an unlimited number of clusters or standalone servers. +* Clients can access the {productname} deployment regardless of their physical location. +* You can deploy {productname} on {ocp} infrastructure nodes to minimize subscription requirements. +* You can run the Container Security Operator (CSO) and the Quay Bridge Operator (QBO) on your {ocp} clusters at no additional cost. + +[NOTE] +==== +{productname} geo-replication requires a subscription for each storage replication. The database, however, is shared. +==== + +For more information about purchasing a {productname} subscription, see link:https://www.redhat.com/en/technologies/cloud-computing/quay[{productname}]. 
diff --git a/modules/superuser-manage-api.adoc b/modules/superuser-manage-api.adoc new file mode 100644 index 000000000..ab3e93c51 --- /dev/null +++ b/modules/superuser-manage-api.adoc @@ -0,0 +1,4 @@ +[id="superuser-manage-api"] += Managing your deployment as a superuser with the {productname} API + +Through the {productname} UI, superusers have the ability to create, list, change, and delete aspects of the registry, such as users, service keys, a user's quota, and more. \ No newline at end of file diff --git a/modules/team-permissions-api.adoc b/modules/team-permissions-api.adoc new file mode 100644 index 000000000..d73a5fa17 --- /dev/null +++ b/modules/team-permissions-api.adoc @@ -0,0 +1,70 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="repo-manage-team-permissions"] += Managing team permissions by using the {productname} API + +Use the following procedure to manage team permissions by using the {productname} API. + +. Permissions for a specified team can be returned by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getteampermissions[`GET /api/v1/repository/{repository}/permissions/team/{teamname}`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "write"} +---- + +. Permissions for all teams can be returned with the link:[`GET /api/v1/repository/{repository}/permissions/team/`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": {"ironmanteam": {"role": "read", "name": "ironmanteam", "avatar": {"name": "ironmanteam", "hash": "8045b2361613622183e87f33a7bfc54e100a41bca41094abb64320df29ef458d", "color": "#969696", "kind": "team"}}, "sillyteam": {"role": "read", "name": "sillyteam", "avatar": {"name": "sillyteam", "hash": "f275d39bdee2766d2404e2c6dbff28fe290969242e9fcf1ffb2cde36b83448ff", "color": "#17becf", "kind": "team"}}}} +---- + +. Permissions for a specified team can be changed by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeteampermissions[`PUT /api/v1/repository/{repository}/permissions/team/{teamname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": ""}' \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "admin", "name": "superteam", "avatar": {"name": "superteam", "hash": "48cb6d114200039fed5c601480653ae7371d5a8849521d4c3bf2418ea013fc0f", "color": "#9467bd", "kind": "team"}} +---- + +. Team permissions can be deleted with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteteampermissions[`DELETE /api/v1/repository/{repository}/permissions/team/{teamname}`] command. For example: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/team/" +---- ++ +This command does not return output in the CLI. 
\ No newline at end of file diff --git a/modules/teams-overview.adoc b/modules/teams-overview.adoc new file mode 100644 index 000000000..48aa0ebe5 --- /dev/null +++ b/modules/teams-overview.adoc @@ -0,0 +1,11 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="teams-overview"] += {productname} teams overview + +In {productname} a _team_ is a group of users with shared permissions, allowing for efficient management and collaboration on projects. Teams can help streamline access control and project management within organizations and repositories. They can be assigned designated permissions and help ensure that members have the appropriate level of access to their repositories based on their roles and responsibilities. diff --git a/modules/tenancy-model.adoc b/modules/tenancy-model.adoc new file mode 100644 index 000000000..74ed0a2e8 --- /dev/null +++ b/modules/tenancy-model.adoc @@ -0,0 +1,22 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="tenancy-model"] += Tenancy model + +image:178_Quay_architecture_0821_tenancy_model.png[Tenancy model] + +* **Organizations** provide a way of sharing repositories under a common namespace that does not belong to a single user. Instead, these repositories belong to several users in a shared setting, such as a company. + +* **Teams** provide a way for an Organization to delegate permissions. Permissions can be set at the global level (for example, across all repositories), or on specific repositories. They can also be set for specific sets, or groups, of users. + +* **Users** can log in to a registry through the web UI or a by using a client like Podman and using their respective login commands, for example, `$ podman login`. Each user automatically gets a user namespace, for example, `//`, or `quay.io/` if you are using {quayio}. 
+ +ifeval::["{context}" == "use-quay"] +* **Superusers** have enhanced access and privileges through the *Super User Admin Panel* in the user interface. Superuser API calls are also available, which are not visible or accessible to normal users. +endif::[] + +* **Robot accounts** provide automated access to repositories for non-human users like pipeline tools. Robot accounts are similar to {ocp} *Service Accounts*. Permissions can be granted to a robot account in a repository by adding that account like you would another user or team. \ No newline at end of file diff --git a/modules/testing-3-800.adoc b/modules/testing-3-800.adoc new file mode 100644 index 000000000..dc04cb487 --- /dev/null +++ b/modules/testing-3-800.adoc @@ -0,0 +1,910 @@ +[[testing-3-800]] +== Testing 3.8.0 features + +The following sections in this guide explain how to enable new features and test that they are working. + +[[enabling-ipv6-dual-stack]] +=== Enabling and testing the IPv6 and dual-stack protocol family on standalone {productname} deployments + +Your {productname} deployment can now be served in locations that only support IPv6, such as Telco and Edge environments. Support is also offered for dual-stack networking so your {productname} deployment can listen on IPv4 and IPv6 simultaneously. + +[[enabling-ipv6]] +==== Enabling and testing IPv6 + +Use the following procedure to enable IPv6 on your standalone {productname} deployment. + +.Prerequisites + +* You have updated {productname} to 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `IPv6`, for example: ++ +[source,yaml] +---- +--- +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: IPv6 +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +--- +---- + +. 
Start, or restart, your {productname} deployment. + +. Check that your deployment is listening to IPv6. ++ +.. For a standalone deployment, enter the following command: ++ +[source,terminal] +---- +$ curl /health/instance +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +.. For an Operator based deployment, enter the following command: ++ +[source,terminal] +---- +$ curl /health/instance +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +===== Expected Results + +After enabling IPv6 in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured to use IPv6 and is not hindered by the ipv6-limitations[current limitations]. + +[WARNING] +==== +If your environment is configured to IPv4, but the `FEATURE_LISTEN_IP_VERSION` configuration field is set to `IPv6`, {productname} will fail to deploy. +==== + +You can use the following procedure to test that your {productname} deployment can push and pull images in an IPv6 environment. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +. Test access to the image from the CLI by deleting your local copy of the image: ++ +[source,terminal] +---- +$ podman rmi quay-server.example.com/quayadmin/busybox:test +---- + +. 
Pull the image from your {productname} registry: ++ +[source,terminal] +---- +$ podman pull --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +[[enabling-dual-stack]] +==== Enabling and testing dual-stack + +.Prerequisites + +* You have updated {productname} to 3.8. +* Your host and container software platform (Docker, Podman) must be configured to support IPv6. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_LISTEN_IP_VERSION` parameter and set it to `dual-stack`, for example: ++ +[source,yaml] +---- +--- +FEATURE_GOOGLE_LOGIN: false +FEATURE_INVITE_ONLY_USER_CREATION: false +FEATURE_LISTEN_IP_VERSION: dual-stack +FEATURE_MAILING: false +FEATURE_NONSUPERUSER_TEAM_SYNCING_SETUP: false +--- +---- + +. Start, or restart, your {productname} deployment. + +. Check that your deployment is listening to both channels by entering the following command: +.. For IPv4, enter the following command: ++ +[source,terminal] +---- +$ curl --ipv4 +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- +.. For IPv6, enter the following command: ++ +[source,terminal] +---- +$ curl --ipv6 +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- + +===== Expected Results + +After enabling dual-stack in your deployment's `config.yaml`, all {productname} features can be used as normal, so long as your environment is configured for dual-stack. + +You can use the following procedure to test that your {productname} deployment can push and pull images in a dual-stack environment. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. 
Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ podman push --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +. Test access to the image from the CLI by deleting your local copy of the image: ++ +[source,terminal] +---- +$ podman rmi quay-server.example.com/quayadmin/busybox:test +---- + +. Pull the image from your {productname} registry: ++ +[source,terminal] +---- +$ podman pull --tls-verify=false quay-server.example.com/quayadmin/busybox:test +---- + +[[enabling-ldap-super-users]] +=== Enabling LDAD superusers for {productname} + +The `LDAP_SUPERUSER_FILTER` configuration field is now available. With this field configured, {productname} administrators can configure Lightweight Directory Access Protocol (LDAP) users as superusers if {productname} uses LDAP as its authentication provider. + +Use the following procedure to enable LDAP superusers on your {productname} deployment. + +.Prerequisites + +* Your {productname} deployment uses LDAP as its authentication provider. +* You have configured the `LDAP_USER_FILTER` field. + +.Procedure + +. In your deployment's `config.yaml` file, add the `LDAP_SUPERUSER_FILTER` parameter and add the group of users you want configured as super users, for example, `root`: ++ +[source,yaml] +---- +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_SUPERUSER_FILTER: (memberOf=cn=root,ou=Admin,o=,dc=,dc=com) +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +---- + +. Start, or restart, your {productname} deployment. 
+ +===== Expected Results + +After enabling the `LDAP_SUPERUSER_FILTER` feature, your LDAP {productname} users have superuser privileges. The following options are available to superusers: + +* Manage users +* Manage organizations +* Manage service keys +* View the change log +* Query the usage logs +* Create globally visible user messages + +Use the following procedure to test that your {productname} LDAP users have been given superusers privileges. + +.Prerequisites + +* You have configured the `LDAP_SUPERUSER_FILTER` field. + +.Procedure + +. Log in to your {productname} registry as the configured LDAP superuser. + +. Access the *Super User Admin Panel* by clicking on your user name or avatar in the top right-hand corner of the UI. If you have been properly configured as a superuser, an extra item is presented in the drop-down list called *Super User Admin Panel*. + +. On the *{productname} Management* page, click *Globally visible user messages* on the navigation pane. + +. Click *Create Message* to reveal a drop-down menu containing *Normal*, *Warning*, and *Error* message types. + +. Enter a message by selecting *Click to set message*, then click *Create Message*. + +Now, when users log in to the {productname} registry, they are presented with a global message. + + +[[enabling-ldap-restricted-users]] +=== Enabling LDAP restricted users for {productname} + +The `LDAP_RESTRICTED_USER_FILTER` is now available. This configuration field is a subset of the `LDAP_USER_FILTER` configuration field. When configured, allows {productname} administrators the ability to configure Lightweight Directory Access Protocol (LDAP) users as restricted users when {productname} uses LDAP as its authentication provider. + +Use the following procedure to enable LDAP restricted users on your {productname} deployment. + +.Prerequisites + +* Your {productname} deployment uses LDAP as its authentication provider. +* You have configured the `LDAP_USER_FILTER` field. + +.Procedure + +. 
In your deployment's `config.yaml` file, add the `LDAP_RESTRICTED_USER_FILTER` parameter and specify the group of restricted users, for example, `members`: ++ +[source,yaml] +---- +LDAP_ADMIN_DN: uid=,ou=Users,o=,dc=,dc=com +LDAP_ADMIN_PASSWD: ABC123 +LDAP_ALLOW_INSECURE_FALLBACK: false +LDAP_BASE_DN: + - o= + - dc= + - dc=com +LDAP_EMAIL_ATTR: mail +LDAP_UID_ATTR: uid +LDAP_URI: ldap://.com +LDAP_USER_FILTER: (memberof=cn=developers,ou=Users,o=,dc=,dc=com) +LDAP_RESTRICTED_USER_FILTER: (=) +LDAP_USER_RDN: + - ou= + - o= + - dc= + - dc=com +---- + +. Start, or restart, your {productname} deployment. + + +[[enabling-superusers-full-access]] +=== Enabling and testing `FEATURE_SUPERUSERS_FULL_ACCESS` + +The `FEATURE_SUPERUSERS_FULL_ACCESS` feature is now available. This configuration field grants superusers the ability to read, write, and delete content from other repositories in namespaces that they do not own or have explicit permissions for. + +[NOTE] +==== +* This feature is only available on the beta of the new UI. When enabled, it shows all organizations that the super user has access to. To enable the beta of the new UI, see xref:enabling-ui-v2[FEATURE_UI_V2] +* When this field is enabled, the superuser cannot view the image repository of every organization at once. This is a known limitation and will be fixed in a future version of {productname}. As a temporary workaround, the superuser can view image repositories by navigating to them from the *Organizations* page. +==== + + +Use the following procedure to test the `FEATURE_SUPERUSERS_FULL_ACCESS` feature. + +.Prerequisites + +* You have defined the `SUPER_USERS` configuration field in your `config.yaml` file. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_SUPERUSERS_FULL_ACCESS` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +SUPER_USERS: +- quayadmin +FEATURE_SUPERUSERS_FULL_ACCESS: True +--- +---- + +. 
Start, or restart, your {productname} deployment. + +==== Expected results + +With this feature enabled, your superusers should be able to read, write, and delete content from other repositories in namespaces that they do not own. To ensure that this feature is working as intended, use the following procedure. + +.Prerequisites + +* You have set the `FEATURE_SUPERUSERS_FULL_ACCESS` field to `true` in your `config.yaml` file. + +.Procedure + +. Open your {productname} registry and click *Create new account*. + +. Create a new user, for example, `user1`. + +. Log in as `user`. + +. Click *user1* under the *Users and Organizations*. + +. Create a new repository but clicking *creating a new repository*. + +. Enter a repository name, for example, `testrepo`, then click *Create private repository*. + +. Use the CLI to log in to the registry as `user`: ++ +[source,terminal] +---- +$ podman login --tls-verify=false quay-server.example.com +---- ++ +Example output: ++ +[source,terminal] +---- +Username: user1 +Password: +Login Succeeded! +---- + +. Pull a sample image by entering the following command: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/user1/testrepo/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ sudo podman push --tls-verify=false quay-server.example.com/user1/testrepo/busybox:test +---- + +. Ensure that you have successfully pushed the image to your repository by navigating to `www.quay-server.example.com/repository/user1/testrepo/busybox` and clicking *Tags* in the navigation pane. + +. Sign out of `user1` by clicking *user1* -> *Sign out all sessions*. + +. Log out of the registry using the CLI: ++ +---- +$ podman logout quay-server.example.com +---- ++ +Example output: ++ +[source,terminal] +---- +Removed login credentials for quay-server.example.com +---- + +. 
On the UI, log in as the designated superuser with full access privileges, for example, `quayadmin`. + +. On the CLI, log in as the designated superuser with full access privileges, for example, `quayadmin`:
+==== + +[[enabling-feature-restricted-users]] +=== Enabling and testing `FEATURE_RESTRICTED_USERS` + +With this feature enabled, normal users are unable to create organizations. + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_RESTRICTED_USERS` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_RESTRICTED_USERS: true +--- +---- + +. Start, or restart, your {productname} deployment. + +==== Expected results + +With this feature enabled, normal users cannot create organizations. To ensure that this feature is working as intended, use the following procedure. + +.Prerequisites + +* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`. +* Your {productname} registry has a sample tag. + +.Procedure + +. Log in as a normal {productname} user, for example, `user1`. + +. Click *Create New Organization* on the {productname} UI. + +. In the *Organization Name* box, enter a name, for example, `testorg`. + +. Click *Create Organization*. This will result in an `Unauthorized` messaged. + +[[enabling-restricted-users-read-only]] +=== Enabling and testing `RESTRICTED_USER_READ_ONLY` + +When `FEATURE_RESTRICTED_USERS` is set to `true`, `RESTRICTED_USER_READ_ONLY` restricts users to read-only operations. + +Use the following procedure to enable `FEATURE_RESTRICTED_USERS`. + +.Prerequisites + +* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`. + +.Procedure + +. In your deployment's `config.yaml` file, add the `RESTRICTED_USER_READ_ONLY` parameter set it to `true`: ++ +[source,yaml] +---- +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USER_READ_ONLY: true +---- + +. Start, or restart, your {productname} deployment. + +==== Expected results + +With this feature enabled, users will only be able to perform read-only operations. Use the following procedures to ensure that this feature is working as intended: + +.Prerequisites + +* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`. 
+* `RESTRICTED_USER_READ_ONLY` is set to `true` in your `config.yaml`. +* Your {productname} registry has a sample tag. + +.Procedure + +. Log in to your {productname} registry as the normal user, for example, `user1`. + +. On the {productname} UI, click *Explore*. + +. Select a repository, for example, *quayadmin/busybox*. + +. Select *Tags* on the navigation pane. + +. Pull a sample tag from the repository, for example: ++ +[source,terminal] +---- +$ podman pull quay-server.example.com/quayadmin/busybox:test +---- ++ +Example output: ++ +[source,terminal] +---- +Trying to pull quay-server.example.com/quayadmin/busybox:test... +Getting image source signatures +Copying blob 29c7fae3c03c skipped: already exists +Copying config 2bd2971487 done +Writing manifest to image destination +Storing signatures +2bd29714875d9206777f9e8876033cbcd58edd14f2c0f1203435296b3f31c5f7 +---- + +Next, try to push an image. This procedure should result in `unauthorized`. + +. Tag an image by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/user1/busybox:test +---- + +. Push the image by entering the following command: ++ +[source,terminal] +---- +$ podman push quay-server.example.com/user1/busybox:test +---- ++ +Example output: ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 29c7fae3c03c skipped: already exists +Copying config 2bd2971487 done +Writing manifest to image destination +Error: writing manifest: uploading manifest test to quay-server.example.com/user3/busybox: unauthorized: access to the requested resource is not authorized +---- + +Next, try to create an organization using the {productname} UI: + +. Log in to your {productname} registry as the whitelisted user, for example, `user1`. + +. On the UI, click *Create New Organization*. + +If properly configured, `user1` is unable to create a new organization. 
+ + +[[enabling-restricted-users-whitelist]] +=== Enabling and testing `RESTRICTED_USERS_WHITELIST` + +When this feature is set, specified users are excluded from the `FEATURE_RESTRICTED_USERS` and `RESTRICTED_USER_READ_ONLY` configurations. Use the following procedure to exclude users from the `FEATURE_RESTRICTED_USERS` and `RESTRICTED_USER_READ_ONLY` settings so that they can have `read` and `write` privileges. + +.Prerequisites + +* `FEATURE_RESTRICTED_USERS` is set to `true` in your `config.yaml`. + +.Procedure + +. In your deployment's `config.yaml` file, add the `RESTRICTED_USERS_WHITELIST` parameter and a user, for example, `user1`: ++ +[source,yaml] +---- +FEATURE_RESTRICTED_USERS: true +RESTRICTED_USERS_WHITELIST: + - user2 +---- + +. Start, or restart, your {productname} deployment. + +==== Expected results + +With this feature enabled, whitelisted users can create organizations, or read or write content from the repository even if `FEATURE_RESTRICTED_USERS` is set to `true`. To ensure that this feature is working as intended, use the following procedures. + +. Log in to your {productname} registry as the white listed user, for example, `user2`. + +. On the UI, click *Create New Organization*. + +. Enter an organization name, for example, `testorg`. + +. Click *Create Organization*. If successful, you will be loaded on to the organization's page. + +Next, as the white listed user, try to push an image. This should result in a successfully pushed image. + +. Tag an image by entering the following command: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/user2/busybox:test +---- + +. 
Push the image by entering the following command: ++ +[source,terminal] +---- +$ podman push quay-server.example.com/user2/busybox:test +---- ++ +Example output: ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 29c7fae3c03c skipped: already exists +Copying config 2bd2971487 done +Writing manifest to image destination +Storing signatures +---- + +//// + +[[enabling-ui-v2]] +=== Enabling and testing `FEATURE_UI_V2` + +With this feature enabled, you can toggle between the current version of the user interface, and the new version of the user interface. + +[IMPORTANT] +==== +* This UI is currently in beta and subject to change. In its current state, users can only create, view, and delete organizations, repositories, and image tags. +* When running {productname} in the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. +* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. In the new UI, {productname} uses the standard definition of megabyte (MB) to report image manifest sizes. +==== + +.Procedure + +. In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_TEAM_SYNCING: false +FEATURE_UI_V2: true +FEATURE_USER_CREATION: true +--- +---- + +. Log in to your {productname} deployment. + +. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. 
Click the toggle button to set it to new UI, and then click *Use Beta Environment*, for example: ++ +image:38-ui-toggle.png[{productname} 3.8 UI toggle] + +==== Creating a new organization in the {productname} 3.8 beta UI + +.Prerequisites + +* You have toggled your {productname} deployment to use the 3.8 beta UI. + +Use the following procedure to create an organization using the {productname} 3.8 beta UI. + +.Procedure + +. Click *Organization* in the navigation pane. + +. Click *Create Organization*. + +. Enter an *Organization Name*, for example, `testorg`. + +. Click *Create*. + +Now, your example organization should populate under the *Organizations* page. + +==== Deleting an organization using the {productname} 3.8 beta UI + +Use the following procedure to delete an organization using the {productname} 3.8 beta UI. + +.Procedure + +. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. + +. Click the *More Actions* drop down menu. + +. Click *Delete*. ++ +[NOTE] +==== +On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion. +==== + +. Confirm that you want to permanently delete the organization by typing *confirm* in the box. + +. Click *Delete*. + +After deletion, you are returned to the *Organizations* page. + +[NOTE] +==== +You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*. +==== + +==== Creating a new repository using the {productname} 3.8 beta UI + +Use the following procedure to create a repository using the {productname} 3.8 beta UI. + +.Procedure + +. Click *Repositories* on the navigation pane. + +. 
Click *Create Repository*. + +. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`. + +. Click *Create*. + +Now, your example repository should populate under the *Repositories* page. + +==== Deleting a repository using the {productname} 3.8 beta UI + +.Prerequisites + +* You have created a repository. + +.Procedure + +. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. + +==== Pushing an image to the {productname} 3.8 beta UI + +Use the following procedure to push an image to the {productname} 3.8 beta UI. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your {productname} registry: ++ +[source,terminal] +---- +$ podman push quay-server.example.com/quayadmin/busybox:test +---- + +. Navigate to the *Repositories* page on the {productname} UI and ensure that your image has been properly pushed. + +. You can check the security details by selecting your image tag, and then navigating to the *Security Report* page. + +==== Deleting an image using the {productname} 3.8 beta UI + +Use the following procedure to delete an image using the {productname} 3.8 beta UI. + +.Prerequisites + +* You have pushed an image to your {productname} registry. + +.Procedure + +. On the *Repositories* page of the {productname} 3.8 beta UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. 
Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. + +=== Enabling the {productname} legacy UI + +. In the navigation pane of your {productname} deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to *Current UI*. ++ +image:38-ui-toggle.png[{productname} 3.8 UI toggle] + +[[leveraging-storage-quota-limits]] + +//// +=== Leveraging storage quota limits in proxy organizations + +With {productname} 3.8, the proxy cache feature has been enhanced with an auto-pruning feature for tagged images. The auto-pruning of image tags is only available when a proxied namespace has quota limitations configured. Currently, if an image size is greater than quota for an organization, the image is skipped from being uploaded until an administrator creates the necessary space. Now, when an image is pushed that exceeds the allotted space, the auto-pruning enhancement marks the least recently used tags for deletion. As a result, the new image tag is stored, while the least used image tag is marked for deletion. + +[IMPORTANT] +==== +* As part of the auto-pruning feature, the tags that are marked for deletion are eventually garbage collected by the garbage collector (gc) worker process. As a result, the quota size restriction is not fully enforced during this period. +* Currently, the namespace quota size computation does not take into account the size for manifest child. This is a known issue and will be fixed in a future version of {productname}. +==== + +==== Testing the storage quota limits feature in proxy organizations + +Use the following procedure to test the auto-pruning feature of an organization with proxy cache and storage quota limitations enabled. 
+ +.Prerequisites + +* Your organization is configured to serve as a proxy organization. The following example proxies from quay.io. + +* `FEATURE_PROXY_CACHE` is set to `true` in your `config.yaml` file. + +* `FEATURE_QUOTA_MANAGEMENT` is set to `true` in your `config.yaml` file. + +* Your organization is configured with a quota limit, for example, `150 MB`. + +.Procedure + +. Pull an image to your repository from your proxy organization, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/clair:4.2.3 +---- + +. Depending on the space left in your repository, you might need to pull additional images from your proxy organization, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/clair:4.1.5 +---- + +. In the {productname} registry UI, click the name of your repository. + +* Click *Tags* in the navigation pane and ensure that `clair:4.2.3` and `clair:4.1.5` are tagged. + +. Pull the last image that will result in your repository exceeding the allotted quota, for example: ++ +---- +$ podman pull quay-server.example.com/proxytest/projectquay/clair:4.1.4 +---- + +. Refresh the *Tags* page of your {productname} registry. The first image that you pushed, for example, `clair:4.2.3` should have been auto-pruned. The *Tags* page should now show `clair:4.2.3` and `clair:4.1.4`. +//// \ No newline at end of file diff --git a/modules/testing-clair.adoc b/modules/testing-clair.adoc new file mode 100644 index 000000000..360ef3939 --- /dev/null +++ b/modules/testing-clair.adoc @@ -0,0 +1,61 @@ +:_content-type: CONCEPT +[id="testing-clair"] += Testing Clair + + + + + + + + + + + + + + + + + + +//// + +Currently, there are two methods for testing Clair independently of a {productname} subscription: + +* In a local development environment +* In a distributed deployment + +[IMPORTANT] +==== +Official documentation for testing Clair without a {productname} subscription is unsupported. 
These procedures and subsequent updates are maintained by upstream contributors and developers. For more information, see link:https://quay.github.io/clair/howto/getting_started.html[Getting Started With ClairV4]. + +For official Clair documentation, see. . . +==== + +[id="testing-clair-local-development-environment"] +== Testing Clair in a local development environment + +The simplest way to run Clair for test purposes is to use the local development environment. The local development environment can be used to test and develop Clair's integration with {productname}. Documentation for this procedure can be found on the open source Clair project at link:https://quay.github.io/clair/howto/testing.html[Testing ClairV4]. + +[id="clair-modes"] +== Testing Clair in a distributed deployment + +When testing Clair in a distributed deployment, Clair uses PostgreSQL for its data persistence. Clair migrations are supported. Users can point Clair to a fresh database to set it up. + +In a distributed deployment, users can test run Clair in the following modes: + +* Indexer mode. When Clair is running in indexer mode, it is responsible for receiving manifests and generating `IndexReports`. An `IndexReport` is an intermediate representation of a manifest's content and is used to discover vulnerabilities. + +* Matcher mode. When Clair is running in matcher mode, it is responsible for receiving `IndexReports` and generating `VulnerabilityReports`. A `VulnerabilityReport` describes the contents of a manifest and any vulnerabilities affecting it. + +* Notifier mode. When Clair is running in notifier mode, it is responsible for generating notifications when new vulnerabilities affecting a previously indexed manifest enter the system. The notifier will send notifications through the configured mechanisms. + +* Combination mode. When Clair is running in combination mode, the `indexer`, `matcher`, and `notifier` each run on a single OS process. 
+ +[NOTE] +==== +These modes are available when running Clair with a {productname} subscription. +==== + +For more information on testing Clair in a distributed deployment, see link:https://quay.github.io/clair/howto/getting_started.html#modes[Getting Started With ClairV4]. diff --git a/modules/testing-features.adoc b/modules/testing-features.adoc new file mode 100644 index 000000000..5ab77199d --- /dev/null +++ b/modules/testing-features.adoc @@ -0,0 +1,17 @@ +[[testing-features]] += Testing {productname} 3.7 Features + +The following features can be tested in one of two ways: + + * Using the {productname} Operator in the Community Catalog of your OpenShift Container Platform cluster. + * Using the following standalone images: ++ +[source,yaml] +---- +Quay: quay.io/projectquay/quay:3.7.0-rc.2 +Clair: quay.io/projectquay/clair:3.7.0-rc.1 +Quay Builder: quay.io/projectquay/quay-builder:3.7.0-rc.2 +Quay Builder QEMU: quay.io/projectquay/quay-builder-qemu:main +Postgres: centos/postgresql-10 centos7@sha256:de1560cb35e5ec643e7b3a772ebaac8e3a7a2a8e8271d9e91ff023539b4dfb33 +Redis: centos/redis-32-centos7@sha256:06dbb609484330ec6be6090109f1fa16e936afcf975d1cbc5fff3e6c7cae7542 +---- diff --git a/modules/testing-oci-support.adoc b/modules/testing-oci-support.adoc new file mode 100644 index 000000000..20f35563d --- /dev/null +++ b/modules/testing-oci-support.adoc @@ -0,0 +1,177 @@ +:_content-type: CONCEPT +[id="attaching-referrers-image-tag"] += Attaching referrers to an image tag + +The following procedure shows you how to attach referrers to an image tag using different schemas supported by the OCI distribution spec 1.1 using the `oras` CLI. This is useful for attaching and managing additional metadata like referrers to container images. + +.Prerequisites + +* You have downloaded the `oras` CLI. For more information, see link:https://oras.land/docs/installation[Installation]. +* You have access to an OCI media artifact. + +.Procedure + +. 
Tag an OCI media artifact by entering the following command: ++ +[source,terminal] +---- +$ podman tag ///: +---- + +. Push the artifact to your {productname} registry. For example: ++ +[source,terminal] +---- +$ podman push ///: +---- + +. Enter the following command to attach a manifest using the OCI 1.1 referrers `API` schema with `oras`: ++ +[source,terminal] +---- +$ oras attach --artifact-type --distribution-spec v1.1-referrers-api \ +///: \ +.txt +---- ++ +.Example output ++ +[source,terminal] +---- +-spec v1.1-referrers-api quay.io/testorg3/myartifact-image:v1.0 hi.txt +✓ Exists hi.txt 3/3 B 100.00% 0s + └─ sha256:98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 +✓ Exists application/vnd.oci.empty.v1+json 2/2 B 100.00% 0s + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 723/723 B 100.00% 677ms + └─ sha256:31c38e6adcc59a3cfbd2ef971792aaf124cbde8118e25133e9f9c9c4cd1d00c6 +Attached to [registry] quay.io/testorg3/myartifact-image@sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da +Digest: sha256:31c38e6adcc59a3cfbd2ef971792aaf124cbde8118e25133e9f9c9c4cd1d00c6 +---- + +. 
Enter the following command to attach a manifest using the OCI 1.1 referrers `tag` schema: ++ +[source,terminal] +---- +$ oras attach --artifact-type --distribution-spec v1.1-referrers-tag \ + ///: \ +.txt +---- ++ +.Example output ++ +[source,terminal] +---- +✓ Exists hi.txt 3/3 B 100.00% 0s + └─ sha256:98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 +✓ Exists application/vnd.oci.empty.v1+json 2/2 B 100.00% 0s + └─ sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +✓ Uploaded application/vnd.oci.image.manifest.v1+json 723/723 B 100.00% 465ms + └─ sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 +Attached to [registry] quay.io/testorg3/myartifact-image@sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da +Digest: sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 +---- + +. Enter the following command to discover referrers of the artifact using the `tag` schema: ++ +[source,terminal] +---- +$ oras discover --insecure --distribution-spec v1.1-referrers-tag \ +///: +---- ++ +.Example output ++ +[source,terminal] +---- +quay.io/testorg3/myartifact-image@sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da +└── doc/example + └── sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 +---- + +. Enter the following command to discover referrers of the artifact using the `API` schema: ++ +[source,terminal] +---- +$ oras discover --distribution-spec v1.1-referrers-api \ +///: +---- ++ +.Example output ++ +[source,terminal] +---- +Discovered 3 artifacts referencing v1.0 +Digest: sha256:db440c57edfad40c682f9186ab1c1075707ce7a6fdda24a89cb8c10eaad424da + +Artifact Type Digest + sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383 + sha256:22b7e167793808f83db66f7d35fbe0088b34560f34f8ead36019a4cc48fd346b + sha256:bb2b7e7c3a58fd9ba60349473b3a746f9fe78995a88cb329fc2fd1fd892ea4e4 +---- + +. Optional. 
You can also discover referrers by using the `/v2///referrers/` endpoint. For this to work, you must generate a v2 API token and set `FEATURE_REFERRERS_API: true` in your `config.yaml` file. + +.. Update your `config.yaml` file to include the `FEATURE_REFERRERS_API` field. For example: ++ +[source,yaml] +---- +# ... +FEATURE_REFERRERS_API: true +# ... +---- + +.. Enter the following command to Base64 encode your credentials: ++ +[source,terminal] +---- +$ echo -n ':' | base64 +---- ++ +.Example output ++ +[source,terminal] +---- +abcdeWFkbWluOjE5ODlraWROZXQxIQ== +---- + +.. Enter the following command to use the base64 encoded token and modify the URL endpoint to your {productname} server: ++ +[source,terminal] +---- +$ curl --location '/v2/auth?service=&scope=repository:quay/listocireferrs:pull,push' --header 'Authorization: Basic ' -k | jq +---- ++ +.Example output ++ +[source,terminal] +---- +{ + "token": "..." +} +---- + +. Enter the following command, using the v2 API token, to list OCI referrers of a manifest under a repository: ++ +[source,terminal] +---- +$ GET https:///v2///referrers/sha256:0de63ba2d98ab328218a1b6373def69ec0d0e7535866f50589111285f2bf3fb8 +--header 'Authorization: Bearer -k | jq +---- ++ +.Example output ++ +[source,terminal] +---- +{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "digest": "sha256:2d4b54201c8b134711ab051389f5ba24c75c2e6b0f0ff157fce8ffdfe104f383", + "size": 793 + }, + ] +} +---- diff --git a/modules/testing-ssl-tls-configuration.adoc b/modules/testing-ssl-tls-configuration.adoc new file mode 100644 index 000000000..520db12f7 --- /dev/null +++ b/modules/testing-ssl-tls-configuration.adoc @@ -0,0 +1,5 @@ +:_content-type: PROCEDURE +[id="testing-ssl-tls-configuration"] += Testing the SSL/TLS configuration + +Your SSL/TLS configuration can be tested by using the command-line interface (CLI). 
Use the following procedure to test your SSL/TLS configuration. \ No newline at end of file diff --git a/modules/token-overview.adoc b/modules/token-overview.adoc new file mode 100644 index 000000000..d9a12a22c --- /dev/null +++ b/modules/token-overview.adoc @@ -0,0 +1,15 @@ +:_content-type: CONCEPT +[id="token-overview"] += Introduction to {productname} OAuth 2.0 tokens + +The {productname} OAuth 2 token system provides a secure, standards-based method for accessing {productname}'s API and other relevant resources. The OAuth 2 token-based approach provides a secure method for handling authentication and authorization for complex environments. Compared to more traditional API tokens, {productname}'s OAuth 2 token system offers the following enhancements: + +* Standards-based security, which adheres to the link:https://oauth.net/2/[OAuth 2.0 protocol]. +* Revocable access by way of deleting the application in which the OAuth 2 token exists. +* Fine-grained access control, which allows {productname} administrators the ability to assign specific permissions to tokens. +* Delegated access, which allows third-party applications and services to act on behalf of a user. +* Future-proofing, which helps ensure that {productname} remains compatible with other services, platforms, and integrations. + +{productname} primarily supports two types of tokens: OAuth 2 access tokens and robot account tokens. A third token type, an _OCI referrers access token_, that is required to list OCI referrers of a manifest under a repository, is also available when warranted. + +The following chapters provide more details about each token type and how to generate each token type. 
\ No newline at end of file diff --git a/modules/troubleshooting-401-helm.adoc b/modules/troubleshooting-401-helm.adoc new file mode 100644 index 000000000..f4798fca1 --- /dev/null +++ b/modules/troubleshooting-401-helm.adoc @@ -0,0 +1,12 @@ +:_content-type: CONCEPT +[id="troubleshooting-401-helm"] += Troubleshooting Helm chart pushes on {productname} + +In some cases, pushing a Helm chart to your {productname} registry might return the following error: `Error: unexpected status: 401 UNAUTHORIZED`. This error primarily occurs when using robot accounts, and because Helm interprets the repository by assuming that the repository used to push is the name of the Helm chart. For example, if you create a chart named `etherpad` and then push to a repository named `etherpad`, it works. However, pushing to a different repository, for example, `samplerepo`, when using a robot account does not work because Helm interprets it as pushing to `samplerepo/etherpad`. This error occurs because usually robot accounts do not usually have permissions to create a repository. + +As a workaround for this issue, use a robot account that is added to a team that has creator privileges. + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6973126[Unable to push helm chart to Quay registry prompts unexpected status: 401 UNAUTHORIZED]. \ No newline at end of file diff --git a/modules/troubleshooting-builds.adoc b/modules/troubleshooting-builds.adoc new file mode 100644 index 000000000..9d5e4b54c --- /dev/null +++ b/modules/troubleshooting-builds.adoc @@ -0,0 +1,93 @@ + +:_content-type: PROCEDURE +[id="troubleshooting-builds"] += Troubleshooting Builds + +The _builder_ instances started by the _build manager_ are ephemeral. This means that they will either get shut down by {productname} on timeouts or failure, or garbage collected by the control plane (EC2/K8s). 
In order to obtain the _builds_ logs, you must do so while the _builds_ are running. + +[id="debug-config-flag"] +== DEBUG config flag + +The `DEBUG` flag can be set to `true` in order to prevent the _builder_ instances from getting cleaned up after completion or failure. For example: + +[source,yaml] +---- + EXECUTORS: + - EXECUTOR: ec2 + DEBUG: true + ... + - EXECUTOR: kubernetes + DEBUG: true + ... +---- + +When set to `true`, the debug feature prevents the _build nodes_ from shutting down after the `quay-builder` service is done or fails. It also prevents the _build manager_ from cleaning up the instances by terminating EC2 instances or deleting Kubernetes jobs. This allows debugging _builder node_ issues. + +Debugging should not be set in a production cycle. The lifetime service still exists; for example, the instance still shuts down after approximately two hours. When this happens, EC2 instances are terminated and Kubernetes jobs are completed. + +Enabling debug also affects the `ALLOWED_WORKER_COUNT` because the unterminated instances and jobs still count toward the total number of running workers. As a result, the existing _builder workers_ must be manually deleted if `ALLOWED_WORKER_COUNT` is reached to be able to schedule new _builds_. + +ifdef::upstream[] +[id="troubleshooting-amazon-ec2"] +== Troubleshooting Amazon EC2 + +Use the following procedure to troubleshoot Amazon EC2 Builds. + +.Procedure + +. Start a Build in {productname}. + +. In the EC2 console, identify the Build instance. Build instances are named `Quay Ephemeral Builder` and have the tag {`: `} + +. Using the SSH key set by the `EC2_KEY_NAME` configuration field, log in to the Builder instance by running the following command: ++ +[source,terminal] +---- +$ ssh -i /path/to/ssh/key/in/ec2/or/config/id_rsa core@ +---- + +. 
Obtain the `quay-builder` service logs by entering the following commands: ++ +[source,terminal] +---- +$ systemctl status quay-builder +---- ++ +[source,terminal] +---- +$ journalctl -f -u quay-builder +---- +endif::upstream[] + +[id="openshift-kubernetes-troubleshooting"] +== Troubleshooting {ocp} and Kubernetes Builds + +Use the following procedure to troubleshoot {ocp} and Kubernetes Builds. + +.Procedure + +. Create a port forwarding tunnel between your local machine and a pod running with either an {ocp} cluster or a Kubernetes cluster by entering the following command: ++ +[source,terminal] +---- +$ oc port-forward 9999:2222 +---- + +. Establish an SSH connection to the remote host using a specified SSH key and port, for example: ++ +[source,terminal] +---- +$ ssh -i /path/to/ssh/key/set/in/ssh_authorized_keys -p 9999 core@localhost +---- + +. Obtain the `quay-builder` service logs by entering the following commands: ++ +[source,terminal] +---- +$ systemctl status quay-builder +---- ++ +[source,terminal] +---- +$ journalctl -f -u quay-builder +---- diff --git a/modules/troubleshooting-components.adoc b/modules/troubleshooting-components.adoc new file mode 100644 index 000000000..0b1ddb7e0 --- /dev/null +++ b/modules/troubleshooting-components.adoc @@ -0,0 +1,9 @@ +:_content-type: CONCEPT +[id="troubleshooting-components"] += Troubleshooting {productname} components + +This document focuses on troubleshooting specific components within {productname}, providing targeted guidance for resolving issues that might arise. Designed for system administrators, operators, and developers, this resource aims to help diagnose and troubleshoot problems related to individual components of {productname}. + +In addition to the following procedures, {productname} components can also be troubleshot by running {productname} in debug mode, obtaining log information, obtaining configuration information, and performing health checks on endpoints. 
+ +By using the following procedures, you are able to troubleshoot common component issues. Afterwards, you can search for solutions on the link:https://access.redhat.com/knowledgebase[Red Hat Knowledgebase], or file a support ticket with the Red Hat Support team. \ No newline at end of file diff --git a/modules/troubleshooting-forgotten-passwords.adoc b/modules/troubleshooting-forgotten-passwords.adoc new file mode 100644 index 000000000..81f9adba9 --- /dev/null +++ b/modules/troubleshooting-forgotten-passwords.adoc @@ -0,0 +1,110 @@ +:_content-type: CONCEPT +[id="troubleshooting-forgotten-passwords"] += Resetting superuser passwords on {productname} standalone deployments + +Use the following procedure to reset a superuser's password. + +.Prerequisites + +* You have created a {productname} superuser. +* You have installed Python 3.9. +* You have installed the `pip` package manager for Python. +* You have installed the `bcrypt` package for `pip`. + +.Procedure + +. Generate a secure, hashed password using the `bcrypt` package in Python 3.9 by entering the following command: ++ +[source,terminal] +---- +$ python3.9 -c 'import bcrypt; print(bcrypt.hashpw(b"newpass1234", bcrypt.gensalt(12)).decode("utf-8"))' +---- ++ +.Example output ++ +[source,terminal] +---- +$2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm +---- + +. 
Enter the following command to show the container ID of your {productname} container registry: ++ +[source,terminal] +---- +$ sudo podman ps -a +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +70560beda7aa registry.redhat.io/rhel8/redis-5:1 run-redis 2 hours ago Up 2 hours ago 0.0.0.0:6379->6379/tcp redis +8012f4491d10 registry.redhat.io/quay/quay-rhel8:v3.8.2 registry 3 minutes ago Up 8 seconds ago 0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp quay +8b35b493ac05 registry.redhat.io/rhel8/postgresql-10:1 run-postgresql 39 seconds ago Up 39 seconds ago 0.0.0.0:5432->5432/tcp postgresql-quay +---- + +. Execute an interactive shell for the `postgresql` container image by entering the following command: ++ +[source,terminal] +---- +$ sudo podman exec -it 8b35b493ac05 /bin/bash +---- + +. Re-enter the `quay` PostgreSQL database server, specifying the database, username, and host address: ++ +[source,terminal] +---- +bash-4.4$ psql -d quay -U quayuser -h 192.168.1.28 -W +---- + +. Update the `password_hash` of the superuser admin who lost their password: ++ +[source,terminal] +---- +quay=> UPDATE public.user SET password_hash = '$2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm' where username = 'quayadmin'; +---- ++ +.Example output ++ +[source,terminal] +---- +UPDATE 1 +---- + +. 
Enter the following to command to ensure that the `password_hash` has been updated: ++ +[source,terminal] +---- +quay=> select * from public.user; +---- ++ +.Example output ++ +[source,terminal] +---- +id | uuid | username | password_hash | email | verified | stripe_id | organization | robot | invoice_email | invalid_login_attempts | last_invalid_login |removed_tag_expiration_s | enabled | invoice_email_address | company | family_name | given_name | location | maximum_queued_builds_count | creation_date | last_accessed +----+--------------------------------------+-----------+--------------------------------------------------------------+-----------------------+--- +-------+-----------+--------------+-------+---------------+------------------------+----------------------------+--------------------------+------ +---+-----------------------+---------+-------------+------------+----------+-----------------------------+----------------------------+----------- +1 | 73f04ef6-19ba-41d3-b14d-f2f1eed94a4a | quayadmin | $2b$12$T8pkgtOoys3G5ut7FV1She6vXlYgU.6TeoGmbbAVQtN8X8ch4knKm | quayadmin@example.com | t | | f | f | f | 0 | 2023-02-23 07:54:39.116485 | 1209600 | t | | | | | | | 2023-02-23 07:54:39.116492 +---- + +. Log in to your {productname} deployment using the new password: ++ +[source,terminal] +---- +$ sudo podman login -u quayadmin -p newpass1234 http://quay-server.example.com --tls-verify=false +---- ++ +.Example output ++ +[source,terminal] +---- +Login Succeeded! +---- + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/solutions/6964805[Resetting Superuser Password for Quay]. 
diff --git a/modules/troubleshooting-general.adoc b/modules/troubleshooting-general.adoc new file mode 100644 index 000000000..48418210b --- /dev/null +++ b/modules/troubleshooting-general.adoc @@ -0,0 +1,5 @@ +:_content-type: CONCEPT +[id="troubleshooting-general"] += General troubleshooting for {productname} + +The following sections detail general troubleshooting errors for {productname}. \ No newline at end of file diff --git a/modules/troubleshooting-how-tos.adoc b/modules/troubleshooting-how-tos.adoc new file mode 100644 index 000000000..4628c4206 --- /dev/null +++ b/modules/troubleshooting-how-tos.adoc @@ -0,0 +1,6 @@ +:_content-type: CONCEPT +[id="troubleshooting-how-tos"] += How To guide + +This "How to" guide provides step-by-step instructions for troubleshooting common issues encountered while using {productname}. Whether you're a system administrator, developer, or user, this guide helps identify and resolve problems effectively. + diff --git a/modules/troubleshooting-slow-pushes.adoc b/modules/troubleshooting-slow-pushes.adoc new file mode 100644 index 000000000..5b13b1219 --- /dev/null +++ b/modules/troubleshooting-slow-pushes.adoc @@ -0,0 +1,297 @@ +:_content-type: CONCEPT +[id="troubleshooting-slow-pushes"] += Troubleshooting slow image pushes and pulls on the {productname} Operator + +In some cases, your {productname} deployment on {ocp} might experience slow pushes and pulls. The {productname} Operator is only able to serve or accept container image data as fast as the underlying storage allows. There are various causes that might dictate pull speed on a {productname} registry, including: + +* Intermittent networking issues to {productname}'s s3 storage, as pulls directly depend on it. +* Slow backend storage. +* Various problems on {ocp} nodes. +* DNS issues in the cluster. +* Layers or blobs of an image are large, even if the image size is not. +* Using a VPN. 
+* High network bandwidth loads that lead to hogging network resources on certain {ocp} pods. + +To explore the root cause of some of these issues, you can run {productname} in debug mode. For more information, see Running {productname} in debug mode. + +Other troubleshooting procedures for slow pushes and pulls on the {productname} Operator can be found in subsequent sections. + +[id="understanding-quay-setup"] +== Understanding your {productname} setup + +In some cases, understanding your {productname} setup and the load on the cluster, and determining whether it is enough, can help diagnose slow pushes and pulls. + +[id="comparing-containerization-platforms"] +== Comparing containerization platforms + +In some cases, pushes and pulls done with the Podman CLI might be slow. Use the following procedure to compare Podman pushes and pulls to Docker. + +.Procedure + +. Enter the following command to check how long it takes for the Podman client to pull images: ++ +[source,terminal] +---- +$ time podman pull //: +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob 900e6061671b done +Copying config 8135583d97 done +Writing manifest to image destination +Storing signatures +8135583d97feb82398909c9c97607159e6db2c4ca2c885c0b8f590ee0f9fe90d +0.57user 0.11system 0:00.99elapsed 68%CPU (0avgtext+0avgdata 78716maxresident)k +800inputs+15424outputs (18major+6528minor)pagefaults 0swaps +---- + +. Compare the Podman time with another client's time, like Docker. 
For example: ++ +[source,terminal] +---- +$ time docker pull //: +---- ++ +.Example output ++ +[source,terminal] +---- +Getting image source signatures +Copying blob sha256: [--------------------------------------] 0.0b/4.2MB +Copying config sha256: [--------------------------------------] 0.0b/1.5KB +Writing manifest to image destination +Storing signatures + +real 0m15.346s +user 0m0.056s +sys 0m0.020s +---- + +[id="checking-health-quay-pods"] +== Checking the health of your deployment using the API + +In some cases, the health of your `Quay` pods might be compromised. Use the following procedure to check the health of your `Quay` pods. + +.Procedure + +. The following commands run a health check on the `Quay` pods: ++ +.. If you are using custom certificates for {productname}, you can enter the following commands: ++ +[source,terminal] +---- +$ curl -k /health/instance +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl -k /health/endtoend +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"redis":true,"storage":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl -k /health/warning +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"disk_space_warning":true}},"status_code":200} +---- + +.. 
If you are not using custom certificates, enter the following commands: ++ +[source,terminal] +---- +$ curl /health/instance +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"disk_space":true,"registry_gunicorn":true,"service_key":true,"web_gunicorn":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl /health/endtoend +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"auth":true,"database":true,"redis":true,"storage":true}},"status_code":200} +---- ++ +[source,terminal] +---- +$ curl -k /health/warning +---- ++ +.Example output ++ +[source,terminal] +---- +{"data":{"services":{"disk_space_warning":true}},"status_code":200} +---- + +. If the status of your `Quay` pod is reported as `unhealthy`, consult with your storage provider to ensure that it is supported for use with {productname}. Otherwise, you can check the link:https://access.redhat.com/articles/4067991[Quay Enterprise 3.x Test Integrations] document. + + +[id="checking-network-connection"] +== Checking the network connection between {productname} and the storage location + +In some cases, the network connection between {productname} and its storage location might be erroneous. + +Use the following procedure to check the network connection between {productname} and the storage location. + +.Procedure + +* From a system that has access to {productname} and to the storage provider, enter the following command: ++ +[source,terminal] +---- +$ ping +---- ++ +.Example output ++ +[source,terminal] +---- +Destination Host Unreachable +---- ++ +If an error is returned, there are network connectivity issues or the storage provider is currently unavailable. + +[id="checking-size-image"] +== Checking the size of the image + +In some cases, overall time when pulling an image can be determined by its size. Use the following procedure to check the size of an image. + +.Procedure + +. 
Enter the following command to obtain the image manifest: ++ +[source,terminal] +---- +$ curl -X GET https:///v2//manifests/ -H "Accept: application/vnd.docker.distribution.manifest.v2+json" +---- + +. Enter the following command to extract the image size from the response: ++ +[source,terminal] +---- +$ curl -X GET https:///v2//manifests/ -H "Accept: application/vnd.docker.distribution.manifest.v2+json" | jq '.config.size' +---- ++ +[NOTE] +==== +The response is in a JSON document. Locate the `config` field. Within that field, you will find a `size` property. The value of `size` represents the size of the image in bytes. +==== + +[id="checking-throughput-vms"] +== Checking the throughput of your virtual machines to your storage bucket + +Use the following procedure to check the throughput of your virtual machine to your storage provider. The execution time revealed in the following procedure might help you optimize performance, reveal why pushes and pulls are slow, or compare different configurations or setups. + +.Prerequisites + +* You have installed the AWS CLI (`aws`). + +.Procedure + +. Enter the following command to create a sample file of 500 MB, that is filled with random data, in the `/tmp` directory: ++ +[source,terminal] +---- +$ dd if=/dev/urandom of=/tmp/random-file count=10 bs=50M iflag=fullblock +---- + +. Enter the following command to set the value of your AWS access key: ++ +[source,terminal] +---- +$ export AWS_ACCESS_KEY_ID= +---- + +. Enter the following command to set the value of your AWS secret access key: ++ +[source,terminal] +---- +$ export AWS_SECRET_ACCESS_KEY=123456789ABCD +---- + +. Copy the sample file created in Step 1 to your storage bucket, measuring the execution time, by entering the following command: ++ +[source,terminal] +---- +$ time { aws s3 cp --no-verify-ssl --endpoint-url https://.com /tmp/random-file s3://; } +---- + +. 
Remove the sample file by entering the following command: ++ +[source,terminal] +---- +$ rm /tmp/random-file +---- + +. Copy the sample file from your storage bucket to your local directory, measuring the execution time, by entering the following command: ++ +[source,terminal] +---- +$ time { aws s3 cp --no-verify-ssl --endpoint-url https://.com s3:///random-file /tmp; } +---- ++ +Use this information to reveal insights into the performance of the virtual machine and storage provider that you are using. + +[id="obtaining-regional-information"] +== Obtaining regional information + +If your {productname} machine is located in a different region as your s3 bucket, pushes and pulls might be slower than expected. + +[id="configuring-firewall"] +== Configuring firewalls + +If your machine has any proxies or firewalls between the client pulling images and the {productname} registry, additional latency might be introduced, or bandwidth could be restricted. You can try disabling your firewall to improve push and pull speeds. + +[NOTE] +==== +If you are not using the `FEATURE_PROXY_STORAGE` feature, {productname} provides a direct download link to the client through Podman, Skopeo, or Docker. At this point, traffic does not go through {productname}. Instead, the client pings the underlying storage and requests the image layer. +==== + +[id="checking-antivirus-software"] +== Checking your antivirus software + +In some cases, antivirus software can interact with an image when it is pulled. This can increase the time that it takes to pull an image. Ensure that your antivirus software does not interfere with images when they are being pulled. + +[id="checking-resource-allocation"] +== Checking resource allocation + +In some cases, an under-provisioned machine can result in slow performance. Check the resource allocation for the machine that is hosting the `Quay` pod or container. 
Ensure that it has sufficient CPU, memory, and network resources allocated to handle the expected workload. + + +[role="_additional-resources"] +.Additional resources + +For more information, see link:https://access.redhat.com/articles/7023728[Image pull is slow in Quay]. diff --git a/modules/understanding-action-logs.adoc b/modules/understanding-action-logs.adoc new file mode 100644 index 000000000..e89f87aa6 --- /dev/null +++ b/modules/understanding-action-logs.adoc @@ -0,0 +1,234 @@ +:_content-type: CONCEPT +[id="understanding-action-logs"] += Understanding usage logs + +By default, usage logs are stored in the {productname} database. They are exposed through the web UI, on the organization and repository levels, and in the *Superuser Admin Panel*. + +Database logs capture a wide range of events in {productname}, such as the changing of account plans, user actions, and general operations. Log entries include information such as the action performed (`kind_id`), the user who performed the action (`account_id` or `performer_id`), the timestamp (`datetime`), and other relevant data associated with the action (`metadata_json`). + +[id="viewing-database-logs"] +== Viewing database logs + +The following procedure shows you how to view repository logs that are stored in a PostgreSQL database. + +.Prerequisites + +* You have administrative privileges. +* You have installed the `psql` CLI tool. + +.Procedure + +. Enter the following command to log in to your {productname} PostgreSQL database: ++ +[source,terminal] +---- +$ psql -h -p 5432 -U -d +---- ++ +.Example output ++ +[source,terminal] +---- +psql (16.1, server 13.7) +Type "help" for help. +---- + +. Optional. 
Enter the following command to display the tables list of your PostgreSQL database: ++ +[source,terminal] +---- +quay=> \dt +---- ++ +.Example output ++ +[source,terminal] +---- + List of relations + Schema | Name | Type | Owner +--------+----------------------------+-------+---------- + public | logentry | table | quayuser + public | logentry2 | table | quayuser + public | logentry3 | table | quayuser + public | logentrykind | table | quayuser +... +---- + +. You can enter the following command to return a list of `repository_ids` that are required to return log information: ++ +[source,terminal] +---- +quay=> SELECT id, name FROM repository; +---- ++ +.Example output ++ +[source,terminal] +---- + id | name +----+--------------------- + 3 | new_repository_name + 6 | api-repo + 7 | busybox +... +---- + +. Enter the following command to use the `logentry3` relation to show log information about one of your repositories: ++ +[source,terminal] +---- +SELECT * FROM logentry3 WHERE repository_id = ; +---- ++ +.Example output ++ +[source,terminal] +---- + id | kind_id | account_id | performer_id | repository_id | datetime | ip | metadata_json + + 59 | 14 | 2 | 1 | 6 | 2024-05-13 15:51:01.897189 | 192.168.1.130 | {"repo": "api-repo", "namespace": "test-org"} +---- ++ +In the above example, the following information is returned: ++ +[source,terminal] +---- +{ + "log_data": { + "id": 59 <1> + "kind_id": "14", <2> + "account_id": "2", <3> + "performer_id": "1", <4> + "repository_id": "6", <5> + "ip": "192.168.1.100", <6> + "metadata_json": {"repo": "api-repo", "namespace": "test-org"} <7> + "datetime": "2024-05-13 15:51:01.897189" <8> + } +} +---- +<1> The unique identifier for the log entry. +<2> The action that was done. In this example, it was `14`. The key, or table, in the following section shows you that this `kind_id` is related to the creation of a repository. +<3> The account that performed the action. +<4> The performer of the action. 
+<5> The repository that the action was done on. In this example, `6` correlates to the `api-repo` that was discovered in Step 3. +<6> The IP address where the action was performed. +<7> Metadata information, including the name of the repository and its namespace. +<8> The time when the action was performed. + +[id="log-entry-kind-ids"] +== Log entry kind_ids + +The following table represents the `kind_ids` associated with {productname} actions. + +[cols="1,3,6", options="header"] +|=== +|kind_id |Action |Description + +|1 |account_change_cc |Change of credit card information. +|2 |account_change_password |Change of account password. +|3 |account_change_plan |Change of account plan. +|4 |account_convert |Account conversion. +|5 |add_repo_accesstoken |Adding an access token to a repository. +|6 |add_repo_notification |Adding a notification to a repository. +|7 |add_repo_permission |Adding permissions to a repository. +|8 |add_repo_webhook |Adding a webhook to a repository. +|9 |build_dockerfile |Building a Dockerfile. +|10 |change_repo_permission |Changing permissions of a repository. +|11 |change_repo_visibility |Changing the visibility of a repository. +|12 |create_application |Creating an application. +|13 |create_prototype_permission |Creating permissions for a prototype. +|14 |create_repo |Creating a repository. +|15 |create_robot |Creating a robot (service account or bot). +|16 |create_tag |Creating a tag. +|17 |delete_application |Deleting an application. +|18 |delete_prototype_permission |Deleting permissions for a prototype. +|19 |delete_repo |Deleting a repository. +|20 |delete_repo_accesstoken |Deleting an access token from a repository. +|21 |delete_repo_notification |Deleting a notification from a repository. +|22 |delete_repo_permission |Deleting permissions from a repository. +|23 |delete_repo_trigger |Deleting a repository trigger. +|24 |delete_repo_webhook |Deleting a webhook from a repository. +|25 |delete_robot |Deleting a robot. 
+|26 |delete_tag |Deleting a tag. +|27 |manifest_label_add |Adding a label to a manifest. +|28 |manifest_label_delete |Deleting a label from a manifest. +|29 |modify_prototype_permission |Modifying permissions for a prototype. +|30 |move_tag |Moving a tag. +|31 |org_add_team_member |Adding a member to a team. +|32 |org_create_team |Creating a team within an organization. +|33 |org_delete_team |Deleting a team within an organization. +|34 |org_delete_team_member_invite |Deleting a team member invitation. +|35 |org_invite_team_member |Inviting a member to a team in an organization. +|36 |org_remove_team_member |Removing a member from a team. +|37 |org_set_team_description |Setting the description of a team. +|38 |org_set_team_role |Setting the role of a team. +|39 |org_team_member_invite_accepted |Acceptance of a team member invitation. +|40 |org_team_member_invite_declined |Declining of a team member invitation. +|41 |pull_repo |Pull from a repository. +|42 |push_repo |Push to a repository. +|43 |regenerate_robot_token |Regenerating a robot token. +|44 |repo_verb |Generic repository action (specifics might be defined elsewhere). +|45 |reset_application_client_secret |Resetting the client secret of an application. +|46 |revert_tag |Reverting a tag. +|47 |service_key_approve |Approving a service key. +|48 |service_key_create |Creating a service key. +|49 |service_key_delete |Deleting a service key. +|50 |service_key_extend |Extending a service key. +|51 |service_key_modify |Modifying a service key. +|52 |service_key_rotate |Rotating a service key. +|53 |setup_repo_trigger |Setting up a repository trigger. +|54 |set_repo_description |Setting the description of a repository. +|55 |take_ownership |Taking ownership of a resource. +|56 |update_application |Updating an application. +|57 |change_repo_trust |Changing the trust level of a repository. +|58 |reset_repo_notification |Resetting repository notifications. 
+|59 |change_tag_expiration |Changing the expiration date of a tag. +|60 |create_app_specific_token |Creating an application-specific token. +|61 |revoke_app_specific_token |Revoking an application-specific token. +|62 |toggle_repo_trigger |Toggling a repository trigger on or off. +|63 |repo_mirror_enabled |Enabling repository mirroring. +|64 |repo_mirror_disabled |Disabling repository mirroring. +|65 |repo_mirror_config_changed |Changing the configuration of repository mirroring. +|66 |repo_mirror_sync_started |Starting a repository mirror sync. +|67 |repo_mirror_sync_failed |Repository mirror sync failed. +|68 |repo_mirror_sync_success |Repository mirror sync succeeded. +|69 |repo_mirror_sync_now_requested |Immediate repository mirror sync requested. +|70 |repo_mirror_sync_tag_success |Repository mirror tag sync succeeded. +|71 |repo_mirror_sync_tag_failed |Repository mirror tag sync failed. +|72 |repo_mirror_sync_test_success |Repository mirror sync test succeeded. +|73 |repo_mirror_sync_test_failed |Repository mirror sync test failed. +|74 |repo_mirror_sync_test_started |Repository mirror sync test started. +|75 |change_repo_state |Changing the state of a repository. +|76 |create_proxy_cache_config |Creating proxy cache configuration. +|77 |delete_proxy_cache_config |Deleting proxy cache configuration. +|78 |start_build_trigger |Starting a build trigger. +|79 |cancel_build |Cancelling a build. +|80 |org_create |Creating an organization. +|81 |org_delete |Deleting an organization. +|82 |org_change_email |Changing organization email. +|83 |org_change_invoicing |Changing organization invoicing. +|84 |org_change_tag_expiration |Changing organization tag expiration. +|85 |org_change_name |Changing organization name. +|86 |user_create |Creating a user. +|87 |user_delete |Deleting a user. +|88 |user_disable |Disabling a user. +|89 |user_enable |Enabling a user. +|90 |user_change_email |Changing user email. +|91 |user_change_password |Changing user password. 
+|92 |user_change_name |Changing user name. +|93 |user_change_invoicing |Changing user invoicing. +|94 |user_change_tag_expiration |Changing user tag expiration. +|95 |user_change_metadata |Changing user metadata. +|96 |user_generate_client_key |Generating a client key for a user. +|97 |login_success |Successful login. +|98 |logout_success |Successful logout. +|99 |permanently_delete_tag |Permanently deleting a tag. +|100 |autoprune_tag_delete |Auto-pruning tag deletion. +|101 |create_namespace_autoprune_policy |Creating namespace auto-prune policy. +|102 |update_namespace_autoprune_policy |Updating namespace auto-prune policy. +|103 |delete_namespace_autoprune_policy |Deleting namespace auto-prune policy. +|104 |login_failure |Failed login attempt. +|=== + + + diff --git a/modules/understanding-tag-naming-build-triggers.adoc b/modules/understanding-tag-naming-build-triggers.adoc new file mode 100644 index 000000000..d0de06c1d --- /dev/null +++ b/modules/understanding-tag-naming-build-triggers.adoc @@ -0,0 +1,31 @@ +:_content-type: CONCEPT +[id="understanding-tag-naming-build-triggers"] += Tag naming for build triggers + +Custom tags are available for use in +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] +ifeval::["{context}" == "quay-builders-image-automation"] +{productname}. +endif::[] + +One option is to include any string of characters assigned as a tag for each built image. 
Alternatively, you can use the following tag templates on the *Configure Tagging* section of the build trigger to tag images with information from each commit: + +image:custom-tagging.png[Configure Tagging] + +* *${commit}*: Full SHA of the issued commit +* *${parsed_ref.branch}*: Branch information (if available) +* *${parsed_ref.tag}*: Tag information (if available) +* *${parsed_ref.remote}*: The remote name +* *${commit_info.date}*: Date when the commit was issued +* *${commit_info.author.username}*: Username of the author of the commit +* *${commit_info.short_sha}*: First 7 characters of the commit SHA +* *${committer.properties.username}*: Username of the committer + +This list is not complete, but does contain the most useful options for tagging purposes. You can find the complete tag template schema on link:https://github.com/quay/quay/blob/abfde5b9d2cf7d7145e68a00c9274011b4fe0661/buildtrigger/basehandler.py#L96-L195[this page]. + +For more information, see link:https://access.redhat.com/solutions/7033393[Set up custom tag templates in build triggers for {productname} and {quayio}]. \ No newline at end of file diff --git a/modules/unknown-artifacts.adoc b/modules/unknown-artifacts.adoc new file mode 100644 index 000000000..b9a91c83f --- /dev/null +++ b/modules/unknown-artifacts.adoc @@ -0,0 +1,30 @@ +// Document included in the following assemblies: + +// Configuring Red hat Quay + +:_content-type: REFERENCE +[id="unknown-artifacts"] += Unknown media types + +.Unknown media types configuration field +[cols="3a,1a,2a",options="header"] +|=== +|Field |Type |Description + +|**IGNORE_UNKNOWN_MEDIATYPES** | Boolean | When enabled, allows a container registry platform to disregard specific restrictions on supported artifact types and accept any unrecognized or unknown media types. 
+ +**Default:** `false` + +|=== + +[id="configuring-unknown-media-types"] +== Configuring unknown media types + +The following YAML is the example configuration when enabling unknown or unrecognized media types. + +.Unknown media types YAML configuration +[source,yaml] +---- +IGNORE_UNKNOWN_MEDIATYPES: true +---- + diff --git a/modules/unmanaging-clair-database.adoc b/modules/unmanaging-clair-database.adoc new file mode 100644 index 000000000..1d2acd13b --- /dev/null +++ b/modules/unmanaging-clair-database.adoc @@ -0,0 +1,37 @@ +// Module included in the following assemblies: +// +// clair/master.adoc + +:_content-type: PROCEDURE +[id="unmanaging-clair-database"] += Running a custom Clair configuration with an unmanaged Clair database + +Use the following procedure to set your Clair database to unmanaged. + +[IMPORTANT] +==== +You must not use the same externally managed PostgreSQL database for both {productname} and Clair deployments. Your PostgreSQL database must also not be shared with other workloads, as it might exhaust the natural connection limit on the PostgreSQL side when connection-intensive workloads, like {productname} or Clair, contend for resources. Additionally, pgBouncer is not supported with {productname} or Clair, so it is not an option to resolve this issue. 
+==== + +.Procedure + +* In the Quay Operator, set the `clairpostgres` component of the `QuayRegistry` custom resource to `managed: false`: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: quay370 +spec: + configBundleSecret: config-bundle-secret + components: + - kind: objectstorage + managed: false + - kind: route + managed: true + - kind: tls + managed: false + - kind: clairpostgres + managed: false +---- \ No newline at end of file diff --git a/modules/unsupported-security-scan.adoc b/modules/unsupported-security-scan.adoc new file mode 100644 index 000000000..c79ad5943 --- /dev/null +++ b/modules/unsupported-security-scan.adoc @@ -0,0 +1,17 @@ +:_content-type: PROCEDURE +[id="unsupported-security-scan"] += Image security scan reporting Unsupported + +In some cases, Clair cannot scan images and returns the following error: `{"level":"error","component":"internal/indexer/controller/Controller.Index","manifest":"sha256:e76c212f0288f1f4fe79d219fc6a90514234ef1016babdb7e11946db959d1bac","state":"FetchLayers","error":"failed to fetch layers: encountered error while fetching a layer: fetcher: unexpected status code: 404 Not Found (body starts: \"NoSuchKeyThe specified key does not exist./quay/datastorage/registry/sha256/a3/a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4?AWSAccessKeyId=xxxxxxxxxxxx\")","time":"2022-10-12T06:59:42Z","message":"error during scan"}`. + +This error means that a particular layer is missing from the bucket. Objects in an S3 bucket are referenced by keys. If a key is missing, that means that the object the key references is missing and is not found in the bucket. In the above example, the layer with SHA `a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4` is missing from the S3 bucket. + +To resolve this issue, the image to which the SHA IDs belong must be re-pushed to the registry so that all blobs are re-pushed as well. 
+ +[role="_additional-resources"] +.Additional resources + +For more information, see the following resources: + +* link:https://repost.aws/knowledge-center/404-error-nosuchkey-s3[How can I troubleshoot the 404 "NoSuchKey" error from Amazon S3?] +* link:https://access.redhat.com/solutions/6358352[Quay image SECURITY SCAN show Unsupported]. \ No newline at end of file diff --git a/modules/upgrading-geo-repl-quay-operator.adoc b/modules/upgrading-geo-repl-quay-operator.adoc new file mode 100644 index 000000000..1cb57bcf5 --- /dev/null +++ b/modules/upgrading-geo-repl-quay-operator.adoc @@ -0,0 +1,116 @@ +:_content-type: PROCEDURE +[id="upgrading-geo-repl-quay-operator"] += Upgrading a geo-replication deployment of {productname-ocp} + +Use the following procedure to upgrade your geo-replicated {productname-ocp} deployment. + +[IMPORTANT] +==== +* When upgrading a geo-replicated {productname-ocp} deployment to the next y-stream release (for example, {productname} 3.7 -> {productname} 3.8), you must stop operations before upgrading. +* There is intermittent downtime when upgrading from one y-stream release to the next. +* It is highly recommended to back up your {productname-ocp} deployment before upgrading. +==== + +.Procedure + +[NOTE] +==== +This procedure assumes that you are running the {productname} registry on three or more systems. For this procedure, we will assume three systems named `System A,` `System B,` and `System C`. `System A` will serve as the primary system in which the {productname} Operator is deployed. +==== + +. On System B and System C, scale down your {productname} registry. This is done by disabling auto scaling and overriding the replica count for {productname}, mirror workers, and Clair if it is managed. 
Use the following `quayregistry.yaml` file as a reference: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: false <1> + - kind: quay + managed: true + overrides: <2> + replicas: 0 + - kind: clair + managed: true + overrides: + replicas: 0 + - kind: mirror + managed: true + overrides: + replicas: 0 + … +---- +<1> Disable auto scaling of `Quay`, `Clair` and `Mirroring` workers +<2> Set the replica count to 0 for components accessing the database and objectstorage ++ +[NOTE] +==== +You must keep the {productname} registry running on System A. Do not update the `quayregistry.yaml` file on System A. +==== + +. Wait for the `registry-quay-app`, `registry-quay-mirror`, and `registry-clair-app` pods to disappear. Enter the following command to check their status: ++ +[source,terminal] +---- +oc get pods -n +---- ++ +.Example output ++ +[source,terminal] +---- +quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m +quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m +quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m +quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m +---- + +. On System A, initiate a {productname} upgrade to the latest y-stream version. This is a manual process. For more information about upgrading installed Operators, see link:https://docs.openshift.com/container-platform/{ocp-y}/operators/admin/olm-upgrading-operators.html[Upgrading installed Operators]. For more information about {productname} upgrade paths, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/operator-upgrade#upgrading_the_quay_operator[Upgrading the {productname} Operator]. + +. After the new {productname} registry is installed, the necessary upgrades on the cluster are automatically completed. 
Afterwards, new {productname} pods are started with the latest y-stream version. Additionally, new `Quay` pods are scheduled and started. + +. Confirm that the update has properly worked by navigating to the {productname} UI: + +.. In the *OpenShift* console, navigate to *Operators* → *Installed Operators*, and click the *Registry Endpoint* link. ++ +[IMPORTANT] +==== +Do not execute the following step until the {productname} UI is available. Do not upgrade the {productname} registry on System B and on System C until the UI is available on System A. +==== + +. Confirm that the update has properly worked on System A, initiate the {productname} upgrade on System B and on System C. The Operator upgrade results in an upgraded {productname} installation, and the pods are restarted. ++ +[NOTE] +==== +Because the database schema is correct for the new y-stream installation, the new pods on System B and on System C should quickly start. +==== + +. After updating, revert the changes made in step 1 of this procedure by removing `overrides` for the components. For example: ++ +[source,yaml] +---- +apiVersion: quay.redhat.com/v1 +kind: QuayRegistry +metadata: + name: registry + namespace: ns +spec: + components: + … + - kind: horizontalpodautoscaler + managed: true <1> + - kind: quay + managed: true + - kind: clair + managed: true + - kind: mirror + managed: true + … +---- +<1> If the `horizontalpodautoscaler` resource was set to `true` before the upgrade procedure, or if you want {productname} to scale in case of a resource shortage, set it to `true`. diff --git a/modules/upgrading-geo-repl-quay.adoc b/modules/upgrading-geo-repl-quay.adoc new file mode 100644 index 000000000..53ecaa2a5 --- /dev/null +++ b/modules/upgrading-geo-repl-quay.adoc @@ -0,0 +1,184 @@ +:_content-type: PROCEDURE +[id="upgrading-geo-repl-quay"] += Upgrading a geo-replication deployment of standalone {productname} + +Use the following procedure to upgrade your geo-replication {productname} deployment. 
+ +[IMPORTANT] +==== +* When upgrading geo-replication {productname} deployments to the next y-stream release (for example, {productname} 3.7 -> {productname} 3.8), you must stop operations before upgrading. +* There is intermittent downtime when upgrading from one y-stream release to the next. +* It is highly recommended to back up your {productname} deployment before upgrading. +==== + +.Prerequisites + +* You have logged into `registry.redhat.io`. + +.Procedure + +[NOTE] +==== +This procedure assumes that you are running {productname} services on three (or more) systems. For more information, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/deploy_red_hat_quay_-_high_availability/index#preparing_for_red_hat_quay_high_availability[Preparing for {productname} high availability]. +==== + +. Obtain a list of all {productname} instances on each system running a {productname} instance. + +.. Enter the following command on System A to reveal the {productname} instances: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +ec16ece208c0 registry.redhat.io/quay/quay-rhel8:v{producty-n1} registry 6 minutes ago Up 6 minutes ago 0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp quay01 +---- + +.. Enter the following command on System B to reveal the {productname} instances: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +7ae0c9a8b37d registry.redhat.io/quay/quay-rhel8:v{producty-n1} registry 5 minutes ago Up 2 seconds ago 0.0.0.0:82->8080/tcp, 0.0.0.0:445->8443/tcp quay02 +---- + +.. 
Enter the following command on System C to reveal the {productname} instances: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +e75c4aebfee9 registry.redhat.io/quay/quay-rhel8:v{producty-n1} registry 4 seconds ago Up 4 seconds ago 0.0.0.0:84->8080/tcp, 0.0.0.0:447->8443/tcp quay03 +---- + +. Temporarily shut down all {productname} instances on each system. + +.. Enter the following command on System A to shut down the {productname} instance: ++ +[source,terminal] +---- +$ sudo podman stop ec16ece208c0 +---- + +.. Enter the following command on System B to shut down the {productname} instance: ++ +[source,terminal] +---- +$ sudo podman stop 7ae0c9a8b37d +---- + +.. Enter the following command on System C to shut down the {productname} instance: ++ +[source,terminal] +---- +$ sudo podman stop e75c4aebfee9 +---- + +. Obtain the latest {productname} version, for example, {productname} {producty}, on each system. + +.. Enter the following command on System A to obtain the latest {productname} version: ++ +[source,terminal] +---- +$ sudo podman pull registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +.. Enter the following command on System B to obtain the latest {productname} version: ++ +[source,terminal] +---- +$ sudo podman pull registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +.. Enter the following command on System C to obtain the latest {productname} version: ++ +[source,terminal] +---- +$ sudo podman pull registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +. 
On System A of your highly available {productname} deployment, run the new image version, for example, {productname} {producty}: ++ +[source,terminal] +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ + --sysctl net.core.somaxconn=4096 \ + --name=quay01 \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /mnt/quay/storage:/datastorage:Z \ + -d registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +. Wait for the new {productname} container to become fully operational on System A. You can check the status of the container by entering the following command: ++ +[source,terminal] +---- +$ sudo podman ps +---- ++ +.Example output ++ +[source,terminal] +---- +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +70b9f38c3fb4 registry.redhat.io/quay/quay-rhel8:v{producty} registry 2 seconds ago Up 2 seconds ago 0.0.0.0:82->8080/tcp, 0.0.0.0:445->8443/tcp quay01 +---- + +. Optional: Ensure that {productname} is fully operation by navigating to the {productname} UI. + +. After ensuring that {productname} on System A is fully operational, run the new image versions on System B and on System C. + +.. On System B of your highly available {productname} deployment, run the new image version, for example, {productname} {producty}: ++ +[source,terminal] +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ + --sysctl net.core.somaxconn=4096 \ + --name=quay02 \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /mnt/quay/storage:/datastorage:Z \ + -d registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +.. On System C of your highly available {productname} deployment, run the new image version, for example, {productname} {producty}: ++ +[source,terminal] +---- +# sudo podman run --restart=always -p 443:8443 -p 80:8080 \ + --sysctl net.core.somaxconn=4096 \ + --name=quay03 \ + -v /mnt/quay/config:/conf/stack:Z \ + -v /mnt/quay/storage:/datastorage:Z \ + -d registry.redhat.io/quay/quay-rhel8:{productminv} +---- + +. 
You can check the status of the containers on System B and on System C by entering the following command: ++ +[source,terminal] +---- +$ sudo podman ps +---- \ No newline at end of file diff --git a/modules/upgrading-postgresql.adoc b/modules/upgrading-postgresql.adoc new file mode 100644 index 000000000..92c5c2303 --- /dev/null +++ b/modules/upgrading-postgresql.adoc @@ -0,0 +1,163 @@ +:_content-type: PROCEDURE +[id="upgrading-postgresql"] += Updating {productname} and the {productname} and Clair PostgreSQL databases on {ocp} + +[IMPORTANT] +==== +If your {productname} deployment is upgrading from one y-stream to the next, for example, from 3.8.10 -> 3.8.11, you must not switch the upgrade channel from `stable-3.8` to `stable-3.9`. Changing the upgrade channel in the middle of a y-stream upgrade will disallow {productname} from upgrading to 3.9. This is a known issue and will be fixed in a future version of {productname}. +==== + +When updating {productname} 3.8 -> 3.9, the Operator automatically upgrades the existing PostgreSQL databases for Clair and {productname} from version 10 to version 13. + +[IMPORTANT] +==== +* Users with a managed database are required to upgrade their PostgreSQL database from 10 -> 13. +* If your {productname} and Clair databases are managed by the Operator, the database upgrades for each component must succeed for the 3.9.0 upgrade to be successful. If either of the database upgrades fail, the entire {productname} version upgrade fails. This behavior is expected. +==== + +You can update {productname} and the {productname} and Clair PostgreSQL databases on {ocp} by using the *Web Console* UI, or by using the CLI. + +[id="updating-quay-clair-postgresql-db-console"] +== Updating {productname} and the {productname} and Clair PostgreSQL databases using the {ocp} web console + +Use the following procedure to update {productname} and the {productname} and Clair PostgreSQL databases using the {ocp} web console. 
+ +[IMPORTANT] +==== +* This upgrade is irreversible. It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. +* If your {productname} and Clair databases are managed by the Operator, the database upgrades for each component must succeed for the 3.9.0 upgrade to be successful. If either of the database upgrades fail, the entire {productname} version upgrade fails. This behavior is expected. +* By default, {productname} is configured to save old persistent volume claims (PVCs) from PostgreSQL 10. To disable this setting and remove old PVCs, you must set `POSTGRES_UPGRADE_DELETE_BACKUP` to `True` in your `quay-operator` `Subscription` object. +==== + +.Prerequisites + +* You have installed {productname} 3.6, 3.7, or 3.8 on {ocp}. +* 100 GB of free, additional storage. ++ +During the upgrade process, additional persistent volume claims (PVCs) are provisioned to store the migrated data. This helps prevent a destructive operation on user data. The upgrade process rolls out PVCs for 50 GB for both the {productname} database upgrade, and the Clair database upgrade. + +.Procedure + +. Optional. Back up your old PVCs from PostgreSQL 10 by setting `POSTGRES_UPGRADE_DELETE_BACKUP` to `false` in your `quay-operator` `Subscription` object. For example: ++ +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.8 + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_DELETE_BACKUP <1> + value: "false" +---- +<1> When set to `true`, removes old PVCs after upgrading. + +. In the {ocp} Web Console, navigate to *Operators* -> *Installed Operators*. + +. Click on the {productname} Operator. + +. 
Navigate to the *Subscription* tab. + +. Under *Subscription details* click *Update channel*. + +. Select *stable-3.9* and save the changes. + +. Check the progress of the new installation under *Upgrade status*. Wait until the upgrade status changes to *1 installed* before proceeding. + +. In your {ocp} cluster, navigate to *Workloads* -> *Pods*. Existing pods should be terminated, or in the process of being terminated. + +. Wait for the following pods, which are responsible for upgrading the database and alembic migration of existing data, to spin up: `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade`. + +. After the `clair-postgres-upgrade`, `quay-postgres-upgrade`, and `quay-app-upgrade` pods are marked as *Completed*, the remaining pods for your {productname} deployment spin up. This takes approximately ten minutes. + +. Verify that the `quay-database` uses the `postgresql-13` image and `clair-postgres` pods now use the `postgresql-15` image. + +. After the `quay-app` pod is marked as *Running*, you can reach your {productname} registry. + +[id="updating-quay-clair-postgresql-db-cli"] +== Updating {productname} and the {productname} and Clair PostgreSQL databases using the CLI + +Use the following procedure to update {productname} and the {productname} and Clair PostgreSQL databases using the command-line interface (CLI). + +[IMPORTANT] +==== +* This upgrade is irreversible. It is highly recommended that you upgrade to PostgreSQL 13. PostgreSQL 10 had its final release on November 10, 2022 and is no longer supported. For more information, see the link:https://www.postgresql.org/support/versioning/[PostgreSQL Versioning Policy]. +* By default, {productname} is configured to save old persistent volume claims (PVCs) from PostgreSQL 10. To disable this setting and remove old PVCs, you must set `POSTGRES_UPGRADE_DELETE_BACKUP` to `True` in your `quay-operator` `Subscription` object. 
+ +==== + +.Prerequisites + +* You have installed {productname} 3.6, 3.7, or 3.8 on {ocp}. +* 100 GB of free, additional storage. ++ +During the upgrade process, additional persistent volume claims (PVCs) are provisioned to store the migrated data. This helps prevent a destructive operation on user data. The upgrade process rolls out PVCs for 50 GB for both the {productname} database upgrade, and the Clair database upgrade. + +.Procedure + +. Retrieve your `quay-operator` configuration file by entering the following `oc get` command: ++ +[source,terminal] +---- +$ oc get subscription quay-operator -n quay-enterprise -o yaml > quay-operator.yaml +---- + +. Retrieve the latest version of the {productname} Operator and its channel by entering the following command: ++ +[source,terminal] +---- +oc get packagemanifests quay-operator \ + -o jsonpath='{range .status.channels[*]}{@.currentCSV} {@.name}{"\n"}{end}' \ + | awk '{print "STARTING_CSV=" $1 " CHANNEL=" $2 }' \ + | sort -nr \ + | head -1 +---- ++ +.Example output ++ +[source,terminal] +---- +STARTING_CSV=quay-operator.v3.9.0 CHANNEL=stable-3.9 +---- + +. Using the output from the previous command, update your `Subscription` custom resource for the {productname} Operator and save it as `quay-operator.yaml`. For example: ++ +[source,yaml] +---- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: quay-operator + namespace: quay-enterprise +spec: + channel: stable-3.9 <1> + name: quay-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + config: + env: + - name: POSTGRES_UPGRADE_DELETE_BACKUP <2> + value: "false" +---- +<1> Specify the value you obtained in the previous step for the `spec.channel` parameter. +<2> Optional. Back up your old PVCs from PostgreSQL 10 by setting `POSTGRES_UPGRADE_DELETE_BACKUP` to `false` in your `quay-operator` `Subscription` object. + +. 
Enter the following command to apply the configuration: ++ +[source,terminal] +---- +$ oc apply -f quay-operator.yaml +---- ++ +.Example output ++ +[source,terminal] +---- +subscription.operators.coreos.com/quay-operator created +---- \ No newline at end of file diff --git a/modules/use-quay-export-logs-api.adoc b/modules/use-quay-export-logs-api.adoc new file mode 100644 index 000000000..167512e52 --- /dev/null +++ b/modules/use-quay-export-logs-api.adoc @@ -0,0 +1,67 @@ +:_content-type: PROCEDURE +[id="use-quay-export-logs-api"] += Exporting logs by using the API + +Detailed logs can be exported to a callback URL or to an email address. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#exportuserlogs[`POST /api/v1/user/exportlogs`] endpoint to export logs for the current user: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "your.email@example.com" + }' \ + "http:///api/v1/user/exportlogs" +---- ++ +.Example output ++ +[source,terminal] +---- +{"export_id": "6a0b9ea9-444c-4a19-9db8-113201c38cd4"} +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#exportorglogs[`POST /api/v1/organization/{orgname}/exportlogs`] endpoint to export logs for an Organization: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "", + "endtime": "", + "callback_email": "org.logs@example.com" + }' \ + "http:///api/v1/organization/{orgname}/exportlogs" +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#exportrepologs[`POST /api/v1/repository/{repository}/exportlogs`] endpoint to export logs for a repository: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + -d '{ + "starttime": "2024-01-01", + "endtime": "2024-06-18", + "callback_url": "http://your-callback-url.example.com" + }' \ + "http:///api/v1/repository/{repository}/exportlogs" +---- \ No newline at end of file diff --git a/modules/use-quay-export-logs.adoc b/modules/use-quay-export-logs.adoc new file mode 100644 index 000000000..73f1ba40a --- /dev/null +++ b/modules/use-quay-export-logs.adoc @@ -0,0 +1,58 @@ +:_content-type: PROCEDURE +[id="use-quay-export-logs"] += Exporting repository logs by using the UI + +ifeval::["{context}" == "quay-io"] +You can obtain a larger number of log files and save them outside of {quayio} by using the *Export Logs* feature. This feature has the following benefits and constraints: +endif::[] + +ifeval::["{context}" == "use-quay"] +You can obtain a larger number of log files and save them outside of the {productname} database by using the *Export Logs* feature. This feature has the following benefits and constraints: +endif::[] + +* You can choose a range of dates for the logs you want to gather from a repository. 
+ +* You can request that the logs be sent to you by an email attachment or directed to a callback URL. + +* To export logs, you must be an administrator of the repository or namespace. + +* 30 days worth of logs are retained for all users. + +* Export logs only gathers log data that was previously produced. It does not stream logging data. + +ifeval::["{context}" == "use-quay"] +* Your {productname} instance must be configured for external storage for this feature. Local storage does not work for exporting logs. +endif::[] + +* When logs are gathered and made available to you, you should immediately copy that data if you want to save it. By default, the data expires after one hour. + +Use the following procedure to export logs. + +.Procedure + +. Select a repository for which you have administrator privileges. + +. Click the *Logs* tab. + +. Optional. If you want to specify specific dates, enter the range in the *From* and *to* boxes. + +. Click the *Export Logs* button. An Export Usage Logs pop-up appears, as shown: ++ +image:export-usage-logs.png[Enter email or callback URL to receive exported logs] + +. Enter an email address or callback URL to receive the exported log. For the callback URL, you can use a URL to a specified domain, for example, . + +. Select *Confirm* to start the process of gathering the selected log entries. Depending on the amount of logging data being gathered, this can take anywhere from a few minutes to several hours to complete. + +. When the log export is completed, one of the following two events happens: ++ +* An email is received, alerting you to the availability of your requested exported log entries. + +* A successful status of your log export request from the webhook URL is returned. Additionally, a link to the exported data is made available for you to select to download the logs. 
+ +ifeval::["{context}" == "use-quay"] +[NOTE] +==== +The URL points to a location in your {productname} external storage and is set to expire within one hour. Make sure that you copy the exported logs before the expiration time if you intend to keep your logs. +==== +endif::[] \ No newline at end of file diff --git a/modules/use-quay-pull-image.adoc b/modules/use-quay-pull-image.adoc new file mode 100644 index 000000000..9a79b30b4 --- /dev/null +++ b/modules/use-quay-pull-image.adoc @@ -0,0 +1,65 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc + +:_content-type: CONCEPT +[id="use-quay-pull-image"] += Pulling an image + +_Pulling_ an image refers to the process of downloading a container image from a registry or another container registry to your local system or a container orchestration platform like Kubernetes or {ocp}. + +When you pull an image from {quayio}, you are essentially fetching a copy of that image to use on your local machine or in your container orchestration environment. This is a fundamental step in the containerization process, as it allows you to access the software contained within the image and run it as containers on your infrastructure. + +[IMPORTANT] +==== +As a safety measure against DDoS attacks, {quayio} rate limits pulls. If you are executing too many pulls at the same time, from a single client, you might receive a `429` response. +==== + +To pull an image from {quayio}, you typically use a container management tool like Podman or a container orchestration platform like {ocp}. + +Use the following procedure to pull an image from {quayio}. + +.Prerequisites + +* You have downloaded and installed the `podman` CLI. +* You have logged into {quayio}. + +.Procedure + +. 
Enter the following command to pull a sample image, for example, `busybox`, from {quayio}: ++ +[source,terminal] +---- +$ podman pull quay.io/quay/busybox +---- ++ +.Example output ++ +[source,terminal] +---- +Trying to pull quay.io/quay/busybox... +Getting image source signatures +Copying blob 4c892f00285e done +Copying config 22667f5368 done +Writing manifest to image destination +Storing signatures +22667f53682a2920948d19c7133ab1c9c3f745805c14125859d20cede07f11f9 +---- + +. You can check that you have pulled the container by running the following command: ++ +[source,terminal] +---- +$ podman images +---- ++ +.Example output ++ +[source,terminal] +---- +stevsmit@stevsmit quay_io (quayio-book) $ podman images +REPOSITORY TAG IMAGE ID CREATED SIZE +quay.io/quay/busybox latest e3121c769e39 3 years ago 1.45 MB +---- \ No newline at end of file diff --git a/modules/user-create.adoc b/modules/user-create.adoc new file mode 100644 index 000000000..b15b2c7ab --- /dev/null +++ b/modules/user-create.adoc @@ -0,0 +1,20 @@ + +// module included in the following assemblies: + +// * use_quay/master.adoc + +// * quay_io/master.adoc + +// Needs to be updated when v2 UI panel is available + +:_content-type: CONCEPT +[id="user-create"] +ifeval::["{context}" == "quay-io"] += {quayio} user accounts overview +endif::[] +ifeval::["{context}" == "use-quay"] += {productname} user accounts overview +endif::[] + +A _user account_ represents an individual with authenticated access to the platform's features and functionalities. User accounts provide the capability to create and manage repositories, upload and retrieve container images, and control access permissions for these resources. This account is pivotal for organizing and overseeing container image management within {productname}. + +You can create and delete new users on the {productname} UI or by using the {productname} API. 
\ No newline at end of file diff --git a/modules/user-org-intro.adoc b/modules/user-org-intro.adoc new file mode 100644 index 000000000..25bbb32cc --- /dev/null +++ b/modules/user-org-intro.adoc @@ -0,0 +1,44 @@ +// Module included in the following assemblies: + +// * quay_io/master.adoc +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="user-org-intro_{context}"] += {productname} tenancy model + +Before creating repositories to contain your container images in +ifeval::["{context}" == "quay-io"] +{quayio}, +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}, +endif::[] +you should consider how these repositories will be structured. With +ifeval::["{context}" == "quay-io"] +{quayio}, +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}, +endif::[] +each repository requires a connection with either an _Organization_ or a _User_. This affiliation defines ownership and access control for the repositories. + +//// +[discrete] +[id="tenancy-model"] +== Tenancy model + +image:178_Quay_architecture_0821_tenancy_model.png[Tenancy model] + +* **Organizations** provide a way of sharing repositories under a common namespace that does not belong to a single user. Instead, these repositories belong to several users in a shared setting, such as a company. + +* **Teams** provide a way for an Organization to delegate permissions. Permissions can be set at the global level (for example, across all repositories), or on specific repositories. They can also be set for specific sets, or groups, of users. + +* **Users** can log in to a registry through the web UI or by using a client like Podman and using their respective login commands, for example, `$ podman login`. Each user automatically gets a user namespace, for example, `//`, or `quay.io/` if you are using {quayio}. + +ifeval::["{context}" == "use-quay"] +* **Superusers** have enhanced access and privileges through the *Super User Admin Panel* in the user interface. 
Superuser API calls are also available, which are not visible or accessible to normal users. +endif::[] + +* **Robot accounts** provide automated access to repositories for non-human users like pipeline tools. Robot accounts are similar to {ocp} *Service Accounts*. Permissions can be granted to a robot account in a repository by adding that account like you would another user or team. +//// \ No newline at end of file diff --git a/modules/user-permissions-repo.adoc b/modules/user-permissions-repo.adoc new file mode 100644 index 000000000..b5fa86ff3 --- /dev/null +++ b/modules/user-permissions-repo.adoc @@ -0,0 +1,88 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc + +:_content-type: CONCEPT +[id="repo-manage-user-permissions"] += Managing user permissions by using the {productname} API + +Use the following procedure to manage user permissions by using the {productname} API. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getuserpermissions[`GET /api/v1/repository/{repository}/permissions/user/{username}`] endpoint to obtain repository permissions for a user. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user/" +---- ++ +.Example output ++ +[source,terminal] +---- +$ {"role": "read", "name": "testuser", "is_robot": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "is_org_member": false} +---- + +. 
All user permissions can be returned with the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepouserpermissions[`GET /api/v1/repository/{repository}/permissions/user/`] endpoint: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/user/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": {"quayadmin": {"role": "admin", "name": "quayadmin", "is_robot": false, "avatar": {"name": "quayadmin", "hash": "6d640d802fe23b93779b987c187a4b7a4d8fbcbd4febe7009bdff58d84498fba", "color": "#f7b6d2", "kind": "user"}, "is_org_member": true}, "test+example": {"role": "admin", "name": "test+example", "is_robot": true, "avatar": {"name": "test+example", "hash": "3b03050c26e900500437beee4f7f2a5855ca7e7c5eab4623a023ee613565a60e", "color": "#a1d99b", "kind": "robot"}, "is_org_member": true}}} +---- + +. Alternatively, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getusertransitivepermission[`GET /api/v1/repository/{repository}/permissions/user/{username}/transitive`] endpoint to return only the repository permission for the user: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository//permissions/user//transitive" +---- ++ +.Example output ++ +[source,terminal] +---- +{"permissions": [{"role": "admin"}]} +---- + +. You can change the user's permissions, such as making the user an `admin` by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#changeuserpermissions[`PUT /api/v1/repository/{repository}/permissions/user/{username}`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X PUT \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{"role": ""}' \ + "https://quay-server.example.com/api/v1/repository//permissions/user/" +---- ++ +.Example output ++ +[source,terminal] +---- +{"role": "admin", "name": "testuser", "is_robot": false, "avatar": {"name": "testuser", "hash": "f660ab912ec121d1b1e928a0bb4bc61b15f5ad44d5efdc4e1c92a25e99b8e44a", "color": "#6b6ecf", "kind": "user"}, "is_org_member": false} +---- + +. User permissions can be deleted by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#deleteuserpermissions[`DELETE /api/v1/repository/{repository}/permissions/user/{username}`] endpoint. For example: ++ +[source,terminal] +---- +$ curl -X DELETE \ + -H "Authorization: Bearer " \ + "https://quay-server.example.com/api/v1/repository///permissions/user/" +---- ++ +This command does not return output. \ No newline at end of file diff --git a/modules/using-other-oci-artifacts-with-quay.adoc b/modules/using-other-oci-artifacts-with-quay.adoc new file mode 100644 index 000000000..8410dac6c --- /dev/null +++ b/modules/using-other-oci-artifacts-with-quay.adoc @@ -0,0 +1,60 @@ +// Document included in the following assemblies: + +// Using Red Hat Quay + +:_content-type: REFERENCE +[id="using-other-oci-artifacts-with-quay"] += Using other artifact types + +By default, other artifact types are enabled for use by +ifeval::["{context}" == "quay-io"] +{quayio}. +endif::[] +ifeval::["{context}" == "use-quay"] +{productname}. +endif::[] + +ifeval::["{context}" == "use-quay"] +Use the following procedure to add additional OCI media types. + +.Prerequisites + +* You have set `FEATURE_GENERAL_OCI_SUPPORT` to `true` in your `config.yaml` file. + +.Procedure + +. In your `config.yaml` file, add the `ALLOWED_OCI_ARTIFACT_TYPES` configuration field. 
For example: ++ +[source,yaml] +---- +FEATURE_GENERAL_OCI_SUPPORT: true +ALLOWED_OCI_ARTIFACT_TYPES: + : + - + - + + : + - + - +---- + +. Add support for your desired artifact type, for example, Singularity Image Format (SIF), by adding the following to your `config.yaml` file: ++ +[source,yaml] +---- +ALLOWED_OCI_ARTIFACT_TYPES: + application/vnd.oci.image.config.v1+json: + - application/vnd.dev.cosign.simplesigning.v1+json + application/vnd.cncf.helm.config.v1+json: + - application/tar+gzip + application/vnd.sylabs.sif.config.v1+json: + - application/vnd.sylabs.sif.layer.v1+tar +---- ++ +[IMPORTANT] +==== +When adding artifact types that are not configured by default, {productname} administrators will also need to manually add support for Cosign and Helm if desired. +==== ++ +Now, users can tag SIF images for their {productname} registry. +endif::[] \ No newline at end of file diff --git a/modules/using-the-api-to-create-an-organization.adoc b/modules/using-the-api-to-create-an-organization.adoc new file mode 100644 index 000000000..443bbb452 --- /dev/null +++ b/modules/using-the-api-to-create-an-organization.adoc @@ -0,0 +1,75 @@ +:_content-type: PROCEDURE +[id="using-the-api-to-create-an-organization"] +== Using the API to create an organization + +The following procedure details how to use the API to create a {productname} organization. + +.Prerequisites + +* You have invoked the `/api/v1/user/initialize` API, and passed in the username, password, and email address. +* You have called out the rest of the {productname} API by specifying the returned OAuth code. + +.Procedure + +. 
To create an organization, use a POST call to `api/v1/organization/` endpoint: ++ +[source,terminal] +---- +$ curl -X POST -k --header 'Content-Type: application/json' -H "Authorization: Bearer 6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED" https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/organization/ --data '{"name": "testorg", "email": "testorg@example.com"}' +---- ++ +Example output: ++ +[source,yaml] +---- +"Created" +---- + +. You can retrieve the details of the organization you created by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -k --header 'Content-Type: application/json' -H "Authorization: Bearer 6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED" https://min-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/organization/testorg +---- ++ +Example output: ++ +[source,terminal] +---- +{ + "name": "testorg", + "email": "testorg@example.com", + "avatar": { + "name": "testorg", + "hash": "5f113632ad532fc78215c9258a4fb60606d1fa386c91b141116a1317bf9c53c8", + "color": "#a55194", + "kind": "user" + }, + "is_admin": true, + "is_member": true, + "teams": { + "owners": { + "name": "owners", + "description": "", + "role": "admin", + "avatar": { + "name": "owners", + "hash": "6f0e3a8c0eb46e8834b43b03374ece43a030621d92a7437beb48f871e90f8d90", + "color": "#c7c7c7", + "kind": "team" + }, + "can_view": true, + "repo_count": 0, + "member_count": 1, + "is_synced": false + } + }, + "ordered_teams": [ + "owners" + ], + "invoice_email": false, + "invoice_email_address": null, + "tag_expiration_s": 1209600, + "is_free_account": true +} +---- diff --git a/modules/using-the-api.adoc b/modules/using-the-api.adoc new file mode 100644 index 000000000..144c612a6 --- /dev/null +++ b/modules/using-the-api.adoc @@ -0,0 +1,87 @@ +:_content-type: REFERENCE +[id="using-the-api"] += Using the {productname} API + +After you have created an application and generated an OAuth 2 access token with the desired settings, you can pass in the 
access token to `GET`, `PUT`, `POST`, or `DELETE` settings by using the API from the CLI. Generally, a {productname} API command looks similar to the following example: + +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " \ <1> + https:///api/v1/// <2> +---- +<1> The OAuth 2 access token that was generated through the {productname} UI. +<2> The URL of your {productname} deployment and the desired API endpoint. + +All {productname} APIs are documented in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#red_hat_quay_application_programming_interface_api[Application Programming Interface (API)] chapter. Understanding how they are documented is crucial to successful invocation. Take, for example, the following entry for the `createAppToken` API endpoint: + +[source,text] +---- +*createAppToken* <1> +Create a new app specific token for user. <2> + +*POST /api/v1/user/apptoken* <3> + +**Authorizations: **oauth2_implicit (**user:admin**) <4> + + Request body schema (application/json) + +*Path parameters* <5> + +Name: **title** +Description: Friendly name to help identify the token. +Schema: string + +*Responses* <6> + +|HTTP Code|Description |Schema +|201 |Successful creation | +|400 |Bad Request |<<_apierror,ApiError>> +|401 |Session required |<<_apierror,ApiError>> +|403 |Unauthorized access |<<_apierror,ApiError>> +|404 |Not found |<<_apierror,ApiError>> +|=== +---- +<1> The name of the API endpoint. +<2> A brief description of the API endpoint. +<3> The API endpoint used for invocation. +<4> The authorizations required to use the API endpoint. +<5> The available paths to be used with the API endpoint. In this example, `title` is the only path to be used with the `POST /api/v1/user/apptoken` endpoint. +<6> The API responses for this endpoint. + +In order to use an API endpoint, you pass in your access token and then include the appropriate fields depending on your needs. 
The following procedure shows you how to use the `POST /api/v1/user/apptoken` endpoint. + +.Prerequisites + +* You have access to the {productname} API, which entails having already created an OAuth 2 access token. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +* Create a user application by entering the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#appspecifictokens[`POST /api/v1/user/apptoken`] API call: ++ +[source,terminal] +---- +$ curl -X POST \ + -H "Authorization: Bearer " <1> + -H "Content-Type: application/json" \ + -d '{ + "title": "MyAppToken" <2> + }' \ + "http://quay-server.example.com/api/v1/user/apptoken" <3> +---- +<1> The Oauth access token. +<2> The name of your application token. +<3> The URL of your {productname} deployment appended with the `/api/v1/user/apptoken` endpoint. ++ +.Example output ++ +[source,terminal] +---- +{"token": {"uuid": "6b5aa827-cee5-4fbe-a434-4b7b8a245ca7", "title": "MyAppToken", "last_accessed": null, "created": "Wed, 08 Jan 2025 19:32:48 -0000", "expiration": null, "token_code": "K2YQB1YO0ABYV5OBUYOMF9MCUABN12Y608Q9RHFXBI8K7IE8TYCI4WEEXSVH1AXWKZCKGUVA57PSA8N48PWED9F27PXATFUVUD9QDNCE9GOT9Q8ACYPIN0HL"}} +---- + +.Verification + +* On the {productname} UI, click your username in the navigation pane -> *Account Settings*. The name of your application appears under the *Docker CLI and other Application Tokens* heading. For example: ++ +image::application-token.png[Application token] \ No newline at end of file diff --git a/modules/using-the-oauth-token.adoc b/modules/using-the-oauth-token.adoc new file mode 100644 index 000000000..5aa9fa977 --- /dev/null +++ b/modules/using-the-oauth-token.adoc @@ -0,0 +1,45 @@ +:_content-type: PROCEDURE +[id="using-the-oauth-token"] +== Using the OAuth token + +After invoking the API, you can call out the rest of the {productname} API by specifying the returned OAuth code. 
+ +.Prerequisites + +* You have invoked the `/api/v1/user/initialize` API, and passed in the username, password, and email address. + +.Procedure + +* Obtain the list of current users by entering the following command: ++ +[source,terminal] +---- +$ curl -X GET -k -H "Authorization: Bearer 6B4QTRSTSD1HMIG915VPX7BMEZBVB9GPNY2FC2ED" https://example-registry-quay-quay-enterprise.apps.docs.quayteam.org/api/v1/superuser/users/ +---- ++ +Example output: ++ +[source,yaml] +---- +{ + "users": [ + { + "kind": "user", + "name": "quayadmin", + "username": "quayadmin", + "email": "quayadmin@example.com", + "verified": true, + "avatar": { + "name": "quayadmin", + "hash": "3e82e9cbf62d25dec0ed1b4c66ca7c5d47ab9f1f271958298dea856fb26adc4c", + "color": "#e7ba52", + "kind": "user" + }, + "super_user": true, + "enabled": true + } + ] +} +---- ++ +In this instance, the details for the `quayadmin` user are returned as it is the only user that has been created so far. diff --git a/modules/using-v2-ui.adoc b/modules/using-v2-ui.adoc new file mode 100644 index 000000000..8c6f81cac --- /dev/null +++ b/modules/using-v2-ui.adoc @@ -0,0 +1,559 @@ +:_content-type: PROCEDURE +[id="using-v2-ui"] += Using the v2 UI + +ifeval::["{context}" == "quay-io"] +The {quayio} v2 UI is enabled by default, and can be toggled on or off at a user's discretion. +endif::[] +ifeval::["{context}" == "use-quay"] +Use the following procedures to configure, and use, the {productname} v2 UI. +endif::[] + +[id="reference-miscellaneous-v2-ui"] +== v2 user interface configuration +ifeval::["{context}" == "quay-io"] +On {quayio}, you can toggle between the current version of the user interface and the new version of the user interface. +endif::[] + +ifeval::["{context}" == "use-quay"] +With `FEATURE_UI_V2` enabled, you can toggle between the current version of the user interface and the new version of the user interface. +endif::[] + +[IMPORTANT] +==== +* This UI is currently in beta and subject to change. 
In its current state, users can only create, view, and delete organizations, repositories, and image tags. +* When using the old UI, timed-out sessions would require that the user input their password again in the pop-up window. With the new UI, users are returned to the main page and required to input their username and password credentials. This is a known issue and will be fixed in a future version of the new UI. +* There is a discrepancy in how image manifest sizes are reported between the legacy UI and the new UI. In the legacy UI, image manifests were reported in mebibytes. The v2 UI uses the standard definition of megabyte (MB) to report image manifest sizes. +==== + +.Procedure +ifeval::["{context}" == "use-quay"] +. In your deployment's `config.yaml` file, add the `FEATURE_UI_V2` parameter and set it to `true`, for example: ++ +[source,yaml] +---- +--- +FEATURE_TEAM_SYNCING: false +FEATURE_UI_V2: true +FEATURE_USER_CREATION: true +--- +---- +endif::[] + +. Log in to your +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +deployment. + +. In the navigation pane of your deployment, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to new UI, and then click *Use Beta Environment*, for example: ++ +image:38-ui-toggle.png[{productname} v2 UI toggle] + +[id="creating-new-organization-v2-ui"] +=== Creating a new organization using the v2 UI + +.Prerequisites + +* You have toggled your deployment to use the v2 UI. + +Use the following procedure to create an organization using the v2 UI. + +.Procedure + +. Click *Organization* in the navigation pane. + +. Click *Create Organization*. + +. Enter an *Organization Name*, for example, `testorg`. + +. Click *Create*. + +Now, your example organization should populate under the *Organizations* page. 
+ +[id="deleting-organization-v2"] +=== Deleting an organization using the v2 UI + +Use the following procedure to delete an organization using the v2 UI. + +.Procedure + +. On the *Organizations* page, select the name of the organization you want to delete, for example, `testorg`. + +. Click the *More Actions* drop down menu. + +. Click *Delete*. ++ +[NOTE] +==== +On the *Delete* page, there is a *Search* input box. With this box, users can search for specific organizations to ensure that they are properly scheduled for deletion. For example, if a user is deleting 10 organizations and they want to ensure that a specific organization was deleted, they can use the *Search* input box to confirm said organization is marked for deletion. +==== + +. Confirm that you want to permanently delete the organization by typing *confirm* in the box. + +. Click *Delete*. ++ +After deletion, you are returned to the *Organizations* page. ++ +[NOTE] +==== +You can delete more than one organization at a time by selecting multiple organizations, and then clicking *More Actions* -> *Delete*. +==== + +[id="creating-new-repository-v2"] +=== Creating a new repository using the v2 UI + +Use the following procedure to create a repository using the v2 UI. + +.Procedure + +. Click *Repositories* on the navigation pane. + +. Click *Create Repository*. + +. Select a namespace, for example, *quayadmin*, and then enter a *Repository name*, for example, `testrepo`. ++ +[IMPORTANT] +==== +Do not use the following words in your repository name: +* `build` +* `trigger` +* `tag` + +When these words are used for repository names, users are unable access the repository, and are unable to permanently delete the repository. Attempting to delete these repositories returns the following error: `Failed to delete repository , HTTP404 - Not Found.` +==== + +. Click *Create*. ++ +Now, your example repository should populate under the *Repositories* page. 
+ +[id="deleting-repository-v2"] +=== Deleting a repository using the v2 UI + +.Prerequisites + +* You have created a repository. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. + +[id="pushing-image-v2"] +=== Pushing an image to the v2 UI + +Use the following procedure to push an image to the v2 UI. + +.Procedure + +. Pull a sample image from an external registry: ++ +[source,terminal] +---- +$ podman pull busybox +---- + +. Tag the image: ++ +[source,terminal] +---- +$ podman tag docker.io/library/busybox quay-server.example.com/quayadmin/busybox:test +---- + +. Push the image to your registry: ++ +[source,terminal] +---- +$ podman push quay-server.example.com/quayadmin/busybox:test +---- + +. Navigate to the *Repositories* page on the v2 UI and ensure that your image has been properly pushed. + +. You can check the security details by selecting your image tag, and then navigating to the *Security Report* page. + +[id="deleting-image-v2"] +=== Deleting an image using the v2 UI + +Use the following procedure to delete an image using the v2 UI. + +.Prerequisites + +* You have pushed an image to your registry. + +.Procedure + +. On the *Repositories* page of the v2 UI, click the name of the image you want to delete, for example, `quay/admin/busybox`. + +. Click the *More Actions* drop-down menu. + +. Click *Delete*. ++ +[NOTE] +==== +If desired, you could click *Make Public* or *Make Private*. +==== + +. Type *confirm* in the box, and then click *Delete*. + +. After deletion, you are returned to the *Repositories* page. 
+ +[id="creating-team-v2-ui"] +=== Creating a new team using the {productname} v2 UI + +Use the following procedure to create a new team using the {productname} v2 UI. + +.Prerequisites + +* You have created an organization with a repository. + +.Procedure + +. On the {productname} v2 UI, click the name of an organization. + +. On your organization's page, click *Teams and membership*. + +. Click the *Create new team* box. + +. In the *Create team* popup window, provide a name for your new team. + +. Optional. Provide a description for your new team. + +. Click *Proceed*. A new popup window appears. + +. Optional. Add this team to a repository, and set the permissions to one of *Read*, *Write*, *Admin*, or *None*. + +. Optional. Add a team member or robot account. To add a team member, enter the name of their {productname} account. + +. Review and finish the information, then click *Review and Finish*. The new team appears under the *Teams and membership page*. From here, you can click the kebab menu, and select one of the following options: ++ +* **Manage Team Members**. On this page, you can view all members, team members, robot accounts, or users who have been invited. You can also add a new team member by clicking *Add new member*. + +* **Set repository permissions**. On this page, you can set the repository permissions to one of *Read*, *Write*, *Admin*, or *None*. + +* **Delete**. This popup windows allows you to delete the team by clicking *Delete*. + +. Optional. You can click the one of the following options to reveal more information about teams, members, and collaborators: + +* **Team View**. This menu shows all team names, the number of members, the number of repositories, and the role for each team. + +* **Members View**. This menu shows all usernames of team members, the teams that they are part of, the repository permissions of the user. + +* **Collaborators View**. This menu shows repository collaborators. 
Collaborators are users that do not belong to any team in the organization, but who have direct permissions on one or more repositories belonging to the organization. + +[id="creating-robot-account-v2-ui"] +=== Creating a robot account using the v2 UI + +Use the following procedure to create a robot account using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. Click the *Robot accounts* tab -> *Create robot account*. + +. In the *Provide a name for your robot account* box, enter a name, for example, `robot1`. + +. Optional. The following options are available if desired: + +.. Add the robot to a team. + +.. Add the robot to a repository. + +.. Adjust the robot's permissions. + +. On the *Review and finish* page, review the information you have provided, then click *Review and finish*. The following alert appears: *Successfully created robot account with robot name: + *. ++ +Alternatively, if you tried to create a robot account with the same name as another robot account, you might receive the following error message: *Error creating robot account*. + +. Optional. You can click *Expand* or *Collapse* to reveal descriptive information about the robot account. + +. Optional. You can change permissions of the robot account by clicking the kebab menu -> *Set repository permissions*. The following message appears: *Successfully updated repository permission*. + +. Optional. To delete your robot account, check the box of the robot account and click the trash can icon. A popup box appears. Type *confirm* in the text box, then, click *Delete*. Alternatively, you can click the kebab menu -> *Delete*. The following message appears: *Successfully deleted robot account*. 
+ +[id="managing-robot-account-permissions-v2-ui"] +==== Bulk managing robot account repository access using the {productname} v2 UI + +Use the following procedure to manage, in bulk, robot account repository access using the {productname} v2 UI. + +.Prerequisites + +* You have created a robot account. +* You have created multiple repositories under a single organization. + +.Procedure + +. On the {productname} v2 UI landing page, click *Organizations* in the navigation pane. + +. On the *Organizations* page, select the name of the organization that has multiple repositories. The number of repositories under a single organization can be found under the *Repo Count* column. + +. On your organization's page, click *Robot accounts*. + +. For the robot account that will be added to multiple repositories, click the kebab icon -> *Set repository permissions*. + +. On the *Set repository permissions* page, check the boxes of the repositories that the robot account will be added to. For example: ++ +image:set-repository-permissions-robot-account.png[Set repository permissions] + +. Set the permissions for the robot account, for example, *None*, *Read*, *Write*, *Admin*. + +. Click *save*. An alert that says *Success alert: Successfully updated repository permission* appears on the *Set repository permissions* page, confirming the changes. + +. Return to the *Organizations* -> *Robot accounts* page. Now, the *Repositories* column of your robot account shows the number of repositories that the robot account has been added to. + +[id="default-permissions-v2-ui"] +=== Creating default permissions using the {productname} v2 UI + +Default permissions defines permissions that should be granted automatically to a repository when it is created, in addition to the default of the repository's creator. Permissions are assigned based on the user who created the repository. + +Use the following procedure to create default permissions using the {productname} v2 UI. + +.Procedure + +. 
Click the name of an organization. + +. Click *Default permissions*. + +. Click *create default permissions*. A toggle drawer appears. + +. Select either *Anyone* or *Specific user* to create a default permission when a repository is created. + +.. If selecting *Anyone*, the following information must be provided: ++ +* **Applied to**. Search, invite, or add a user/robot/team. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +.. If selecting *Specific user*, the following information must be provided: ++ +* **Repository creator**. Provide either a user or robot account. +* **Applied to**. Provide a username, robot account, or team name. +* **Permission**. Set the permission to one of *Read*, *Write*, or *Admin*. + +. Click *Create default permission*. A confirmation box appears, returning the following alert: *Successfully created default permission for creator*. + +[id="organization-settings-v2-ui"] +=== Organization settings for the v2 UI + +Use the following procedure to alter your organization settings using the v2 UI. + +.Procedure + +. On the v2 UI, click *Organizations*. + +. Click the name of the organization that you will create the robot account for, for example, `test-org`. + +. Click the *Settings* tab. + +. Optional. Enter the email address associated with the organization. + +. Optional. Set the allotted time for the *Time Machine* feature to one of the following: ++ +* *1 week* +* *1 month* +* *1 year* +* *Never* + +. Click *Save*. + +[id="tag-overview-v2-ui"] +=== Viewing image tag information using the v2 UI + +Use the following procedure to view image tag information by using the v2 UI. + +.Procedure + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository, for example, `quayadmin/busybox`. + +. Click the name of the tag, for example, `test`. You are taken to the *Details* page of the tag. 
The page reveals the following information: ++ +* Name +* Repository +* Digest +* Vulnerabilities +* Creation +* Modified +* Size +* Labels +* How to fetch the image tag + +. Optional. Click *Security Report* to view the tag's vulnerabilities. You can expand an advisory column to open up CVE data. + +. Optional. Click *Packages* to view the tag's packages. + +. Click the name of the repository, for example, `busybox`, to return to the *Tags* page. + +. Optional. Hover over the *Pull* icon to reveal the ways to fetch the tag. + +. Check the box of the tag, or multiple tags, click the *Actions* drop down menu, and then *Delete* to delete the tag. Confirm deletion by clicking *Delete* in the popup box. + +[id="settings-overview-v2-ui"] +=== Adjusting repository settings using the v2 UI + +Use the following procedure to adjust various settings for a repository using the v2 UI. + +.Procedure + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository, for example, `quayadmin/busybox`. + +. Click the *Settings* tab. + +. Optional. Click *User and robot permissions*. You can adjust the settings for a user or robot account by clicking the dropdown menu option under *Permissions*. You can change the settings to *Read*, *Write*, or *Admin*. + +. Optional. Click *Events and notifications*. You can create an event and notification by clicking *Create Notification*. The following event options are available: ++ +* Push to Repository +* Package Vulnerability Found +* Image build failed +* Image build queued +* Image build started +* Image build success +* Image build cancelled ++ +Then, issue a notification. The following options are available: ++ +* Email Notification +* Flowdock Team Notification +* HipChat Room Notification +* Slack Notification +* Webhook POST ++ +After selecting an event option and the method of notification, include a *Room ID #*, a *Room Notification Token*, then, click *Submit*. + +. Optional. Click *Repository visibility*. 
You can make the repository private or public by clicking *Make Public*.
+ +[id="setting-tag-expirations-v2-ui"] +== Setting tag expirations on the {productname} v2 UI + +{productname} administrators can set expiration dates for certain tags in a repository. This helps automate the cleanup of older or unused tags, helping to reduce storage space. + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click the menu kebab for an image and select *Change expiration*. + +. Optional. Alternatively, you can bulk add expiration dates by clicking the box of multiple tags, and then select *Actions* -> *Set expiration*. + +. In the *Change Tags Expiration* window, set an expiration date, specifying the day of the week, month, day of the month, and year. For example, `Wednesday, November 15, 2023`. Alternatively, you can click the calendar button and manually select the date. + +. Set the time, for example, `2:30 PM`. + +. Click *Change Expiration* to confirm the date and time. The following notification is returned: `Successfully set expiration for tag test to Nov 15, 2023, 2:26 PM`. + +. On the {productname} v2 UI *Tags* page, you can see when the tag is set to expire. For example: ++ +image:tag-expiration-v2-ui.png[{productname} v2 UI tag expiration] + +[id="selecting-dark-mode-ui"] +== Selecting color theme preference on the {productname} v2 UI + +Users can switch between light and dark modes when using the v2 UI. This feature also includes an automatic mode selection, which chooses between light or dark modes depending on the user's browser preference. + +Use the following procedure to switch between automatic, light, and dark modes. + +.Procedure + +. Log in to your {productname} repository. + +. In the navigation pane, click your username, for example, *quayadmin*. + +. Under *Appearance*, select between *Light theme*, *Dark theme*, and *Device-based theme*. 
Device based theme sets the mode depending on your browser's color preference. + +[id="viewing-usage-logs-v2-ui"] +== Viewing usage logs on the {productname} v2 UI + +{productname} logs can provide valuable information about the way that your {productname} registry is being used. Logs can be viewed by Organization, repository, or namespace on the v2 UI by using the following procedure. + +.Procedure + +. Log in to your {productname} registry. + +. Navigate to an Organization, repository, or namespace for which you are an administrator of. + +. Click *Logs*. ++ +image:logsv2-ui.png[Logs page] + +. Optional. Set the date range for viewing log entries by adding dates to the *From* and *To* boxes. + +. Optional. Export the logs by clicking *Export*. You must enter an email address or a valid callback URL that starts with `http://` or `https://`. This process can take an hour depending on how many logs there are. + +[id="enabling-legacy-ui"] +== Enabling the legacy UI + +. In the navigation pane, you are given the option to toggle between *Current UI* and *New UI*. Click the toggle button to set it to *Current UI*. ++ +image:38-ui-toggle.png[{productname} v2 UI toggle] \ No newline at end of file diff --git a/modules/viewing-additional-info-about-team-ui.adoc b/modules/viewing-additional-info-about-team-ui.adoc new file mode 100644 index 000000000..def39a34e --- /dev/null +++ b/modules/viewing-additional-info-about-team-ui.adoc @@ -0,0 +1,20 @@ +// module included in the following assemblies: + +// * use_quay/master.adoc +// * quay_io/master.adoc + +:_content-type: PROCEDURE +[id="viewing-additional-info-about-team-ui"] +== Viewing additional information about a team + +Use the following procedure to view general information about the team. + +.Procedure + +* On the *Teams and membership* page of your organization, you can click the one of the following options to reveal more information about teams, members, and collaborators: + +** **Team View**. 
This menu shows all team names, the number of members, the number of repositories, and the role for each team. + +** **Members View**. This menu shows all usernames of team members, the teams that they are part of, the repository permissions of the user. + +** **Collaborators View**. This menu shows repository collaborators. Collaborators are users that do not belong to any team in the organization, but who have direct permissions on one or more repositories belonging to the organization. \ No newline at end of file diff --git a/modules/viewing-and-modifying-tags.adoc b/modules/viewing-and-modifying-tags.adoc new file mode 100644 index 000000000..b57edb7e6 --- /dev/null +++ b/modules/viewing-and-modifying-tags.adoc @@ -0,0 +1,33 @@ +:_content-type: PROCEDURE +[id="viewing-and-modifying-tags"] += Viewing image tag information by using the UI + +Use the following procedure to view image tag information using the v2 UI. + +.Prerequisites + +* You have pushed an image tag to a repository. + +.Procedure + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository. + +. Click the name of a tag. You are taken to the *Details* page of that tag. The page reveals the following information: ++ +* Name +* Repository +* Digest +* Vulnerabilities +* Creation +* Modified +* Size +* Labels +* How to fetch the image tag + +. Click *Security Report* to view the tag's vulnerabilities. You can expand an advisory column to open up CVE data. + +. Click *Packages* to view the tag's packages. + +. Click the name of the repository to return to the *Tags* page. 
\ No newline at end of file diff --git a/modules/viewing-model-card-information.adoc b/modules/viewing-model-card-information.adoc new file mode 100644 index 000000000..82bae429a --- /dev/null +++ b/modules/viewing-model-card-information.adoc @@ -0,0 +1,38 @@ +:_content-type: PROCEDURE +[id="viewing-model-card-information"] += Viewing model card information by using the UI + +Model card information can be viewed on the v2 UI. Model cards are essentially markdown (`.md`) files with additional metadata that provide information about a machine learning application. To view model card information, a manifest must have an annotation that is defined in your `config.yaml` file (for example, `application/x-mlmodel`) and include a model card stored as a layer in the manifest. When these conditions are met, a *Model Card* tab appears on the *Details* page of a tag. + +* You have pushed an artifact of that annotation type, and it includes a model card (`.md`) file. + +.Procedure + +. Update your `config.yaml` file to include the following information: ++ +.Example model card YAML +[source,yaml] +---- +FEATURE_UI_MODELCARD: true <1> +UI_MODELCARD_ARTIFACT_TYPE: application/x-mlmodel <2> +UI_MODELCARD_ANNOTATION: <3> + org.opencontainers.image.description: "Model card metadata" +UI_MODELCARD_LAYER_ANNOTATION: <4> + org.opencontainers.image.title: README.md +---- +<1> Enables the Model Card image tab in the UI. +<2> Defines the model card artifact type. In this example, the artifact type is `application/x-mlmodel`. +<3> Optional. If an image does not have an `artifactType` defined, this field is checked at the manifest level. If a matching annotation is found, the system then searches for a layer with an annotation matching `UI_MODELCARD_LAYER_ANNOTATION`. +<4> Optional. If an image has an `artifactType` defined and multiple layers, this field is used to locate the specific layer containing the model card. + +. 
Push an artifact of that annotation type, and one that includes a model card (`.md`) file, to your repository. + +. On the v2 UI, click *Repositories*. + +. Click the name of a repository. + +. Click the name of a tag. You are taken to the *Details* page of that tag. + +. Click *ModelCard* to view information about the image. For example: ++ +image::modelcard.png[Modelcard information] \ No newline at end of file diff --git a/modules/viewing-tag-history-v2-api.adoc b/modules/viewing-tag-history-v2-api.adoc new file mode 100644 index 000000000..df2a86088 --- /dev/null +++ b/modules/viewing-tag-history-v2-api.adoc @@ -0,0 +1,51 @@ +:_content-type: PROCEDURE +[id="viewing-tag-history-v2-api"] += Viewing {productname} tag history by using the API + +{productname} offers a comprehensive history of images and their respective image tags. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. Enter the following command to view tag history by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] command and passing in one of the following queries: ++ +* *onlyActiveTags=*: Filters to only include active tags. + +* *page=*: Specifies the page number of results to retrieve. + +* *limit=*: Limits the number of results per page. + +* *specificTag=*: Filters the tags to include only the tag with the specified name. 
++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https:///api/v1/repository///tag/?onlyActiveTags=true&page=1&limit=10" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test-two", "reversion": false, "start_ts": 1717680780, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:33:00 -0000"}, {"name": "tag-test", "reversion": false, "start_ts": 1717680378, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:26:18 -0000"}, {"name": "example", "reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}], "page": 1, "has_additional": false} +---- + +. By using the `specificTag=` query, you can filter results for a specific tag. 
For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "/api/v1/repository/quayadmin/busybox/tag/?onlyActiveTags=true&page=1&limit=20&specificTag=test-two" +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test-two", "reversion": true, "start_ts": 1718737153, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 18 Jun 2024 18:59:13 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/viewing-tag-history-v2-ui.adoc b/modules/viewing-tag-history-v2-ui.adoc new file mode 100644 index 000000000..d6d24302f --- /dev/null +++ b/modules/viewing-tag-history-v2-ui.adoc @@ -0,0 +1,24 @@ +:_content-type: PROCEDURE +[id="viewing-tag-history-v2-ui"] += Viewing {productname} tag history by using the UI + +ifeval::["{context}" == "quay-io"] +{quayio} +endif::[] +ifeval::["{context}" == "use-quay"] +{productname} +endif::[] +offers a comprehensive history of images and their respective image tags. + +.Procedure + +. On the {productname} v2 UI dashboard, click *Repositories* in the navigation pane. + +. Click the name of a repository that has image tags. + +. Click *Tag History*. 
On this page, you can perform the following actions: ++ +* Search by tag name +* Select a date range +* View tag changes +* View tag modification dates and the time at which they were changed \ No newline at end of file diff --git a/modules/viewing-tags-api.adoc b/modules/viewing-tags-api.adoc new file mode 100644 index 000000000..10163a0b6 --- /dev/null +++ b/modules/viewing-tags-api.adoc @@ -0,0 +1,47 @@ +:_content-type: CONCEPT +[id="viewing-and-modifying-tags-api"] += Viewing image tag information by using the API + +Use the following procedure to view image tag information by using the API + +.Prerequisites + +* You have pushed an image tag to a {productname} repository. +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. To obtain tag information, you must use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getrepo[`GET /api/v1/repository/{repository}`] API endpoint and pass in the `includeTags` parameter. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository//?includeTags=true +---- ++ +.Example output ++ +[source,terminal] +---- +{"namespace": "quayadmin", "name": "busybox", "kind": "image", "description": null, "is_public": false, "is_organization": false, "is_starred": false, "status_token": "d8f5e074-690a-46d7-83c8-8d4e3d3d0715", "trust_enabled": false, "tag_expiration_s": 1209600, "is_free_account": true, "state": "NORMAL", "tags": {"example": {"name": "example", "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000", "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d"}, "test": {"name": "test", "size": 2275314, "last_modified": "Tue, 14 May 2024 14:04:48 -0000", "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d"}}, "can_write": true, "can_admin": true} +---- + +. Alternatively, you can use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepotags[`GET /api/v1/repository/{repository}/tag/`] endpoint. 
For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + https:///api/v1/repository///tag/ +---- ++ +.Example output ++ +[source,terminal] +---- +{"tags": [{"name": "test-two", "reversion": true, "start_ts": 1718737153, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 18 Jun 2024 18:59:13 -0000"}, {"name": "test-two", "reversion": false, "start_ts": 1718737029, "end_ts": 1718737153, "manifest_digest": "sha256:0cd3dd6236e246b349e63f76ce5f150e7cd5dbf2f2f1f88dbd734430418dbaea", "is_manifest_list": false, "size": 2275317, "last_modified": "Tue, 18 Jun 2024 18:57:09 -0000", "expiration": "Tue, 18 Jun 2024 18:59:13 -0000"}, {"name": "test-two", "reversion": false, "start_ts": 1718737018, "end_ts": 1718737029, "manifest_digest": "sha256:0cd3dd6236e246b349e63f76ce5f150e7cd5dbf2f2f1f88dbd734430418dbaea", "is_manifest_list": false, "size": 2275317, "last_modified": "Tue, 18 Jun 2024 18:56:58 -0000", "expiration": "Tue, 18 Jun 2024 18:57:09 -0000"}, {"name": "sample_tag", "reversion": false, "start_ts": 1718736147, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 18 Jun 2024 18:42:27 -0000"}, {"name": "test-two", "reversion": false, "start_ts": 1717680780, "end_ts": 1718737018, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:33:00 -0000", "expiration": "Tue, 18 Jun 2024 18:56:58 -0000"}, {"name": "tag-test", "reversion": false, "start_ts": 1717680378, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Thu, 06 Jun 2024 13:26:18 -0000"}, {"name": "example", 
"reversion": false, "start_ts": 1715698131, "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d", "is_manifest_list": false, "size": 2275314, "last_modified": "Tue, 14 May 2024 14:48:51 -0000"}], "page": 1, "has_additional": false} +---- \ No newline at end of file diff --git a/modules/viewing-usage-logs-api.adoc b/modules/viewing-usage-logs-api.adoc new file mode 100644 index 000000000..7700a9149 --- /dev/null +++ b/modules/viewing-usage-logs-api.adoc @@ -0,0 +1,115 @@ +:_content-type: CONCEPT +[id="viewing-usage-logs-v2-api"] += Viewing usage logs by using the API + +Logs can be viewed by Organization or repository by using the API. They can also be aggregated (grouped), or listed with more detailed. Logs can also be viewed by user, a specific date range, or by page. + +[id="viewing-aggregated-logs-api"] +== Viewing aggregated logs + +Aggregated logs can be viewed by Organization, repository, a specific user, or the current user. You can also pass in optional commands like `performer`, `starttime/endtime`, and `next_page` to filter results. + +.Prerequisites + +* You have link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#creating-oauth-access-token[Created an OAuth access token]. +* You have set `BROWSER_API_CALLS_XHR_ONLY: false` in your `config.yaml` file. + +.Procedure + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getaggregateuserlogs[`GET /api/v1/user/aggregatelogs`] API endpoint to return the aggregated (or grouped) logs for the current user: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "https:///api/v1/user/aggregatelogs" +---- ++ +.Example output ++ +[source,terminal] +---- +{"aggregated": [{"kind": "create_tag", "count": 1, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}, {"kind": "manifest_label_add", "count": 1, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}, {"kind": "push_repo", "count": 2, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}, {"kind": "revert_tag", "count": 1, "datetime": "Tue, 18 Jun 2024 00:00:00 -0000"}]} +---- ++ +You can also pass in the `performer` and `starttime/endtime` queries to obtain aggregated logs for a specific user between a specific time period. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/user/aggregatelogs?performer=&starttime=&endtime=" +---- + + +. Aggregated logs can also be viewed by Organization by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getaggregateorglogs[`GET /api/v1/organization/{orgname}/aggregatelogs`]. For example: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/organization/{orgname}/aggregatelogs" +---- + +. Aggregated logs can also be viewed by repository by using the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#getaggregaterepologs[`GET /api/v1/repository/{repository}/aggregatelogs`] command. 
The following example includes the `starttime/endtime` fields: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "/api/v1/repository///aggregatelogs?starttime=2024-01-01&endtime=2024-06-18" +---- + +[id="viewing-logs-api"] +== Viewing detailed logs + +Detailed logs can be viewed by Organization, repository, a specific user, or the current user. You can also pass in optional fields like `performer`, `starttime/endtime`, and `next_page` to filter results. + +.Procedure + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listuserlogs[`GET /api/v1/user/logs`] API endpoint to return a list of log entries for a user. For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "/api/v1/user/logs" +---- ++ +You can also pass in the `performer` and `starttime/endtime` queries to obtain logs for a specific user during a specific time period. 
For example: ++ +[source,terminal] +---- +$ curl -X GET -H "Authorization: Bearer " -H "Accept: application/json" "http://quay-server.example.com/api/v1/user/logs?performer=quayuser&starttime=01/01/2024&endtime=06/18/2024" +---- ++ +.Example output ++ +[source,terminal] +---- +--- +{"start_time": "Mon, 01 Jan 2024 00:00:00 -0000", "end_time": "Wed, 19 Jun 2024 00:00:00 -0000", "logs": [{"kind": "revert_tag", "metadata": {"username": "quayuser", "repo": "busybox", "tag": "test-two", "manifest_digest": "sha256:57583a1b9c0a7509d3417387b4f43acf80d08cdcf5266ac87987be3f8f919d5d"}, "ip": "192.168.1.131", "datetime": "Tue, 18 Jun 2024 18:59:13 -0000", "performer": {"kind": "user", "name": "quayuser", "is_robot": false, "avatar": {"name": "quayuser", "hash": "b28d563a6dc76b4431fc7b0524bbff6b810387dac86d9303874871839859c7cc", "color": "#17becf", "kind": "user"}}}, {"kind": "push_repo", "metadata": {"repo": "busybox", "namespace": "quayuser", "user-agent": "containers/5.30.1 (github.com/containers/image)", "tag": "test-two", "username": "quayuser", } +--- +---- + +. Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listorglogs[`GET /api/v1/organization/{orgname}/logs`] endpoint to return logs for a specified organization: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/organization/{orgname}/logs" +---- + +. 
Use the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index#listrepologs[`GET /api/v1/repository/{repository}/logs`] endpoint to return logs for a specified repository: ++ +[source,terminal] +---- +$ curl -X GET \ + -H "Authorization: Bearer " \ + -H "Accept: application/json" \ + "http:///api/v1/repository/{repository}/logs" +---- \ No newline at end of file diff --git a/modules/viewing-usage-logs-v2-ui.adoc b/modules/viewing-usage-logs-v2-ui.adoc new file mode 100644 index 000000000..1350fedc8 --- /dev/null +++ b/modules/viewing-usage-logs-v2-ui.adoc @@ -0,0 +1,19 @@ +:_content-type: CONCEPT +[id="viewing-usage-logs-v2-ui"] += Viewing usage logs + +Logs can provide valuable information about the way that your registry is being used. Logs can be viewed by Organization, repository, or namespace on the v2 UI by using the following procedure. + +.Procedure + +. Log in to your {productname} registry. + +. Navigate to an Organization, repository, or namespace of which you are an administrator. + +. Click *Logs*. ++ +image:logsv2-ui.png[Logs page] + +. Optional. Set the date range for viewing log entries by adding dates to the *From* and *To* boxes. + +. Optional. Export the logs by clicking *Export*. You must enter an email address or a valid callback URL that starts with `http://` or `https://`. This process can take an hour depending on how many logs there are. 
\ No newline at end of file diff --git a/quay_io/docinfo.xml b/quay_io/docinfo.xml new file mode 100644 index 000000000..a6b55b15a --- /dev/null +++ b/quay_io/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +About Quay IO + + About Quay IO + + + Red Hat OpenShift Documentation Team + + diff --git a/quay_io/master.adoc b/quay_io/master.adoc new file mode 100644 index 000000000..3e709b54f --- /dev/null +++ b/quay_io/master.adoc @@ -0,0 +1,113 @@ +:_content-type: ASSEMBLY + +include::modules/attributes.adoc[] + +[id="quay-io"] += About Quay IO + +:context: quay-io + +This comprehensive guide provides users with the knowledge and tools needed to make the most of our robust and feature-rich container registry service, {quayio}. + +include::modules/quayio-overview.adoc[leveloffset=+1] +include::modules/quayio-support.adoc[leveloffset=+1] +//ui +include::modules/quayio-ui-overview.adoc[leveloffset=+1] +include::modules/quayio-main-page.adoc[leveloffset=+2] + +include::modules/user-org-intro.adoc[leveloffset=+1] +include::modules/tenancy-model.adoc[leveloffset=+2] +include::modules/logging-into-quayio.adoc[leveloffset=+2] +//organization +include::modules/organizations-overview.adoc[leveloffset=+1] +include::modules/org-create.adoc[leveloffset=+2] +include::modules/organization-settings-v2-ui.adoc[leveloffset=+2] +//repo +include::modules/proc_use-quay-create-repo.adoc[leveloffset=+1] +include::modules/creating-an-image-repository-via-the-ui.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-docker.adoc[leveloffset=+2] +include::modules/deleting-an-image-repository-via-ui.adoc[leveloffset=+2] +include::modules/proc_configure-user-settings.adoc[leveloffset=+2] +//robot accounts +include::modules/robot-account-overview.adoc[leveloffset=+1] +include::modules/creating-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/managing-robot-account-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/disabling-robot-account.adoc[leveloffset=+2] 
+include::modules/deleting-robot-account-v2-ui.adoc[leveloffset=+2] +// federation +include::modules/keyless-authentication-robot-accounts.adoc[leveloffset=+2] + +//access management repositories +include::modules/proc_use-quay-manage-repo.adoc[leveloffset=+1] +include::modules/teams-overview.adoc[leveloffset=+2] +include::modules/creating-a-team-ui.adoc[leveloffset=+3] + +include::modules/managing-team-ui.adoc[leveloffset=+3] +include::modules/add-users-to-team.adoc[leveloffset=+4] +include::modules/set-team-role.adoc[leveloffset=+4] +include::modules/managing-team-members-repo-permissions-ui.adoc[leveloffset=+4] +include::modules/viewing-additional-info-about-team-ui.adoc[leveloffset=+4] + +include::modules/managing-a-team-api.adoc[leveloffset=+3] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+4] + +include::modules/default-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/allow-access-user-repo.adoc[leveloffset=+2] + +//tags +include::modules/image-tags-overview.adoc[leveloffset=+1] +include::modules/viewing-and-modifying-tags.adoc[leveloffset=+2] +include::modules/adding-a-new-tag-to-image.adoc[leveloffset=+2] +include::modules/adding-managing-labels.adoc[leveloffset=+2] +include::modules/setting-tag-expirations-v2-ui.adoc[leveloffset=+2] +include::modules/fetching-images-and-tags.adoc[leveloffset=+2] +include::modules/viewing-tag-history-v2-ui.adoc[leveloffset=+2] +include::modules/deleting-a-tag.adoc[leveloffset=+2] +include::modules/reverting-tag-changes.adoc[leveloffset=+2] + +//view and export logs +include::modules/proc_use-quay-view-export-logs.adoc[leveloffset=+1] +include::modules/viewing-usage-logs-v2-ui.adoc[leveloffset=+2] +include::modules/use-quay-export-logs.adoc[leveloffset=+2] + +//clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/about-clair.adoc[leveloffset=+2] +include::modules/security-scanning-ui.adoc[leveloffset=+2] 
+include::modules/clair-severity-mapping.adoc[leveloffset=+2] + +//docker files +//include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+1] +include::modules/understanding-tag-naming-build-triggers.adoc[leveloffset=+2] +include::modules/skipping-source-control-triggered-build.adoc[leveloffset=+2] +include::modules/starting-a-build.adoc[leveloffset=+2] + + +include::modules/build-trigger-overview.adoc[leveloffset=+2] +include::modules/red-hat-quay-builders-ui.adoc[leveloffset=+3] +include::modules/manually-triggering-a-build-trigger.adoc[leveloffset=+3] + +//Custom Git Triggers +include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+2] +//Notifications +include::modules/proc_use-quay-notifications.adoc[leveloffset=+1] +include::modules/notification-actions.adoc[leveloffset=+2] +include::modules/creating-notifications.adoc[leveloffset=+2] +include::modules/creating-image-expiration-notification.adoc[leveloffset=+3] +include::modules/creating-notifications-api.adoc[leveloffset=+2] +include::modules/repository-events.adoc[leveloffset=+2] +//helm +include::modules/oci-intro.adoc[leveloffset=+1] +include::modules/helm-oci-prereqs.adoc[leveloffset=+2] +include::modules/helm-oci-quay.adoc[leveloffset=+2] + +//cosign +include::modules/cosign-oci-intro.adoc[leveloffset=+2] +include::modules/cosign-oci-with-quay.adoc[leveloffset=+2] + +//other oci media types +//include::modules/using-other-oci-artifacts-with-quay.adoc[leveloffset=+2] + +//v2 UI +//include::modules/using-v2-ui.adoc[leveloffset=+1] \ No newline at end of file diff --git a/quay_io/modules b/quay_io/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/quay_io/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/quick_start/docinfo.xml b/quick_start/docinfo.xml new file mode 100644 index 000000000..6b9e463fe --- /dev/null +++ b/quick_start/docinfo.xml @@ -0,0 +1,10 @@ 
+{productname} +{producty} +{productname} Quick start guide + + {productname} Quick start guide + + + Red Hat OpenShift Documentation Team + + diff --git a/quick_start/master.adoc b/quick_start/master.adoc new file mode 100644 index 000000000..d0bf396e5 --- /dev/null +++ b/quick_start/master.adoc @@ -0,0 +1,32 @@ +include::modules/attributes.adoc[] + +[id='quay-quick-start'] += {productname} Quick Start Guide + + +== General + +=== Using SSL to protect connections to {productname} +//include::modules/proc_manage-quay-ssl.adoc[leveloffset=+1] +include::modules/ssl-intro.adoc[leveloffset=+3] +include::modules/ssl-create-certs.adoc[leveloffset=+3] +include::modules/ssl-config-ui.adoc[leveloffset=+3] +include::modules/ssl-config-cli.adoc[leveloffset=+3] +include::modules/ssl-testing-cli.adoc[leveloffset=+3] +include::modules/ssl-testing-ui.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-podman.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-system.adoc[leveloffset=+3] + +//// +include::modules/proc_deploy_quay_poc_dns.adoc[leveloffset=+2] +//// +include::modules/proc_deploy_quay_common_superuser.adoc[leveloffset=+2] + +=== Repository Mirroring + +include::modules/mirroring-intro.adoc[leveloffset=+3] +include::modules/config-ui-mirroring.adoc[leveloffset=+3] +include::modules/mirroring-worker.adoc[leveloffset=+3] +include::modules/mirroring-creating-repo.adoc[leveloffset=+3] +include::modules/mirroring-tag-patterns.adoc[leveloffset=+3] + diff --git a/quick_start/modules b/quick_start/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/quick_start/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/red_hat_quay_operator_features/docinfo.xml b/red_hat_quay_operator_features/docinfo.xml new file mode 100644 index 000000000..096b57724 --- /dev/null +++ b/red_hat_quay_operator_features/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Advanced {productname} Operator features + + Advanced {productname} Operator 
features + + + Red Hat OpenShift Documentation Team + + diff --git a/red_hat_quay_operator_features/master.adoc b/red_hat_quay_operator_features/master.adoc new file mode 100644 index 000000000..19b3f86e9 --- /dev/null +++ b/red_hat_quay_operator_features/master.adoc @@ -0,0 +1,135 @@ +include::modules/attributes.adoc[] + +[id="quay-operator-advanced-features"] += {productname} Operator features +:context: operator-features + +// fips +include::modules/fips-overview.adoc[leveloffset=+1] + +//monitoring +include::modules/operator-console-monitoring-alerting.adoc[leveloffset=+1] + +//// +include::modules/configuring-port-mapping.adoc[leveloffset=+3] +include::modules/proc_deploy_quay_poc_db.adoc[leveloffset=+3] +include::modules/proc_deploy_quay_poc_redis.adoc[leveloffset=+3] +include::modules/operator-config-ui-access.adoc[leveloffset=+2] +include::modules/operator-config-ui-change.adoc[leveloffset=+2] +include::modules/operator-config-ui-monitoring.adoc[leveloffset=+2] +include::modules/operator-config-ui-updated.adoc[leveloffset=+2] +include::modules/config-ui-custom-ssl-certs.adoc[leveloffset=+2] + +include::modules/operator-external-access.adoc[leveloffset=+2] + +//move to using Operator +include::modules/operator-quayregistry-api.adoc[leveloffset=+2] +//// + +//clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+1] +include::modules/clair-vulnerability-scanner-hosts.adoc[leveloffset=+2] +include::modules/clair-openshift.adoc[leveloffset=+2] +include::modules/clair-testing.adoc[leveloffset=+2] +include::modules/clair-advanced-configuration-overview.adoc[leveloffset=+2] +include::modules/clair-unmanaged.adoc[leveloffset=+3] +include::modules/unmanaging-clair-database.adoc[leveloffset=+4] +include::modules/configuring-custom-clair-database.adoc[leveloffset=+4] +include::modules/custom-clair-configuration-managed-database.adoc[leveloffset=+3] +include::modules/managed-clair-database.adoc[leveloffset=+4] 
+include::modules/configuring-custom-clair-database-managed.adoc[leveloffset=+4] +include::modules/clair-disconnected.adoc[leveloffset=+3] +include::modules/clair-clairctl.adoc[leveloffset=+4] +include::modules/clair-openshift-config.adoc[leveloffset=+5] +include::modules/clair-export-bundle.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-import-bundle.adoc[leveloffset=+5] +include::modules/clair-clairctl-standalone.adoc[leveloffset=+4] +include::modules/clair-standalone-config-location.adoc[leveloffset=+5] +include::modules/clair-export-bundle-standalone.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-database-standalone.adoc[leveloffset=+5] +include::modules/clair-openshift-airgap-import-bundle-standalone.adoc[leveloffset=+5] +//include::modules/clair-crda-configuration.adoc[leveloffset=+3] +include::modules/mapping-repositories-to-cpe-information.adoc[leveloffset=+3] + +//infrastructure +include::modules/operator-deploy-infrastructure.adoc[leveloffset=+1] + +//single namespace +//include::modules/monitoring-single-namespace.adoc[leveloffset=+2] + +//resize storage +include::modules/operator-resize-storage.adoc[leveloffset=+2] + +//customize images +include::modules/operator-customize-images.adoc[leveloffset=+2] + +//cloudfront +include::modules/operator-cloudfront.adoc[leveloffset=+2] + +// builders +include::modules/build-enhancements.adoc[leveloffset=+1] +//include::modules/build-enhanced-arch.adoc[leveloffset=+2] +//include::modules/build-limitations.adoc[leveloffset=+2] +//include::modules/builders-virtual-environment.adoc[leveloffset=+2] + +//geo-replication +include::modules/georepl-intro.adoc[leveloffset=+1] +[discrete] +== Additional resources +* For more information about the geo-replication feature's architecture, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html-single/red_hat_quay_architecture/index#georepl-intro[the 
architecture guide], which includes technical diagrams and a high-level overview. + +include::modules/arch-georpl-features.adoc[leveloffset=+2] +include::modules/georepl-prereqs.adoc[leveloffset=+2] +//include::modules/georepl-arch-operator.adoc[leveloffset=+2] +include::modules/georepl-deploy-operator.adoc[leveloffset=+3] +include::modules/georepl-mixed-storage.adoc[leveloffset=+3] +include::modules/upgrading-geo-repl-quay-operator.adoc[leveloffset=+2] +include::modules/operator-georepl-site-removal.adoc[leveloffset=+3] + + +//backup and restore +include::modules/backing-up-and-restoring-intro.adoc[leveloffset=+1] +include::modules/optional-enabling-read-only-mode-backup-restore-ocp.adoc[leveloffset=+2] +include::modules/backing-up-red-hat-quay-operator.adoc[leveloffset=+2] +include::modules/restoring-red-hat-quay.adoc[leveloffset=+2] + +//helm OCI +//include::modules/operator-helm-oci.adoc[leveloffset=+1] + +//volume size overrides +include::modules/operator-volume-size-overrides.adoc[leveloffset=+1] + +//cso +include::modules/proc_container-security-operator-setup.adoc[leveloffset=+1] + +//oidc + +//awssts +include::modules/configuring-aws-sts-quay.adoc[leveloffset=+1] +include::modules/configuring-quay-ocp-aws-sts.adoc[leveloffset=+2] + +//qbo +include::modules/conc_quay-bridge-operator.adoc[leveloffset=+1] +include::modules/proc_setting-up-quay-for-qbo.adoc[leveloffset=+2] +include::modules/proc_installing-qbo-on-ocp.adoc[leveloffset=+2] +include::modules/proc_creating-ocp-secret-for-oauth-token.adoc[leveloffset=+2] +include::modules/proc_creating-quay-integration-cr.adoc[leveloffset=+2] +include::modules/quay-bridge-operator-test.adoc[leveloffset=+2] + +//ipv6 +include::modules/operator-ipv6-dual-stack.adoc[leveloffset=+1] + +//custom certs on kubernetes +include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+1] + + +//operator upgrade +include::modules/operator-upgrade.adoc[leveloffset=+1] + + + +[discrete] +== Additional resources +* For more 
details on the {productname} Operator, see the upstream +link:https://github.com/quay/quay-operator/[quay-operator] project. diff --git a/red_hat_quay_operator_features/modules b/red_hat_quay_operator_features/modules new file mode 120000 index 000000000..464b823ac --- /dev/null +++ b/red_hat_quay_operator_features/modules @@ -0,0 +1 @@ +../modules \ No newline at end of file diff --git a/red_hat_quay_overview/docinfo.xml b/red_hat_quay_overview/docinfo.xml new file mode 100644 index 000000000..aedbca614 --- /dev/null +++ b/red_hat_quay_overview/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +{productname} overview + + {productname} overview + + + Red Hat OpenShift Documentation Team + + diff --git a/red_hat_quay_overview/master.adoc b/red_hat_quay_overview/master.adoc new file mode 100644 index 000000000..6889fba55 --- /dev/null +++ b/red_hat_quay_overview/master.adoc @@ -0,0 +1,17 @@ +:_content-type: ASSEMBLY +include::modules/attributes.adoc[] + +[id="quay-overview"] += {productname} overview + +{productname} is a security-focused and scalable private registry platform for managing content across globally distributed data center and cloud environments. It provides a single and resilient content repository for delivering containerized software to development and production across {ocp} and Kubernetes clusters. {productname} is a distributed and highly available container image registry for your enterprise. + +{productname} can be used for storing, building, and distributing container images and other OCI artifacts. It offers an intuitive web interface that allows users to quickly upload and manage their container images. Administrators can create private repositories, ensuring sensitive or proprietary code remains secure within their organization. Additionally, access controls and team collaboration can be managed, which enables seamless sharing of container images among designated team members. 
+ +{productname} addresses container security concerns through its image scanner, link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/vulnerability_reporting_with_clair_on_red_hat_quay/index[Clair]. When enabled, the service automatically scans container images for known vulnerabilities and security issues, providing developers with valuable insights into potential risks and suggesting remediation steps. + +{productname} excels in automation, and supports integration with popular Continuous Integration/Continuous Deployment (CI/CD) tools and platforms, enabling seamless automation of the container build and deployment processes. As a result, developers can streamline their workflows, significantly reducing manual intervention and improving overall development efficiency. + +{productname} caters to the needs of both large and small-scale deployments. Its high availability support ensures that organizations can rely on it for mission-critical applications. The platform can handle significant container image traffic and offers efficient replication and distribution mechanisms to deliver container images to various geographical locations. 
+ +include::modules/con_quay_intro.adoc[leveloffset=+1] \ No newline at end of file diff --git a/red_hat_quay_overview/modules b/red_hat_quay_overview/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/red_hat_quay_overview/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/release_notes/docinfo.xml b/release_notes/docinfo.xml index ec397f6e0..570c95762 100644 --- a/release_notes/docinfo.xml +++ b/release_notes/docinfo.xml @@ -1,5 +1,5 @@ {productname} -3 +{producty} {productname} {productname} Release Notes diff --git a/release_notes/master.adoc b/release_notes/master.adoc index eff62c54f..9710c7257 100644 --- a/release_notes/master.adoc +++ b/release_notes/master.adoc @@ -1,39 +1,35 @@ include::modules/attributes.adoc[] -[id='quay-release-notes'] +[id="quay-release-notes"] = {productname} Release Notes -Red Hat Quay is regularly released, containing new features, bug fixes, and software updates. -We highly recommend deploying the latest version of Red Hat Quay. - -For Red Hat Quay documentation, you should know that: - -* Documentation is versioned along with each major release -* The latest Red Hat Quay documentation is available from the link:https://access.redhat.com/documentation/en-us/red_hat_quay[Red Hat Quay Documentation] page -* Prior to version 2.9.2, the product was referred to as Quay Enterprise -* Documentation versions prior to 2.9.2 are archived on the link:https://coreos.com/quay-enterprise/docs/latest/[CoreOS] site - -{productname}, version 3 is the latest major version. 
- -include::modules/rn_3_10.adoc[leveloffset=+1] -include::modules/rn_3_00.adoc[leveloffset=+1] -include::modules/rn_2_90.adoc[leveloffset=+1] -include::modules/rn_2_80.adoc[leveloffset=+1] -include::modules/rn_2_70.adoc[leveloffset=+1] -include::modules/rn_2_60.adoc[leveloffset=+1] -include::modules/rn_2_50.adoc[leveloffset=+1] -include::modules/rn_2_40.adoc[leveloffset=+1] -include::modules/rn_2_30.adoc[leveloffset=+1] -include::modules/rn_2_20.adoc[leveloffset=+1] -include::modules/rn_2_10.adoc[leveloffset=+1] -include::modules/rn_2_00.adoc[leveloffset=+1] -include::modules/rn_1_18.adoc[leveloffset=+1] -include::modules/rn_1_17.adoc[leveloffset=+1] -include::modules/rn_1_16.adoc[leveloffset=+1] -include::modules/rn_1_15.adoc[leveloffset=+1] -include::modules/rn_1_14.adoc[leveloffset=+1] -include::modules/rn_1_13.adoc[leveloffset=+1] -include::modules/rn_1_12.adoc[leveloffset=+1] - - -[discrete] +{productname} container registry platform provides secure storage, distribution, and governance of containers and cloud-native artifacts on any infrastructure. It is available as a standalone component or as an Operator on {ocp}. {productname} includes the following features and benefits: + +* Granular security management +* Fast and robust at any scale +* High velocity CI/CD +* Automated installation and updates +* Enterprise authentication and team-based access control +* {ocp} integration + +{productname} is regularly released, containing new features, bug fixes, and software updates. To upgrade {productname} for both standalone and {ocp} deployments, see link:https://access.redhat.com/documentation/en-us/red_hat_quay/{producty}/html/upgrade_red_hat_quay/index[Upgrade {productname}]. + +[IMPORTANT] +==== +{productname} only supports rolling back, or downgrading, to previous z-stream versions, for example, {producty-n1}.2 -> {producty-n1}.1. Rolling back to previous y-stream versions ({producty} -> {producty-n1}) is not supported. 
This is because {productname} updates might contain database schema upgrades that are applied when upgrading to a new version of {productname}. Database schema upgrades are not considered backwards compatible. + +Downgrading to previous z-streams is neither recommended nor supported for either Operator-based deployments or virtual machine-based deployments. Downgrading should only be done in extreme circumstances. The decision to roll back your {productname} deployment must be made in conjunction with the {productname} support and development teams. For more information, contact {productname} support. +==== + +ifdef::downstream[] + +Documentation for {productname} is versioned with each release. The latest {productname} documentation is available from the link:https://access.redhat.com/documentation/en-us/red_hat_quay[{productname} Documentation] page. Currently, version 3 is the latest major version. + +[NOTE] +==== +Prior to version 2.9.2, {productname} was called Quay Enterprise. Documentation for 2.9.2 and prior versions is archived on the link:https://access.redhat.com/documentation/en-us/red_hat_quay/2.9[Product Documentation for Red Hat Quay 2.9] page. +==== + +endif::downstream[] + +include::modules/rn_3_14_0.adoc[leveloffset=+1] \ No newline at end of file diff --git a/resources/notes.md b/resources/notes.md index 362b44360..0aabd1e6a 100644 --- a/resources/notes.md +++ b/resources/notes.md @@ -157,9 +157,9 @@ quay.quaylab.lan image_uri="http://porkchop.redhat.com/released/RHEL-7/7.5/Serve 1. Follow the [automation scripts]() to standalone install. 
- Run `ansible-playbook -i quay.inv_sample quaylab.yml -k` and fill in the prompts - You then need to install ceph - if you want to use it - - Note: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/#installing-a-red-hat-ceph-storage-cluster + - Note: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html-single/installation_guide_for_red_hat_enterprise_linux/#installing-a-red-hat-ceph-storage-cluster - You then need to create a USER: - - https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access + - https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access - You then need to create a bucket: (use the python script for this in the test directory). - `python s3bucket_create.py` << Be sure to edit variables in this using data from user create. 
diff --git a/resources/test/s3bucket_create.py b/resources/test/s3bucket_create.py index 238f1f8da..6420cd954 100644 --- a/resources/test/s3bucket_create.py +++ b/resources/test/s3bucket_create.py @@ -1,11 +1,11 @@ -# From: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access +# From: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/configuration#creating_a_literal_radosgw_literal_user_for_s3_access import boto import boto.s3.connection rdgw_hostname = "quay.quaylab.lan" rdgw_port = 8880 ## -# Fill this in after running: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/administration_cli#create_a_user +# Fill this in after running: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/3/html/object_gateway_guide_for_red_hat_enterprise_linux/administration_cli#create_a_user # $ radosgw-admin user create --uid=janedoe --display-name="Jane Doe" --email=jane@example.com ## access_key = $access diff --git a/securing_quay/docinfo.xml b/securing_quay/docinfo.xml new file mode 100644 index 000000000..858180e42 --- /dev/null +++ b/securing_quay/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Securing {productname} + + Securing {productname}: SSL/TLS, Certificates, and Encryption + + + Red Hat OpenShift Documentation Team + + diff --git a/securing_quay/master.adoc b/securing_quay/master.adoc new file mode 100644 index 000000000..a73b4fda0 --- /dev/null +++ b/securing_quay/master.adoc @@ -0,0 +1,45 @@ +include::modules/attributes.adoc[] + +:_content-type: ASSEMBLY +[id="securing-quay"] += Securing {productname} +:context: quay-security + +{productname} offers administrators the ability to secure communication and trusted access to their repositories 
through the use of Transport Layer Security (TLS), certificate management, and encryption techniques. Properly configuring SSL/TLS and implementing custom certificates can help safeguard data, secure external connections, and maintain trust between {productname} and the integrated services of your choosing. + +The following topics are covered: + +* Configuring custom SSL/TLS certificates for standalone {productname} deployments +* Configuring custom SSL/TLS certificates for {productname-ocp} +* Adding additional Certificate Authorities to the {productname} container +* Adding additional Certificate Authorities to {productname-ocp} + +//creating ssl-tls-certificates +include::modules/ssl-tls-quay-overview.adoc[leveloffset=+1] +include::modules/ssl-create-certs.adoc[leveloffset=+2] +//SSL/TLS Standalone +include::modules/configuring-ssl-tls.adoc[leveloffset=+2] +include::modules/ssl-config-cli.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-podman.adoc[leveloffset=+3] +include::modules/ssl-trust-ca-system.adoc[leveloffset=+3] +//SSL/TLS Operator +include::modules/operator-custom-ssl-certs-config-bundle.adoc[leveloffset=+2] +include::modules/creating-custom-ssl-certs-config-bundle.adoc[leveloffset=+3] + +//PostgreSQL SSL/TLS certificates +include::modules/ssl-tls-sql.adoc[leveloffset=+1] +include::modules/configuring-cert-based-auth-quay-cloudsql.adoc[leveloffset=+2] + + +//additional ca certificates +include::modules/config-extra-ca-certs-quay.adoc[leveloffset=+1] +//Additional CA Certificates standalone +include::modules/config-custom-ssl-certs-manual.adoc[leveloffset=+2] +//Additional CA Certificates Operator +include::modules/config-additional-ca-certs-operator.adoc[leveloffset=+2] +include::modules/operator-config-cli-download.adoc[leveloffset=+3] +include::modules/adding-ca-certs-to-config.adoc[leveloffset=+3] +//Kubernetes +include::modules/config-custom-ssl-certs-kubernetes.adoc[leveloffset=+2] + +//isolated builds diff --git a/securing_quay/modules 
b/securing_quay/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/securing_quay/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/troubleshooting_quay/docinfo.xml b/troubleshooting_quay/docinfo.xml new file mode 100644 index 000000000..09aac9ba9 --- /dev/null +++ b/troubleshooting_quay/docinfo.xml @@ -0,0 +1,10 @@ +{productname} +{producty} +Troubleshooting {productname} + + Troubleshooting {productname} + + + Red Hat OpenShift Documentation Team + + diff --git a/troubleshooting_quay/master.adoc b/troubleshooting_quay/master.adoc new file mode 100644 index 000000000..4df6116fc --- /dev/null +++ b/troubleshooting_quay/master.adoc @@ -0,0 +1,115 @@ +include::modules/attributes.adoc[] +:_content-type: ASSEMBLY +[id="support-overview"] += Troubleshooting {productname} + +Red Hat offers administrators tools for gathering data for your {productname} deployment. You can use this data to troubleshoot your {productname} deployment yourself, or file a support ticket. 
+ +//Support +include::modules/getting-support.adoc[leveloffset=+1] + +//Debug mode +include::modules/running-quay-debug-mode-intro.adoc[leveloffset=+1] +include::modules/running-quay-debug-mode.adoc[leveloffset=+2] +include::modules/running-ldap-debug-mode.adoc[leveloffset=+2] +include::modules/running-operator-debug-mode.adoc[leveloffset=+2] + +//quay logs +include::modules/obtaining-quay-logs.adoc[leveloffset=+1] + +//quay config +include::modules/obtaining-quay-config-information.adoc[leveloffset=+1] + +//health-check +include::modules/health-check-quay.adoc[leveloffset=+1] + +//Troubleshooting components +include::modules/troubleshooting-components.adoc[leveloffset=+1] +// Database +include::modules/database-troubleshooting.adoc[leveloffset=+2] +include::modules/database-troubleshooting-issues.adoc[leveloffset=+3] +include::modules/troubleshooting-forgotten-passwords.adoc[leveloffset=+3] +include::modules/resetting-superuser-password-on-operator.adoc[leveloffset=+3] + +// Authentication +include::modules/authentication-troubleshooting.adoc[leveloffset=+2] +include::modules/authentication-troubleshooting-issues.adoc[leveloffset=+3] + +//Storage +include::modules/storage-troubleshooting.adoc[leveloffset=+2] +include::modules/storage-troubleshooting-issues.adoc[leveloffset=+3] +//include::modules/changing-storage-solution.adoc[leveloffset=+3] +//include::modules/connecting-s3-timeout.adoc[leveloffset=+3] + +//Geo replication +include::modules/georepl-intro.adoc[leveloffset=+2] +include::modules/geo-repl-troubleshooting-issues.adoc[leveloffset=+3] +//include::modules/storage-health-check-geo-repl.adoc[leveloffset=+3] +//include::modules/storage-buckets-not-synced.adoc[leveloffset=+3] +//include::modules/geo-repl-sslerror.adoc[leveloffset=+3] + +//Repository mirroring +include::modules/mirroring-intro.adoc[leveloffset=+2] +include::modules/repo-mirroring-troubleshooting-issues.adoc[leveloffset=+3] 
+//include::modules/mirroring-invalid-credentials.adoc[leveloffset=+3] +//include::modules/missing-runc-files.adoc[leveloffset=+3] +//include::modules/signature-does-not-exist.adoc[leveloffset=+3] + + +//Clair +include::modules/clair-vulnerability-scanner-overview.adoc[leveloffset=+2] +//include::modules/clair-concepts.adoc[leveloffset=+3] +include::modules/clair-troubleshooting-issues.adoc[leveloffset=+3] +//include::modules/unsupported-security-scan.adoc[leveloffset=+3] +//include::modules/scans-not-working-behind-proxy.adoc[leveloffset=+3] +//include::modules/connection-issues-clair-quay-db.adoc[leveloffset=+3] +//include::modules/java-image-scan-not-working.adoc[leveloffset=+3] + + +//// + + +[id="troubleshooting-quay"] += Troubleshooting {productname} + +Use the content in this guide to troubleshoot your {productname} registry on both standalone and Operator based deployments. + + +//General Troubleshooting +include::modules/troubleshooting-general.adoc[leveloffset=+1] +include::modules/troubleshooting-401-helm.adoc[leveloffset=+2] +include::modules/error-403-troubleshooting.adoc[leveloffset=+2] +include::modules/error-406-dockerfile.adoc[leveloffset=+2] +include::modules/error-429-troubleshooting.adoc[leveloffset=+2] +include::modules/error-500-troubleshooting.adoc[leveloffset=+2] +include::modules/error-502-troubleshooting.adoc[leveloffset=+2] +include::modules/build-trigger-error.adoc[leveloffset=+2] +include::modules/build-logs-not-loading.adoc[leveloffset=+2] +include::modules/cannot-access-private-repo.adoc[leveloffset=+2] +include::modules/cannot-locate-dockerfile.adoc[leveloffset=+2] +include::modules/cannot-reach-registry-endpoint.adoc[leveloffset=+2] +include::modules/docker-failing-pulls.adoc[leveloffset=+2] +include::modules/docker-io-timeout.adoc[leveloffset=+2] +include::modules/docker-login-error.adoc[leveloffset=+2] +include::modules/docker-timestamp-error.adoc[leveloffset=+2] +include::modules/marathon-mesos-fail.adoc[leveloffset=+2] 
+include::modules/mirrored-images-unable-pull-rhocp.adoc[leveloffset=+2] +include::modules/secrets-garbage-collected.adoc[leveloffset=+2] +include::modules/troubleshooting-slow-pushes.adoc[leveloffset=+2] + + + +//how tos +//include::modules/troubleshooting-how-tos.adoc[leveloffset=+2] +//include::modules/how-to-list-quay-repos.adoc[leveloffset=+3] +//include::modules/rotating-log-files.adoc[leveloffset=+3] + +//faqs +include::modules/frequently-asked-questions.adoc[leveloffset=+2] +include::modules/clair-distroless-container-images.adoc[leveloffset=+3] +include::modules/operator-geo-replication.adoc[leveloffset=+3] +include::modules/ldap-timeouts-quay.adoc[leveloffset=+3] +include::modules/limit-organization-creation.adoc[leveloffset=+3] +include::modules/resource-demand-failed-operator.adoc[leveloffset=+3] +include::modules/nested-ldap-team-sync.adoc[leveloffset=+3] +//// \ No newline at end of file diff --git a/troubleshooting_quay/modules b/troubleshooting_quay/modules new file mode 120000 index 000000000..43aab75b5 --- /dev/null +++ b/troubleshooting_quay/modules @@ -0,0 +1 @@ +../modules/ \ No newline at end of file diff --git a/upgrade_quay/docinfo.xml b/upgrade_quay/docinfo.xml index 714a86cdc..709a0bfd9 100644 --- a/upgrade_quay/docinfo.xml +++ b/upgrade_quay/docinfo.xml @@ -1,5 +1,5 @@ {productname} -3 +{producty} Upgrade {productname} Upgrade {productname} diff --git a/upgrade_quay/master.adoc b/upgrade_quay/master.adoc index 3597bb44a..9891ffac7 100644 --- a/upgrade_quay/master.adoc +++ b/upgrade_quay/master.adoc @@ -1,48 +1,18 @@ include::modules/attributes.adoc[] - -[id='upgrade-quay-v3'] +[id="upgrade-quay-v3"] = Upgrade {productname} -:imagesdir: ./images - -This guide describes how to upgrade to {productname} v3.1. 
-To upgrade from {productname} v2.y.z to v{productmin}, you must: - -* Upgrade from {productname} v2.y.z to v3.0.z -* Then upgrade from {productname} v3.0.z to v{productmin} - -To upgrade from v3.0.z to v{productmin} you simply need to do a rolling -upgrade of your {productname} v3.0.z containers to the latest v{productmin} containers. -Procedures for these upgrades are described below. - -== {productname} v2.y.z to v3.0.z upgrade -For the v2.y.z to v3.0.z portion, you can either do -the whole upgrade with {productname} down (Synchronous) or only bring down -{productname} for a few minutes and have bulk of the upgrade continue with -{productname} running (Background). - -In a background upgrade, it could take much longer -to run the upgrade (depending on how many tags need to be processed), -but it takes less total downtime. The downside of a background upgrade is -that you won't have access to the latest features until the upgrade completes -(the cluster runs from the quay v3 container -in v2 compatibility mode until the upgrade is done). - -See the link:https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/red_hat_quay_release_notes/index[Red Hat Quay Release Notes] for information on v3 features. - -== {productname} v3.0.z to v{productmin} upgrade - -Once your Red Hat Quay cluster is running on any v3.0.z version and your V3_UPGRADE_MODE -is completed, you can upgrade that cluster from v3.0.z to v{productmin} by simply: - -* Adding new v{productmin} quay containers into your Red Hat Quay cluster one at a time - -* Checking that the new containers are performing well +The upgrade procedure for {productname} depends on the type of installation that you are using. -* Continuing to add more containers into the cluster +The {productname} Operator provides a simple method to deploy and manage a {productname} cluster. This is the preferred procedure for deploying {productname} on {ocp}. 
-* Removing the older versions of the quay container (a rolling upgrade) until all the old containers are replaced +The {productname} Operator should be upgraded using the link:https://docs.openshift.com/container-platform/{ocp-y}/operators/understanding/olm/olm-understanding-olm.html[Operator Lifecycle Manager (OLM)] as described in the section "Upgrading Quay using the Quay Operator". -include::modules/con_upgrade_v3.adoc[leveloffset=+1] +The procedure for upgrading a proof of concept or highly available installation of {productname} and Clair is documented in the section "Standalone upgrade". -include::modules/proc_upgrade_v3.adoc[leveloffset=+1] +include::modules/operator-upgrade.adoc[leveloffset=+1] +include::modules/proc_upgrade_standalone.adoc[leveloffset=+1] +include::modules/upgrading-geo-repl-quay.adoc[leveloffset=+1] +include::modules/upgrading-geo-repl-quay-operator.adoc[leveloffset=+1] +include::modules/qbo-operator-upgrade.adoc[leveloffset=+1] +include::modules/downgrade-quay-deployment.adoc[leveloffset=+1] diff --git a/use_quay/docinfo.xml b/use_quay/docinfo.xml index 6b85dc2ce..58f87f09d 100644 --- a/use_quay/docinfo.xml +++ b/use_quay/docinfo.xml @@ -1,5 +1,5 @@ {productname} -3 +{producty} Use {productname} Learn to use {productname} diff --git a/use_quay/master.adoc b/use_quay/master.adoc index 581e543fa..f155ae1f2 100644 --- a/use_quay/master.adoc +++ b/use_quay/master.adoc @@ -1,31 +1,164 @@ include::modules/attributes.adoc[] +:_content-type: ASSEMBLY [id='use-quay'] = Use {productname} +:context: use-quay -Whether you deployed your own {productname} service or are using the Quay.io -registry, follow descriptions here to start using your Quay repository to -store and work with images. +{productname} container image registries serve as centralized hubs for storing container images. 
Users of {productname} can create repositories to effectively manage images and grant specific read (pull) and write (push) permissions to the repositories as deemed necessary. Administrative privileges expand these capabilities, allowing users to perform a broader set of tasks, like the ability to add users and control default settings. +This guide offers an overview of {productname}'s users and organizations, its tenancy model, and basic operations like creating and deleting users, organizations, and repositories, handling access, and interacting with tags. It includes both UI and API operations. + +[NOTE] +==== +The following API endpoints are linked to their associated entry in the link:https://docs.redhat.com/en/documentation/red_hat_quay/{producty}/html-single/red_hat_quay_api_reference/index[{productname} API guide]. The {productname} API guide provides more information about each endpoint, such as response codes and optional query parameters. +==== + +//intro and tenancy +include::modules/user-org-intro.adoc[leveloffset=+1] +include::modules/tenancy-model.adoc[leveloffset=+2] +//Red Hat Quay API +include::modules/enabling-using-the-api.adoc[leveloffset=+1] + +//creating and deleting users +include::modules/user-create.adoc[leveloffset=+1] +include::modules/creating-user-account-quay-ui.adoc[leveloffset=+2] +include::modules/creating-user-account-quay-api.adoc[leveloffset=+2] +include::modules/deleting-user-ui.adoc[leveloffset=+2] +include::modules/deleting-user-cli-api.adoc[leveloffset=+2] +//organizations overview +include::modules/organizations-overview.adoc[leveloffset=+1] +include::modules/org-create.adoc[leveloffset=+2] +include::modules/org-create-api.adoc[leveloffset=+2] +include::modules/organization-settings-v2-ui.adoc[leveloffset=+2] +//organization settings API? 
+include::modules/org-delete.adoc[leveloffset=+2] +include::modules/org-delete-api.adoc[leveloffset=+2] +//repositories overview include::modules/proc_use-quay-create-repo.adoc[leveloffset=+1] +include::modules/creating-an-image-repository-via-the-ui.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-docker.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-skopeo-copy.adoc[leveloffset=+2] +include::modules/creating-an-image-repository-via-the-api.adoc[leveloffset=+2] +include::modules/deleting-an-image-repository-via-ui.adoc[leveloffset=+2] +include::modules/deleting-an-image-repository-via-the-api.adoc[leveloffset=+2] + +//robot accounts +include::modules/robot-account-overview.adoc[leveloffset=+1] +include::modules/creating-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/creating-robot-account-api.adoc[leveloffset=+2] +include::modules/managing-robot-account-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/disabling-robot-account.adoc[leveloffset=+2] +include::modules/regenerating-robot-account-token-api.adoc[leveloffset=+2] +include::modules/deleting-robot-account-v2-ui.adoc[leveloffset=+2] +include::modules/deleting-robot-account-api.adoc[leveloffset=+2] +// federation +include::modules/keyless-authentication-robot-accounts.adoc[leveloffset=+2] + +//access management repositories +include::modules/proc_use-quay-manage-repo.adoc[leveloffset=+1] +include::modules/teams-overview.adoc[leveloffset=+2] +include::modules/creating-a-team-ui.adoc[leveloffset=+3] +include::modules/creating-a-team-api.adoc[leveloffset=+3] + +include::modules/managing-team-ui.adoc[leveloffset=+3] +include::modules/add-users-to-team.adoc[leveloffset=+4] +include::modules/set-team-role.adoc[leveloffset=+4] +include::modules/managing-team-members-repo-permissions-ui.adoc[leveloffset=+4] +include::modules/viewing-additional-info-about-team-ui.adoc[leveloffset=+4] + +include::modules/managing-a-team-api.adoc[leveloffset=+3] 
+include::modules/managing-team-members-api.adoc[leveloffset=+4] +include::modules/setting-role-of-team-within-organization-api.adoc[leveloffset=+4] +include::modules/deleting-team-within-organization-api.adoc[leveloffset=+4] -include::modules/proc_use-quay-tags.adoc[leveloffset=+1] +include::modules/default-permissions-v2-ui.adoc[leveloffset=+2] +include::modules/default-permissions-api.adoc[leveloffset=+2] +include::modules/allow-access-user-repo.adoc[leveloffset=+2] +include::modules/adjust-access-user-repo-api.adoc[leveloffset=+2] -include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+1] +//image tags overview +include::modules/image-tags-overview.adoc[leveloffset=+1] +include::modules/viewing-and-modifying-tags.adoc[leveloffset=+2] +include::modules/viewing-model-card-information.adoc[leveloffset=+3] +include::modules/viewing-tags-api.adoc[leveloffset=+2] +include::modules/adding-a-new-tag-to-image.adoc[leveloffset=+2] +include::modules/adding-a-new-tag-to-image-api.adoc[leveloffset=+2] +include::modules/adding-managing-labels.adoc[leveloffset=+2] +include::modules/adding-managing-labels-api.adoc[leveloffset=+2] +include::modules/setting-tag-expirations-v2-ui.adoc[leveloffset=+2] +include::modules/setting-tag-expiration-api.adoc[leveloffset=+3] +include::modules/fetching-images-and-tags.adoc[leveloffset=+2] -include::modules/proc_use-quay-skip-trigger.adoc[leveloffset=+1] +include::modules/viewing-tag-history-v2-ui.adoc[leveloffset=+2] +include::modules/viewing-tag-history-v2-api.adoc[leveloffset=+2] +include::modules/deleting-a-tag.adoc[leveloffset=+2] +include::modules/deleting-a-tag-api.adoc[leveloffset=+2] +include::modules/reverting-tag-changes.adoc[leveloffset=+2] +include::modules/reverting-tag-changes-api.adoc[leveloffset=+2] +//logs +include::modules/proc_use-quay-view-export-logs.adoc[leveloffset=+1] +include::modules/viewing-usage-logs-v2-ui.adoc[leveloffset=+2] +include::modules/viewing-usage-logs-api.adoc[leveloffset=+2] 
+include::modules/use-quay-export-logs.adoc[leveloffset=+2] +include::modules/use-quay-export-logs-api.adoc[leveloffset=+2] +//security scans +include::modules/security-scanning.adoc[leveloffset=+1] +include::modules/security-scanning-ui.adoc[leveloffset=+2] +include::modules/security-scanning-api.adoc[leveloffset=+2] +//Notifications and events include::modules/proc_use-quay-notifications.adoc[leveloffset=+1] +include::modules/notification-actions.adoc[leveloffset=+2] +include::modules/creating-notifications.adoc[leveloffset=+2] +include::modules/creating-image-expiration-notification.adoc[leveloffset=+3] +include::modules/creating-notifications-api.adoc[leveloffset=+2] +include::modules/repository-events.adoc[leveloffset=+2] + +//docker files +//include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +//include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+1] + +//custom trigger +//include::modules/proc_use-quay-git-trigger.adoc[leveloffset=+2] + +//include::modules/proc_use-quay-skip-trigger.adoc[leveloffset=+1] + +//include::modules/proc_github-build-triggers.adoc[leveloffset=+1] + +//github oauth? 
+//include::modules/proc_github-app.adoc[leveloffset=+1] + + +// Quota Management +include::modules/quota-management-and-enforcement.adoc[leveloffset=+1] +include::modules/quota-management-arch.adoc[leveloffset=+2] +include::modules/quota-management-limitations.adoc[leveloffset=+2] +include::modules/config-fields-quota-management.adoc[leveloffset=+2] -include::modules/proc_use-quay-build-dockerfiles.adoc[leveloffset=+1] -include::modules/proc_github-build-triggers.adoc[leveloffset=+1] +include::modules/quota-establishment-api.adoc[leveloffset=+2] -include::modules/proc_use-quay-build-workers-dockerfiles.adoc[leveloffset=+1] +//Proxy getProxyCache +include::modules/quay-as-cache-proxy.adoc[leveloffset=+1] +include::modules/proxy-cache-arch.adoc[leveloffset=+2] +include::modules/proxy-cache-limitations.adoc[leveloffset=+2] +include::modules/proxy-cache-procedure.adoc[leveloffset=+2] +include::modules/proxy-cache-leveraging-storage-quota-limits.adoc[leveloffset=+2] -include::modules/proc_github-app.adoc[leveloffset=+1] +// Virtual builders +//include::modules/build-enhancements.adoc[leveloffset=+1] +//include::modules/build-enhanced-arch.adoc[leveloffset=+2] +//include::modules/build-limitations.adoc[leveloffset=+2] +//include::modules/builders-virtual-environment.adoc[leveloffset=+2] -include::modules/proc_use-quay-squash-images.adoc[leveloffset=+1] +//oci +include::modules/oci-intro.adoc[leveloffset=+1] +include::modules/helm-oci-prereqs.adoc[leveloffset=+2] +include::modules/helm-oci-quay.adoc[leveloffset=+2] +include::modules/oras-annotation-parsing.adoc[leveloffset=+2] +include::modules/testing-oci-support.adoc[leveloffset=+2] -[discrete] -== Additional resources +//cosign +//include::modules/cosign-oci-intro.adoc[leveloffset=+2] +//include::modules/cosign-oci-with-quay.adoc[leveloffset=+2] diff --git a/welcome.adoc b/welcome.adoc index ce8a6a513..7c7e436ed 100644 --- a/welcome.adoc +++ b/welcome.adoc @@ -1,14 +1,44 @@ include::modules/attributes.adoc[] -:toc: 
left -Welcome to {productname} documentation. Here you can find all information that you need for setting and running with {productshortname}. += {productname} Documentation -include::deploy_quay/master.adoc[Deploy {productname} - Basic] +== Project Quay Releases -include::deploy_quay_ha/master.adoc[Deploy {productname} - HA] +Project Quay releases can be found on https://github.com/quay/quay/releases[github]. These are built regularly from the latest development activity and form the basis for quay.io deployments. Each release is tagged for the last development sprint completed. -include::manage_quay/master.adoc[Manage {productname}] +== Getting Started -include::use_quay/master.adoc[Use {productname}] +Looking to try out Quay? Please review our https://github.com/quay/quay/blob/master/docs/quick-local-deployment.md[Getting Started Guide]. -include::deploy_quay_on_openshift/master.adoc[Deploy {productname} on OpenShift] +If you want to develop Quay, please see https://github.com/quay/quay/blob/master/docs/getting-started.md#running-quay-for-development[Getting Started For Development Guide]. + +== Deploying Quay + +Quay can be deployed in a variety of configurations, both within and outside of Kubernetes. For automated deployments, the Quay Operator is recommended. The documentation below provides instructions on how to set up Quay via the operator or manually. 
+ +xref:deploy_red_hat_quay_operator.adoc[Deploy with OpenShift Operator] + +xref:deploy_quay.adoc[Deploy Proof of Concept] + +xref:deploy_quay_ha.adoc[Deploy High Availability] + +== Managing Quay + +xref:config_quay.adoc[Configure {productname}] + +xref:manage_quay.adoc[Manage {productname}] + +xref:upgrade_quay.adoc[Upgrade {productname}] + +== Using Quay + +xref:red_hat_quay_operator_features[Red Hat Quay Operator features] + +xref:use_quay.adoc[Use {productname}] + +xref:api_quay.adoc[{productname} API Guide] + +xref:quay_io.adoc[{quayio}] + + +NOTE: Help make {productname} docs better on https://github.com/quay/quay-docs[GitHub]