diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000000..b5c503067e67
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,6 @@
+# Dockerfile
+FROM centos:8
+RUN dnf install git python3 python3-devel ruby rubygems -y
+RUN gem install asciidoctor asciidoctor-diagram
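+# NOTE: $HOME is not declared as an ENV in this Dockerfile, so it expands to
+# an empty string here and the sources land in /src/, which matches the path
+# the pip3 line below installs from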
+COPY . $HOME/src/
+RUN pip3 install pyyaml /src/aura.tar.gz
diff --git a/Gemfile b/Gemfile
index b79fdd8e1628..a10ec53b1be4 100644
--- a/Gemfile
+++ b/Gemfile
@@ -1,4 +1,4 @@
source "https://rubygems.org"
-gem 'ascii_binder', '~>0.1.5'
+gem 'ascii_binder', '~>0.2.0'
diff --git a/LICENSE b/LICENSE
index 3a287cc1c0a3..261eeb9e9f8b 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,9 +1,3 @@
-OpenShift documentation is licensed under the Apache License 2.0 or,
-alternatively, under the Creative Commons Attribution-ShareAlike 3.0
-Unported license.
-
-
-
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -205,365 +199,3 @@ Unported license.
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-
-
-
-Creative Commons Legal Code
-
-Attribution-ShareAlike 3.0 Unported
-
- CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
- LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN
- ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
- INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
- REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR
- DAMAGES RESULTING FROM ITS USE.
-
-License
-
-THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
-COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
-COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
-AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
-
-BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
-TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
-BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
-CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
-CONDITIONS.
-
-1. Definitions
-
- a. "Adaptation" means a work based upon the Work, or upon the Work and
- other pre-existing works, such as a translation, adaptation,
- derivative work, arrangement of music or other alterations of a
- literary or artistic work, or phonogram or performance and includes
- cinematographic adaptations or any other form in which the Work may be
- recast, transformed, or adapted including in any form recognizably
- derived from the original, except that a work that constitutes a
- Collection will not be considered an Adaptation for the purpose of
- this License. For the avoidance of doubt, where the Work is a musical
- work, performance or phonogram, the synchronization of the Work in
- timed-relation with a moving image ("synching") will be considered an
- Adaptation for the purpose of this License.
- b. "Collection" means a collection of literary or artistic works, such as
- encyclopedias and anthologies, or performances, phonograms or
- broadcasts, or other works or subject matter other than works listed
- in Section 1(f) below, which, by reason of the selection and
- arrangement of their contents, constitute intellectual creations, in
- which the Work is included in its entirety in unmodified form along
- with one or more other contributions, each constituting separate and
- independent works in themselves, which together are assembled into a
- collective whole. A work that constitutes a Collection will not be
- considered an Adaptation (as defined below) for the purposes of this
- License.
- c. "Creative Commons Compatible License" means a license that is listed
- at https://creativecommons.org/compatiblelicenses that has been
- approved by Creative Commons as being essentially equivalent to this
- License, including, at a minimum, because that license: (i) contains
- terms that have the same purpose, meaning and effect as the License
- Elements of this License; and, (ii) explicitly permits the relicensing
- of adaptations of works made available under that license under this
- License or a Creative Commons jurisdiction license with the same
- License Elements as this License.
- d. "Distribute" means to make available to the public the original and
- copies of the Work or Adaptation, as appropriate, through sale or
- other transfer of ownership.
- e. "License Elements" means the following high-level license attributes
- as selected by Licensor and indicated in the title of this License:
- Attribution, ShareAlike.
- f. "Licensor" means the individual, individuals, entity or entities that
- offer(s) the Work under the terms of this License.
- g. "Original Author" means, in the case of a literary or artistic work,
- the individual, individuals, entity or entities who created the Work
- or if no individual or entity can be identified, the publisher; and in
- addition (i) in the case of a performance the actors, singers,
- musicians, dancers, and other persons who act, sing, deliver, declaim,
- play in, interpret or otherwise perform literary or artistic works or
- expressions of folklore; (ii) in the case of a phonogram the producer
- being the person or legal entity who first fixes the sounds of a
- performance or other sounds; and, (iii) in the case of broadcasts, the
- organization that transmits the broadcast.
- h. "Work" means the literary and/or artistic work offered under the terms
- of this License including without limitation any production in the
- literary, scientific and artistic domain, whatever may be the mode or
- form of its expression including digital form, such as a book,
- pamphlet and other writing; a lecture, address, sermon or other work
- of the same nature; a dramatic or dramatico-musical work; a
- choreographic work or entertainment in dumb show; a musical
- composition with or without words; a cinematographic work to which are
- assimilated works expressed by a process analogous to cinematography;
- a work of drawing, painting, architecture, sculpture, engraving or
- lithography; a photographic work to which are assimilated works
- expressed by a process analogous to photography; a work of applied
- art; an illustration, map, plan, sketch or three-dimensional work
- relative to geography, topography, architecture or science; a
- performance; a broadcast; a phonogram; a compilation of data to the
- extent it is protected as a copyrightable work; or a work performed by
- a variety or circus performer to the extent it is not otherwise
- considered a literary or artistic work.
- i. "You" means an individual or entity exercising rights under this
- License who has not previously violated the terms of this License with
- respect to the Work, or who has received express permission from the
- Licensor to exercise rights under this License despite a previous
- violation.
- j. "Publicly Perform" means to perform public recitations of the Work and
- to communicate to the public those public recitations, by any means or
- process, including by wire or wireless means or public digital
- performances; to make available to the public Works in such a way that
- members of the public may access these Works from a place and at a
- place individually chosen by them; to perform the Work to the public
- by any means or process and the communication to the public of the
- performances of the Work, including by public digital performance; to
- broadcast and rebroadcast the Work by any means including signs,
- sounds or images.
- k. "Reproduce" means to make copies of the Work by any means including
- without limitation by sound or visual recordings and the right of
- fixation and reproducing fixations of the Work, including storage of a
- protected performance or phonogram in digital form or other electronic
- medium.
-
-2. Fair Dealing Rights. Nothing in this License is intended to reduce,
-limit, or restrict any uses free from copyright or rights arising from
-limitations or exceptions that are provided for in connection with the
-copyright protection under copyright law or other applicable laws.
-
-3. License Grant. Subject to the terms and conditions of this License,
-Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
-perpetual (for the duration of the applicable copyright) license to
-exercise the rights in the Work as stated below:
-
- a. to Reproduce the Work, to incorporate the Work into one or more
- Collections, and to Reproduce the Work as incorporated in the
- Collections;
- b. to create and Reproduce Adaptations provided that any such Adaptation,
- including any translation in any medium, takes reasonable steps to
- clearly label, demarcate or otherwise identify that changes were made
- to the original Work. For example, a translation could be marked "The
- original work was translated from English to Spanish," or a
- modification could indicate "The original work has been modified.";
- c. to Distribute and Publicly Perform the Work including as incorporated
- in Collections; and,
- d. to Distribute and Publicly Perform Adaptations.
- e. For the avoidance of doubt:
-
- i. Non-waivable Compulsory License Schemes. In those jurisdictions in
- which the right to collect royalties through any statutory or
- compulsory licensing scheme cannot be waived, the Licensor
- reserves the exclusive right to collect such royalties for any
- exercise by You of the rights granted under this License;
- ii. Waivable Compulsory License Schemes. In those jurisdictions in
- which the right to collect royalties through any statutory or
- compulsory licensing scheme can be waived, the Licensor waives the
- exclusive right to collect such royalties for any exercise by You
- of the rights granted under this License; and,
- iii. Voluntary License Schemes. The Licensor waives the right to
- collect royalties, whether individually or, in the event that the
- Licensor is a member of a collecting society that administers
- voluntary licensing schemes, via that society, from any exercise
- by You of the rights granted under this License.
-
-The above rights may be exercised in all media and formats whether now
-known or hereafter devised. The above rights include the right to make
-such modifications as are technically necessary to exercise the rights in
-other media and formats. Subject to Section 8(f), all rights not expressly
-granted by Licensor are hereby reserved.
-
-4. Restrictions. The license granted in Section 3 above is expressly made
-subject to and limited by the following restrictions:
-
- a. You may Distribute or Publicly Perform the Work only under the terms
- of this License. You must include a copy of, or the Uniform Resource
- Identifier (URI) for, this License with every copy of the Work You
- Distribute or Publicly Perform. You may not offer or impose any terms
- on the Work that restrict the terms of this License or the ability of
- the recipient of the Work to exercise the rights granted to that
- recipient under the terms of the License. You may not sublicense the
- Work. You must keep intact all notices that refer to this License and
- to the disclaimer of warranties with every copy of the Work You
- Distribute or Publicly Perform. When You Distribute or Publicly
- Perform the Work, You may not impose any effective technological
- measures on the Work that restrict the ability of a recipient of the
- Work from You to exercise the rights granted to that recipient under
- the terms of the License. This Section 4(a) applies to the Work as
- incorporated in a Collection, but this does not require the Collection
- apart from the Work itself to be made subject to the terms of this
- License. If You create a Collection, upon notice from any Licensor You
- must, to the extent practicable, remove from the Collection any credit
- as required by Section 4(c), as requested. If You create an
- Adaptation, upon notice from any Licensor You must, to the extent
- practicable, remove from the Adaptation any credit as required by
- Section 4(c), as requested.
- b. You may Distribute or Publicly Perform an Adaptation only under the
- terms of: (i) this License; (ii) a later version of this License with
- the same License Elements as this License; (iii) a Creative Commons
- jurisdiction license (either this or a later license version) that
- contains the same License Elements as this License (e.g.,
- Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible
- License. If you license the Adaptation under one of the licenses
- mentioned in (iv), you must comply with the terms of that license. If
- you license the Adaptation under the terms of any of the licenses
- mentioned in (i), (ii) or (iii) (the "Applicable License"), you must
- comply with the terms of the Applicable License generally and the
- following provisions: (I) You must include a copy of, or the URI for,
- the Applicable License with every copy of each Adaptation You
- Distribute or Publicly Perform; (II) You may not offer or impose any
- terms on the Adaptation that restrict the terms of the Applicable
- License or the ability of the recipient of the Adaptation to exercise
- the rights granted to that recipient under the terms of the Applicable
- License; (III) You must keep intact all notices that refer to the
- Applicable License and to the disclaimer of warranties with every copy
- of the Work as included in the Adaptation You Distribute or Publicly
- Perform; (IV) when You Distribute or Publicly Perform the Adaptation,
- You may not impose any effective technological measures on the
- Adaptation that restrict the ability of a recipient of the Adaptation
- from You to exercise the rights granted to that recipient under the
- terms of the Applicable License. This Section 4(b) applies to the
- Adaptation as incorporated in a Collection, but this does not require
- the Collection apart from the Adaptation itself to be made subject to
- the terms of the Applicable License.
- c. If You Distribute, or Publicly Perform the Work or any Adaptations or
- Collections, You must, unless a request has been made pursuant to
- Section 4(a), keep intact all copyright notices for the Work and
- provide, reasonable to the medium or means You are utilizing: (i) the
- name of the Original Author (or pseudonym, if applicable) if supplied,
- and/or if the Original Author and/or Licensor designate another party
- or parties (e.g., a sponsor institute, publishing entity, journal) for
- attribution ("Attribution Parties") in Licensor's copyright notice,
- terms of service or by other reasonable means, the name of such party
- or parties; (ii) the title of the Work if supplied; (iii) to the
- extent reasonably practicable, the URI, if any, that Licensor
- specifies to be associated with the Work, unless such URI does not
- refer to the copyright notice or licensing information for the Work;
- and (iv) , consistent with Ssection 3(b), in the case of an
- Adaptation, a credit identifying the use of the Work in the Adaptation
- (e.g., "French translation of the Work by Original Author," or
- "Screenplay based on original Work by Original Author"). The credit
- required by this Section 4(c) may be implemented in any reasonable
- manner; provided, however, that in the case of a Adaptation or
- Collection, at a minimum such credit will appear, if a credit for all
- contributing authors of the Adaptation or Collection appears, then as
- part of these credits and in a manner at least as prominent as the
- credits for the other contributing authors. For the avoidance of
- doubt, You may only use the credit required by this Section for the
- purpose of attribution in the manner set out above and, by exercising
- Your rights under this License, You may not implicitly or explicitly
- assert or imply any connection with, sponsorship or endorsement by the
- Original Author, Licensor and/or Attribution Parties, as appropriate,
- of You or Your use of the Work, without the separate, express prior
- written permission of the Original Author, Licensor and/or Attribution
- Parties.
- d. Except as otherwise agreed in writing by the Licensor or as may be
- otherwise permitted by applicable law, if You Reproduce, Distribute or
- Publicly Perform the Work either by itself or as part of any
- Adaptations or Collections, You must not distort, mutilate, modify or
- take other derogatory action in relation to the Work which would be
- prejudicial to the Original Author's honor or reputation. Licensor
- agrees that in those jurisdictions (e.g. Japan), in which any exercise
- of the right granted in Section 3(b) of this License (the right to
- make Adaptations) would be deemed to be a distortion, mutilation,
- modification or other derogatory action prejudicial to the Original
- Author's honor and reputation, the Licensor will waive or not assert,
- as appropriate, this Section, to the fullest extent permitted by the
- applicable national law, to enable You to reasonably exercise Your
- right under Section 3(b) of this License (right to make Adaptations)
- but not otherwise.
-
-5. Representations, Warranties and Disclaimer
-
-UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
-OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
-KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
-INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
-FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
-LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
-WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
-OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
-
-6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
-LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
-ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
-ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
-BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-7. Termination
-
- a. This License and the rights granted hereunder will terminate
- automatically upon any breach by You of the terms of this License.
- Individuals or entities who have received Adaptations or Collections
- from You under this License, however, will not have their licenses
- terminated provided such individuals or entities remain in full
- compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
- survive any termination of this License.
- b. Subject to the above terms and conditions, the license granted here is
- perpetual (for the duration of the applicable copyright in the Work).
- Notwithstanding the above, Licensor reserves the right to release the
- Work under different license terms or to stop distributing the Work at
- any time; provided, however that any such election will not serve to
- withdraw this License (or any other license that has been, or is
- required to be, granted under the terms of this License), and this
- License will continue in full force and effect unless terminated as
- stated above.
-
-8. Miscellaneous
-
- a. Each time You Distribute or Publicly Perform the Work or a Collection,
- the Licensor offers to the recipient a license to the Work on the same
- terms and conditions as the license granted to You under this License.
- b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
- offers to the recipient a license to the original Work on the same
- terms and conditions as the license granted to You under this License.
- c. If any provision of this License is invalid or unenforceable under
- applicable law, it shall not affect the validity or enforceability of
- the remainder of the terms of this License, and without further action
- by the parties to this agreement, such provision shall be reformed to
- the minimum extent necessary to make such provision valid and
- enforceable.
- d. No term or provision of this License shall be deemed waived and no
- breach consented to unless such waiver or consent shall be in writing
- and signed by the party to be charged with such waiver or consent.
- e. This License constitutes the entire agreement between the parties with
- respect to the Work licensed here. There are no understandings,
- agreements or representations with respect to the Work not specified
- here. Licensor shall not be bound by any additional provisions that
- may appear in any communication from You. This License may not be
- modified without the mutual written agreement of the Licensor and You.
- f. The rights granted under, and the subject matter referenced, in this
- License were drafted utilizing the terminology of the Berne Convention
- for the Protection of Literary and Artistic Works (as amended on
- September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
- Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
- and the Universal Copyright Convention (as revised on July 24, 1971).
- These rights and subject matter take effect in the relevant
- jurisdiction in which the License terms are sought to be enforced
- according to the corresponding provisions of the implementation of
- those treaty provisions in the applicable national law. If the
- standard suite of rights granted under applicable copyright law
- includes additional rights not granted under this License, such
- additional rights are deemed to be included in the License; this
- License is not intended to restrict the license of any rights under
- applicable law.
-
-
-Creative Commons Notice
-
- Creative Commons is not a party to this License, and makes no warranty
- whatsoever in connection with the Work. Creative Commons will not be
- liable to You or any party on any legal theory for any damages
- whatsoever, including without limitation any general, special,
- incidental or consequential damages arising in connection to this
- license. Notwithstanding the foregoing two (2) sentences, if Creative
- Commons has expressly identified itself as the Licensor hereunder, it
- shall have all rights and obligations of Licensor.
-
- Except for the limited purpose of indicating to the public that the
- Work is licensed under the CCPL, Creative Commons does not authorize
- the use by either party of the trademark "Creative Commons" or any
- related trademark or logo of Creative Commons without the prior
- written consent of Creative Commons. Any permitted use will be in
- compliance with Creative Commons' then-current trademark usage
- guidelines, as may be published on its website or otherwise made
- available upon request from time to time. For the avoidance of doubt,
- this trademark restriction does not form part of the License.
-
- Creative Commons may be contacted at https://creativecommons.org/.
diff --git a/README.adoc b/README.adoc
index a599a824e424..5b20908d8f04 100644
--- a/README.adoc
+++ b/README.adoc
@@ -1,3 +1,41 @@
-= OpenShift Documentation
+= OpenShift documentation
-Welcome to the OpenShift documentation GitHub repository. To contribute to OpenShift docs, see https://github.com/openshift/openshift-docs/blob/enterprise-4.1/contributing_to_docs/contributing.adoc
+This repository stores the source for the documentation of the following OpenShift products:
+
+* https://www.okd.io/[OKD]
+* https://www.openshift.com/products/online/[OpenShift Online]
+* https://www.openshift.com/products/container-platform/[OpenShift Container Platform]
+* https://www.openshift.com/products/dedicated/[OpenShift Dedicated]
+
+All OpenShift documentation is sourced in https://www.methods.co.nz/asciidoc/[AsciiDoc] and transformed into HTML/CSS and other formats through automation based on https://asciidoctor.org/[Asciidoctor].
+
+The documentation published from these source files can be viewed at https://docs.openshift.com.
+
+== Contributing to OpenShift documentation
+If you are interested in contributing to OpenShift technical documentation, see all of our link:./contributing_to_docs[resources] to help you get set up and learn more.
+
+The following table provides quick links to help you get started.
+
+[options="header"]
+|===
+
+|Question |Link
+
+|I'm interested, how do I contribute?
+|See the link:/contributing_to_docs/contributing.adoc[contributing] topic to learn more about this repository and how you can contribute.
+
+|Are there any basic guidelines to help me?
+|The link:/contributing_to_docs/doc_guidelines.adoc[documentation guidelines] topic provides some basic guidelines to help us keep our content consistent, and includes other style information.
+
+|How do I set up my workstation?
+|See the link:/contributing_to_docs/tools_and_setup.adoc[tools and setup] topic to set up your workstation.
+
+|How do I edit an existing topic, or create new content?
+|See the link:/contributing_to_docs/create_or_edit_content.adoc[create or edit content] topic to get started.
+
+|===
+
+== Contacts
+
+For questions or comments about OpenShift documentation:
+
+* Send an email to the OpenShift documentation team at openshift-docs@redhat.com.
diff --git a/_distro_map.yml b/_distro_map.yml
index dca7827d7018..3a6d57e35558 100644
--- a/_distro_map.yml
+++ b/_distro_map.yml
@@ -1,11 +1,196 @@
---
+openshift-origin:
+ name: OKD
+ author: OKD Documentation Project
+ site: community
+ site_name: Documentation
+ site_url: https://docs.okd.io/
+ branches:
+ main:
+ name: 4
+ dir: latest
+ enterprise-4.6:
+ name: '4.6'
+ dir: '4.6'
+ enterprise-4.7:
+ name: '4.7'
+ dir: '4.7'
+ enterprise-4.8:
+ name: '4.8'
+ dir: '4.8'
+ enterprise-3.6:
+ name: '3.6'
+ dir: '3.6'
+ enterprise-3.7:
+ name: '3.7'
+ dir: '3.7'
+ enterprise-3.9:
+ name: '3.9'
+ dir: '3.9'
+ enterprise-3.10:
+ name: '3.10'
+ dir: '3.10'
+ enterprise-3.11:
+ name: '3.11'
+ dir: '3.11'
+openshift-online:
+ name: OpenShift Online
+ author: OpenShift Documentation Project
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ enterprise-3.11:
+ name: 'Pro'
+ dir: online/pro
openshift-enterprise:
name: OpenShift Container Platform
- author: OpenShift Documentation Project
+ author: OpenShift Documentation Project
site: commercial
site_name: Documentation
site_url: https://docs.openshift.com/
branches:
+ enterprise-3.0:
+ name: '3.0'
+ dir: enterprise/3.0
+ distro-overrides:
+ name: OpenShift Enterprise
+ enterprise-3.1:
+ name: '3.1'
+ dir: enterprise/3.1
+ distro-overrides:
+ name: OpenShift Enterprise
+ enterprise-3.2:
+ name: '3.2'
+ dir: enterprise/3.2
+ distro-overrides:
+ name: OpenShift Enterprise
+ enterprise-3.3:
+ name: '3.3'
+ dir: container-platform/3.3
+ enterprise-3.4:
+ name: '3.4'
+ dir: container-platform/3.4
+ enterprise-3.5:
+ name: '3.5'
+ dir: container-platform/3.5
+ enterprise-3.6:
+ name: '3.6'
+ dir: container-platform/3.6
+ enterprise-3.7:
+ name: '3.7'
+ dir: container-platform/3.7
+ enterprise-3.9:
+ name: '3.9'
+ dir: container-platform/3.9
+ enterprise-3.10:
+ name: '3.10'
+ dir: container-platform/3.10
+ enterprise-3.11:
+ name: '3.11'
+ dir: container-platform/3.11
enterprise-4.1:
name: '4.1'
dir: container-platform/4.1
+ enterprise-4.2:
+ name: '4.2'
+ dir: container-platform/4.2
+ enterprise-4.3:
+ name: '4.3'
+ dir: container-platform/4.3
+ enterprise-4.4:
+ name: '4.4'
+ dir: container-platform/4.4
+ enterprise-4.5:
+ name: '4.5'
+ dir: container-platform/4.5
+ enterprise-4.6:
+ name: '4.6'
+ dir: container-platform/4.6
+ enterprise-4.7:
+ name: '4.7'
+ dir: container-platform/4.7
+ enterprise-4.8:
+ name: '4.8'
+ dir: container-platform/4.8
+ enterprise-4.9:
+ name: '4.9'
+ dir: container-platform/4.9
+openshift-dedicated:
+ name: OpenShift Dedicated
+ author: OpenShift Documentation Project
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ enterprise-3.11:
+ name: '3'
+ dir: dedicated/3
+ dedicated-4:
+ name: ''
+ dir: dedicated/
+openshift-aro:
+ name: Azure Red Hat OpenShift
+ author: OpenShift Documentation Project
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ enterprise-3.11:
+ name: '3'
+ dir: aro/3
+ enterprise-4.3:
+ name: '4'
+ dir: aro/4
+openshift-rosa:
+ name: Red Hat OpenShift Service on AWS
+ author: OpenShift Documentation Project
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ dedicated-4:
+ name: ''
+ dir: rosa/
+partner-roks:
+ name: Red Hat OpenShift on IBM Cloud
+ author: OpenShift Documentation Project
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ enterprise-4.3:
+ name: '4'
+ dir: roks/4
+openshift-webscale:
+ name: OpenShift Container Platform
+ author: OpenShift Documentation Project
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ enterprise-4.4:
+ name: '4.4'
+ dir: container-platform-ocp/4.4
+ enterprise-4.5:
+ name: '4.5'
+ dir: container-platform-ocp/4.5
+ enterprise-4.7:
+ name: '4.7'
+ dir: container-platform-ocp/4.7
+ enterprise-4.8:
+ name: '4.8'
+ dir: container-platform-ocp/4.8
+openshift-acs:
+ name: Red Hat Advanced Cluster Security for Kubernetes
+ author: OpenShift documentation team
+ site: commercial
+ site_name: Documentation
+ site_url: https://docs.openshift.com/
+ branches:
+ rhacs-docs:
+ name: '3.66'
+ dir: acs/3.66
+ rhacs-docs-3.65:
+ name: '3.65'
+ dir: acs/3.65
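
A note on how these fields fit together: each key under branches is a git branch, and dir is the path that branch's build is published under, relative to the distro's site_url. A minimal JavaScript sketch of that resolution, assuming the map above is parsed into a plain object (the function is illustrative, not AsciiBinder's actual Ruby code):

// sketch: compute the published URL prefix for one distro/branch pair,
// e.g. publishedUrl(map, "openshift-enterprise", "enterprise-4.8")
function publishedUrl(distroMap, distroKey, branch) {
  var distro = distroMap[distroKey];
  var entry = distro.branches[branch];
  return distro.site_url + entry.dir; // "https://docs.openshift.com/" + "container-platform/4.8"
}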
diff --git a/_images/commercial-masthead.jpg b/_images/commercial-masthead.jpg
new file mode 100644
index 000000000000..5793b3ae44f9
Binary files /dev/null and b/_images/commercial-masthead.jpg differ
diff --git a/_images/okd_logo.svg b/_images/okd_logo.svg
new file mode 100644
index 000000000000..8e470cee0e9a
--- /dev/null
+++ b/_images/okd_logo.svg
@@ -0,0 +1,24 @@
+<!-- okd_logo.svg: 24 lines of SVG markup, lost in extraction and not reproduced here -->
diff --git a/_javascripts/clipboard.js b/_javascripts/clipboard.js
new file mode 100644
index 000000000000..f9686171aa1c
--- /dev/null
+++ b/_javascripts/clipboard.js
@@ -0,0 +1,52 @@
+// This file runs the Clipboard.js functionality
+document.querySelectorAll('div.listingblock, div.literalblock').forEach((codeblock, index) => {
+  // the original HTML string was lost in extraction; the button markup below
+  // is a reconstruction that targets the code block by a generated id
+  codeblock.getElementsByTagName('pre')[0].id = "code-block-" + index;
+  codeblock.getElementsByTagName('pre')[0].insertAdjacentHTML("beforebegin",
+    "<button class=\"copy-btn\" data-clipboard-target=\"#code-block-" + index + "\">Copy</button>");
+});
+
+// markup for the collapse/expand-all control inserted before each <details>
+// element below (also reconstructed): the control carries the classes that
+// collapseExpandAll() toggles, and the label starts as "Expand all"
+const collapsibleButtonHTML = "<button class=\"fa fa-angle-double-down button-collapse-expand\" onclick=\"collapseExpandAll()\"><span class=\"span-collapse-expand-all\">Expand all</span></button>";
+
+const collapsibleDetails = document.getElementsByTagName("details");
+
+for (var i=0; i < collapsibleDetails.length; i++) {
+ collapsibleDetails[i].insertAdjacentHTML('beforebegin', collapsibleButtonHTML);
+}
+
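+// toggle every <details> element on the page open or closed, keyed off the
+// current tooltip text, and flip each control's icon and label to match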
+function collapseExpandAll() {
+ const collapseExpandButtons = document.getElementsByClassName("button-collapse-expand");
+ const collapsibleTooltip = document.getElementsByClassName("span-collapse-expand-all");
+
+ if (collapsibleTooltip[0].innerHTML == "Collapse all") {
+ for (var i=0; i < collapsibleDetails.length; i++) {
+ collapsibleDetails[i].removeAttribute("open");
+ }
+ for (var j=0; j < collapsibleTooltip.length; j++) {
+ collapsibleTooltip[j].innerHTML = "Expand all";
+ }
+ for (var k=0; k < collapseExpandButtons.length; k++) {
+ collapseExpandButtons[k].classList.remove("fa-angle-double-up");
+ collapseExpandButtons[k].classList.add("fa-angle-double-down");
+ }
+ } else {
+ for (var i=0; i < collapsibleDetails.length; i++) {
+ collapsibleDetails[i].setAttribute("open", "");
+ }
+ for (var j=0; j < collapsibleTooltip.length; j++) {
+ collapsibleTooltip[j].innerHTML = "Collapse all";
+ }
+ for (var k=0; k < collapseExpandButtons.length; k++) {
+ collapseExpandButtons[k].classList.remove("fa-angle-double-down");
+ collapseExpandButtons[k].classList.add("fa-angle-double-up");
+ }
+ }
+}
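
A sketch of how the injected copy buttons could then be activated with the Clipboard.js library named in the file's header comment; the .copy-btn selector and data-clipboard-target wiring match the reconstructed markup above and are assumptions:

// assumes the Clipboard.js library is already loaded on the page
var clipboard = new ClipboardJS('.copy-btn');
clipboard.on('success', function(e) {
  e.clearSelection(); // drop the selection highlight after a successful copy
});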
diff --git a/_javascripts/hc-search.js b/_javascripts/hc-search.js
index ef2a386081e3..d8b47e1424dd 100644
--- a/_javascripts/hc-search.js
+++ b/_javascripts/hc-search.js
@@ -1,51 +1,107 @@
function hcSearchCategory(label, version) {
// optional version filters search results for a single specific product version
-// currently can be used with OCP and Origin only
+// currently can be used with OCP and OKD docs only
- var modalSearch = document.getElementById("hc-search-modal");
- var searchBtn = document.getElementById("hc-search-btn");
- var closeModal = document.getElementById("hc-modal-close");
- var searchResults = document.getElementById("hc-search-results");
- var query = document.getElementById("hc-search-input");
+ // elements used repeatedly:
+ var modalSearch = $("#hc-search-modal");
+ var searchBtn = $("#hc-search-btn");
+ var closeModal = $("#hc-modal-close");
+ var query = $("#hc-search-input");
// pressing enter in the input = search btn click
- query.addEventListener("keyup", function(event) {
+ query.keyup( function(event) {
event.preventDefault();
if (event.keyCode == 13) {
searchBtn.click();
}
});
- //prepare iframe (without source)
- var iframe = document.createElement("iframe");
- iframe.frameBorder=0;
- iframe.width="100%";
- iframe.height=0.7*window.innerHeight;
- iframe.id="search-result-iframe";
-
- // open the modal and finalize the iframe on click
- searchBtn.onclick = function() {
- if (query.value) {
- modalSearch.style.display = "block";
- // limit search to a signle version, if specified
- var urlFilter = (typeof version === "undefined" || version == "Branch Build") ? "" : (" url:*\\/" + version + "\\/*");
- var iframeSrc = "https://help.openshift.com/customsearch.html?q=" +
- encodeURIComponent(query.value) +
- encodeURIComponent(urlFilter) +
- "&l=" + encodeURIComponent(label);
- iframe.setAttribute("src", iframeSrc);
- searchResults.appendChild(iframe);
+ // open the modal and fetch the first set of results on click
+ searchBtn.click(function() {
+ if (query.val()) {
+ // remove any results from previous searches
+ $("#hc-search-results").empty();
+ var searchParams = {
+ si: 0,
+ q: query.val(),
+ label: label,
+ urlFilter: (typeof version === "undefined" || version == "Branch Build") ? "" : (" url:*\\/" + version.toLowerCase() + "\\/*")
+ };
+ // work around the current OKD-specific version=4 and URL discrepancy
+ if (window.location.href.includes("docs.okd.io/latest/") && version == 4) searchParams.urlFilter = " url:*\\/latest\\/*"
+ modalSearch.show();
+ hcsearch(searchParams);
+ }
+ });
+
+ // hide search modal by 'X' or by clicking outside of the modal
+ closeModal.click(function() {
+ modalSearch.hide();
+ });
+ $(window).click(function(event) {
+ if ($(event.target).is(modalSearch)) {
+ modalSearch.hide();
}
- }
+ });
+} // hcSearchCategory(label, version)
+
+// fetch search results
+function hcsearch(searchParams) {
+ // elements used repeatedly
+ var hcMoreBtn = $("#hc-search-more-btn");
+ var hcSearchIndicator = $("#hc-search-progress-indicator");
+ var hcSearchResult = $("#hc-search-results");
- // hide search modal
- closeModal.onclick = function() {
- modalSearch.style.display = "none";
- }
+ // the "searchprovider" is to return a JSON response in the expected format
+ var searchprovider = "https://search.help.openshift.com/json";
+ var searchReq = { "q" : searchParams.q + searchParams.urlFilter,
+ "fields.label" : searchParams.label,
+ "start" : searchParams.si }
- window.onclick = function(event) {
- if (event.target == modalSearch) {
- modalSearch.style.display = "none";
+ hcMoreBtn.hide();
+ hcSearchIndicator.show();
+ $.get(searchprovider, searchReq).done(function (hcsearchresults) {
+ // GET success
+ if (hcsearchresults == "") {
+ // success, but no response (response code mismatch)
+ $("#hc-search-result").append("
An error occurred while retrieving search results. Please try again later.
");
+ hcSearchIndicator.hide();
}
- }
-} // hcSearchCategory(label)
+ if (!$.isEmptyObject(hcsearchresults.response.result)) {
+ // if there are any results
+ $(hcsearchresults.response.result).each(function () {
+      // row markup reconstructed; the result field names (url, title) are assumptions
+      var row = '<div class="hc-search-row"><a href="' + this.url + '" target="_blank">' + this.title + '</a></div>';
+ hcSearchResult.append(row);
+ });
+ if (hcsearchresults.response.page_number < hcsearchresults.response.page_count) {
+ // if there are more results beyond the retrieved ones
+ // index of the first item on the next page (first item = 0, first page = 1)
+ searchParams.si = hcsearchresults.response.page_number * hcsearchresults.response.page_size;
+ // replace any existing click handler with one to fetch the next set of results
+ hcMoreBtn.off('click');
+ hcMoreBtn.click(function() {
+ hcsearch(searchParams);
+ });
+ hcMoreBtn.show();
+ } else {
+ // no more results beyond the retrieved ones
+ hcSearchResult.append("
No more results.
");
+ }
+ } else {
+      if (searchParams.si > 0) {
+        // no results returned, but some already displayed
+        hcSearchResult.append("<p>No more results.</p>");
+      } else {
+        // no results on initial search
+        hcSearchResult.append("<p>An error occurred while retrieving search results. Please try again later.</p>");
+      }
+    }
+    // closing braces reconstructed to balance the .done() callback and function
+    hcSearchIndicator.hide();
+  });
+} // function hcsearch()
\ No newline at end of file
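
For context, a sketch of how a page template might call into this file, with a hypothetical label value (the #hc-search-* element IDs used above are expected to exist on the page):

// hypothetical wiring on a docs page: scope search to the 4.8 docs
$(function() {
  hcSearchCategory("Documentation", "4.8"); // the label string is an assumption
});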
diff --git a/_javascripts/page-loader.js b/_javascripts/page-loader.js
index bc0a643d8f39..9b04ded6476d 100644
--- a/_javascripts/page-loader.js
+++ b/_javascripts/page-loader.js
@@ -48,26 +48,161 @@ function versionSelector(list) {
}
+// checks what language was selected and then sends the user to the portal for their localized version
+function selectLang(langList) {
+
+ var lang = langList[langList.selectedIndex].value;
+ var winPath = window.location.pathname;
+
+ console.log("Lang: " + lang);
+ console.log("Win Path: " + winPath);
+
+ var currentVersion = document.getElementById("version-selector").value;
+ // var currentVersion = "4.7";
+ console.log("CurrentVersion: " + currentVersion);
+
+ // path for the file to reference on portal (the last bit removes .html)
+ var path = winPath.substring(winPath.lastIndexOf(currentVersion) + currentVersion.length, winPath.length - 5);
+
+ console.log("Path: " + path);
+
+ var portalBaseURL = "https://access.redhat.com/documentation";
+ var finalURL = portalBaseURL + "/" + lang + "/openshift_container_platform/" + currentVersion + "/html/" + path;
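+  // for example (hypothetical values): lang "ja-jp", currentVersion "4.7", and
+  // path "/welcome/index" give
+  // https://access.redhat.com/documentation/ja-jp/openshift_container_platform/4.7/html//welcome/index
+  // (path keeps its leading slash, hence the doubled slash after /html)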
+
+ console.log("Final URL: " + finalURL);
+
+ // alert(finalURL);
+ window.location.href = finalURL;
+
+}
+
+// sets the current version in the drop down and sets up suggest an edit options
function selectVersion(currentVersion) {
+
+ // currentVersion = "3.11"; // for testing
+
+ // set the version selector to what the current version is
var el = document.getElementById("version-selector");
if(el) {
el.value = currentVersion;
}
- // alert(currentVersion);
-
- // in enterprise branch 4, we have modules and this is an attempt to load the
- // modules by double clicking on them.
- if(currentVersion.charAt(0) === "4") {
- var element = document.getElementsByTagName('h2');
- Object.entries(element).map(( object ) => {
- object[1].addEventListener("dblclick", function() {
- // alert(this.id);
- // alert(this.id.split("_", 1)[0] + ".adoc");
- var fn = this.id.split("_", 1)[0] + ".adoc";
- window.open("https://github.com/openshift/openshift-docs/tree/enterprise-" +
- currentVersion + "/modules/" + fn, "_new");
+
+  // check the docs referrer and add a warning box when the reader arrives from the ROSA docs
+ addReferrer();
+
+  // the rest creates a "Suggest an edit" link for the h1 and h2 elements
+
+ // only enabled at the moment on the 3.11 docs
+ if(currentVersion != "3.11") return;
+
+ var is3 = (currentVersion.charAt(0) == 3);
+ var is4 = (currentVersion.charAt(0) == 4);
+
+  // version 4 and version 3 books are put together differently. In 3, what
+  // you see is (mostly) what you edit and there are not many includes. In 4,
+  // everything (mostly) is an include and the wrapper is just an assembly.
+
+  // in version 3, there are generally no modules, and the page you are on is
+  // the page you will edit, so the logic is a bit different.
+
+  // there is always just one h1, whether you are on version 4 or 3.
+  // In 4, this is the main assembly; in 3, this is the file to edit.
+  // version 4 assumes that each h2 section's id is named after the module
+  // file it resides in. This is the convention.
+
+ // we start with adding suggest an edit to the main assembly/file
+ var h1s = document.getElementsByTagName('h1');
+ var h1 = h1s[0]; // there is only one ever
+
+  // the main file to edit is the path segment after the version, with the
+  // .html extension swapped for .adoc.
+  // Example: https://docs.openshift.com/container-platform/4.4/updating/updating-cluster-between-minor.html
+  // file path is updating/updating-cluster-between-minor.adoc
+
+  var mainFileToEdit =
+ window.location.pathname.substring(
+ window.location.pathname.lastIndexOf(currentVersion) +
+ currentVersion.length, window.location.pathname.length - 4);
+
+ // rest api is put together automatically, so ignore
+ if(mainFileToEdit.includes("rest_api")) return;
+
+ var fn = mainFileToEdit + "adoc"; // add adoc to file name
+
+ var message = "message=[Suggested Edit] for " + fn + "' target='_new' id='" + fn + "' style='font-size: x-small; display: inline; visibility: hidden'>Suggest an edit";
+
+ // in 4, edit the file in master, so it can cped to the right places. In 3,
+ // edit in the branch
+ h1.innerHTML += " Suggest an edit";
+
+  // add the edit link plus mouseover and mouseout handlers to each h2 tag to
+  // show or hide the link. In 4, the h2 also has an 'a' tag already, so the
+  // tag we are looking for here is the second one ([1] and not [0]).
+  // (the loop around the handlers is reconstructed; the original lines were
+  // garbled in extraction)
+  var h2s = document.getElementsByTagName('h2');
+  for (var i = 0; i < h2s.length; i++) {
+    var h2 = h2s[i];
+    h2.innerHTML += " " + editLink;
+
+    h2.addEventListener("mouseover", function() {
+      this.getElementsByTagName('a')[1].style.visibility = "visible";
+    });
+
+    h2.addEventListener("mouseout", function() {
+      this.getElementsByTagName('a')[1].style.visibility = "hidden";
+    });
+  }
+}
+
+function addReferrer() {
+
+ // grab target element reference
+
+ // we want to add a notice to the top of the OCP docs page if the reader is coming from ROSA docs
+
+ // check the referrer
+ // alert(document.referrer);
+
+ // var ref = "http://127.0.0.1/addreferrer";
+ // var ref = "http://127.0.0.1/addreferrer/authentication/understanding-authentication.html";
+
+ var ref = "https://docs.openshift.com/rosa";
+
+ if(document.referrer && document.referrer.startsWith(ref) && !document.location.href.startsWith(ref)) {
+
+ // get the first section/header
+ var elements = document.getElementsByClassName('sect1');
+ var requiredElement = elements[0];
+
+ // the warning text
+    var text = '<p>This is the OpenShift Container Platform documentation. There may be some sections that don\'t apply to ROSA docs.</p>';
+    // the insertion call and closing braces below are reconstructed; the
+    // original lines were lost in extraction
+    requiredElement.insertAdjacentHTML('beforebegin', text);
+  }
+}
- This documentation is for Beta only and might not be complete or fully tested.
+ <% if (version == "4.10" || distro_key == "openshift-webscale") %>
+ This documentation is work in progress and might not be complete or fully tested.
+
+ This documentation is work in progress and might not be complete or fully tested.
+
+ <% end %>
+ <%
+ if ((unsupported_versions.include? version) && (distro_key == "openshift-enterprise"))
+ %>
+ You are viewing documentation for a release that is no longer supported. The latest supported version of version 3 is [3.11]. For the most recent version 4, see [4]
+
+ You are viewing documentation for a release that is no longer supported. The latest supported version of version 3 is [3.11]. For the most recent version 4, see [4]
+
+ <% end %>
+ <% if (distro_key == "openshift-aro" && version == "3") %>
+
+
+ Important
+
+
+
Azure Red Hat OpenShift 3.11 will be retired 30 June 2022. Support for creation of new Azure Red Hat OpenShift 3.11 clusters continues through 30 November 2020. Following retirement, remaining Azure Red Hat OpenShift 3.11 clusters will be shut down to prevent security vulnerabilities.
+
+ <% end %>
+ <% if (distro_key == "openshift-aro" && version == "4") %>
+
+
+ Important
+
+
Azure Red Hat OpenShift is supported by Red Hat and Microsoft. As of February 2021, the documentation will be hosted by Microsoft and Red Hat as outlined below.
diff --git a/_templates/page.html.erb b/_templates/page.html.erb
index 9d7908e1e7b6..7661b0fa28fa 100644
--- a/_templates/page.html.erb
+++ b/_templates/page.html.erb
@@ -1,4 +1,19 @@
-<%= render((distro_key['atomic'] ? "_templates/_page_atomic.html.erb" : "_templates/_page_openshift.html.erb"),
+<%
+# AsciiBinder doesn't set subgroup variables for multi-level groups (more than
+# two levels deep). So this hack sets them here based on the topic id, which is
+# of the form: Welcome1::Welcome2::Welcome3::Welcome3TopicTitle
+
+ if topic_id.scan(/::/).length == 3
+ topics = topic_id.split("::")
+ subgroup_id = topics[0] + "::" + topics[1]
+ subsubgroup_id = topics[0] + "::" + topics[1] + "::" + topics[2]
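+    # e.g. topic_id "Welcome1::Welcome2::Welcome3::TopicTitle" yields
+    # subgroup_id "Welcome1::Welcome2" and
+    # subsubgroup_id "Welcome1::Welcome2::Welcome3"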
+    # TODO: revisit if breadcrumbs become an issue
+ breadcrumb_subsubgroup_block = ""
+ breadcrumb_topic = topic_title
+ end
+%>
+
+<%= render("_templates/_page_openshift.html.erb",
:distro_key => distro_key,
:distro => distro,
:version => version,
@@ -6,6 +21,7 @@
:group_id => group_id,
:group_title => group_title,
:subgroup_id => subgroup_id,
+ :subsubgroup_id => subsubgroup_id,
:subgroup_title => subgroup_title,
:topic_id => topic_id,
:topic_title => topic_title,
@@ -20,6 +36,7 @@
:breadcrumb_group => breadcrumb_group,
:breadcrumb_root => breadcrumb_root,
:breadcrumb_subgroup_block => breadcrumb_subgroup_block,
+ :breadcrumb_subsubgroup_block => breadcrumb_subsubgroup_block,
:breadcrumb_topic => breadcrumb_topic,
:subtopic_shim => subtopic_shim,
:repo_path => repo_path) %>
diff --git a/_topic_map.yml b/_topic_map.yml
index c02171560559..a3cea9afdb3c 100644
--- a/_topic_map.yml
+++ b/_topic_map.yml
@@ -7,7 +7,7 @@
# Dir: origin_of_the_species <= Directory name of topic group
# Topics:
# - Name: The Majestic Marmoset <= Topic name
-# File: the_majestic_marmoset <= Topic file under group dir +/- .adoc
+#      File: the_majestic_marmoset <= Topic file under group dir +/- .adoc
# - Name: The Curious Crocodile <= Topic 2 name
# File: the_curious_crocodile <= Topic 2 file
# - Name: The Numerous Nematodes <= Sub-topic group name
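
As an aside on how this schema drives a build, a short JavaScript sketch of walking one topic group for one distro, assuming the YAML above is parsed into plain objects (illustrative only; AsciiBinder's real traversal is Ruby):

// collect "dir/file.adoc" paths for one distro from a parsed topic group
function topicFiles(group, distro, prefix) {
  var out = [];
  (group.Topics || []).forEach(function (topic) {
    // Distros is a comma-separated list; a missing value means "all distros"
    if (topic.Distros && topic.Distros.split(",").indexOf(distro) === -1) return;
    if (topic.Dir) out = out.concat(topicFiles(topic, distro, prefix + topic.Dir + "/"));
    else out.push(prefix + topic.File + ".adoc");
  });
  return out;
}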
@@ -22,42 +22,97 @@
# topic groups and topics on the main page.
---
-Name: Welcome
+Name: About
Dir: welcome
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated,openshift-online
Topics:
- Name: Welcome
File: index
-- Name: Accessing your services
- File: accessing-your-services
- Distros: openshift-dedicated
+- Name: Learn more about OpenShift Container Platform
+ File: learn_more_about_openshift
+ Distros: openshift-enterprise
+- Name: About OpenShift Kubernetes Engine
+ File: oke_about
+ Distros: openshift-enterprise
- Name: Legal notice
File: legal-notice
+ Distros: openshift-enterprise,openshift-dedicated,openshift-online
+---
+Name: What's new?
+Dir: whats_new
+Distros: openshift-origin
+Topics:
+- Name: New features and enhancements
+ File: new-features
+- Name: Deprecated features
+ File: deprecated-features
+---
+Name: Getting started
+Dir: getting_started
+Distros: openshift-dedicated
+Topics:
+- Name: Accessing your services
+ File: accessing-your-services
+- Name: Scaling your cluster
+ File: scaling-your-cluster
+- Name: Deleting your cluster
+ File: deleting-your-cluster
+- Name: Networking
+ File: dedicated-networking
+---
+Name: Cloud infrastructure access
+Dir: cloud_infrastructure_access
+Distros: openshift-dedicated
+Topics:
+- Name: Understanding cloud infrastructure access
+ File: dedicated-understanding-aws
+- Name: Accessing AWS infrastructure
+ File: dedicated-aws-access
+- Name: Configuring AWS VPC peering
+ File: dedicated-aws-peering
+- Name: Configuring AWS VPN
+ File: dedicated-aws-vpn
+- Name: Configuring AWS Direct Connect
+ File: dedicated-aws-dc
+- Name: Configuring a private cluster
+ File: dedicated-aws-private-cluster
---
Name: Release notes
Dir: release_notes
Distros: openshift-enterprise
Topics:
-- Name: OpenShift Container Platform 4.1 release notes
- File: ocp-4-1-release-notes
+- Name: OpenShift Container Platform 4.10 release notes
+ File: ocp-4-10-release-notes
- Name: Versioning policy
File: versioning-policy
---
Name: Architecture
Dir: architecture
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
Topics:
- Name: Product architecture
File: architecture
- Name: Installation and update
+ Distros: openshift-enterprise,openshift-origin
File: architecture-installation
- Name: The control plane
File: control-plane
- Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
- Name: Understanding OpenShift development
File: understanding-development
+ Distros: openshift-enterprise
+- Name: Understanding OKD development
+ File: understanding-development
+ Distros: openshift-origin
+- Name: Fedora CoreOS
+ File: architecture-rhcos
+ Distros: openshift-origin
- Name: Red Hat Enterprise Linux CoreOS
File: architecture-rhcos
+ Distros: openshift-enterprise
+- Name: Admission plug-ins
+ File: admission-plug-ins
+ Distros: openshift-enterprise,openshift-aro
---
Name: Administering a cluster
Dir: administering_a_cluster
@@ -65,181 +120,940 @@ Distros: openshift-dedicated
Topics:
- Name: The dedicated-admin role
File: dedicated-admin-role
+- Name: The cluster-admin role
+ File: cluster-admin-role
---
Name: Installing
Dir: installing
-Distros: openshift-origin,openshift-enterprise
+Distros: openshift-origin,openshift-enterprise,openshift-webscale
Topics:
+- Name: Installation overview
+ File: index
+ Distros: openshift-origin,openshift-enterprise
+- Name: Selecting an installation method and preparing a cluster
+ File: installing-preparing
+ Distros: openshift-origin,openshift-enterprise
+- Name: Mirroring images for a disconnected installation
+ File: installing-mirroring-installation-images
+ Distros: openshift-origin,openshift-enterprise
- Name: Installing on AWS
Dir: installing_aws
+ Distros: openshift-origin,openshift-enterprise
Topics:
+ - Name: Preparing to install on AWS
+ File: preparing-to-install-on-aws
- Name: Configuring an AWS account
File: installing-aws-account
+ - Name: Manually creating IAM
+ File: manually-creating-iam
- Name: Installing a cluster quickly on AWS
File: installing-aws-default
- Name: Installing a cluster on AWS with customizations
File: installing-aws-customizations
- Name: Installing a cluster on AWS with network customizations
File: installing-aws-network-customizations
+ - Name: Installing a cluster on AWS in a restricted network
+ File: installing-restricted-networks-aws-installer-provisioned
+ - Name: Installing a cluster on AWS into an existing VPC
+ File: installing-aws-vpc
+ - Name: Installing a private cluster on AWS
+ File: installing-aws-private
+ - Name: Installing a cluster on AWS into a government or secret region
+ File: installing-aws-government-region
+ - Name: Installing a cluster on AWS into a China region
+ File: installing-aws-china
+ - Name: Installing a cluster on AWS using CloudFormation templates
+ File: installing-aws-user-infra
+ - Name: Installing a cluster on AWS in a restricted network with user-provisioned infrastructure
+ File: installing-restricted-networks-aws
- Name: Uninstalling a cluster on AWS
File: uninstalling-cluster-aws
-- Name: Installing on user-provisioned AWS
- Dir: installing_aws_user_infra
+- Name: Installing on Azure
+ Dir: installing_azure
+ Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Installing a cluster on AWS using CloudFormation templates
- File: installing-aws-user-infra
+ - Name: Preparing to install on Azure
+ File: preparing-to-install-on-azure
+ - Name: Configuring an Azure account
+ File: installing-azure-account
+ - Name: Manually creating IAM
+ File: manually-creating-iam-azure
+ - Name: Installing a cluster quickly on Azure
+ File: installing-azure-default
+ - Name: Installing a cluster on Azure with customizations
+ File: installing-azure-customizations
+ - Name: Installing a cluster on Azure with network customizations
+ File: installing-azure-network-customizations
+ - Name: Installing a cluster on Azure into an existing VNet
+ File: installing-azure-vnet
+ - Name: Installing a private cluster on Azure
+ File: installing-azure-private
+ - Name: Installing a cluster on Azure into a government region
+ File: installing-azure-government-region
+ - Name: Installing a cluster on Azure using ARM templates
+ File: installing-azure-user-infra
+ - Name: Uninstalling a cluster on Azure
+ File: uninstalling-cluster-azure
+- Name: Installing on Azure Stack Hub
+ Dir: installing_azure_stack_hub
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Preparing to install on Azure Stack Hub
+ File: preparing-to-install-on-azure-stack-hub
+ - Name: Configuring an Azure Stack Hub account
+ File: installing-azure-stack-hub-account
+ - Name: Manually creating IAM for Azure Stack Hub
+ File: manually-creating-iam-azure-stack-hub
+ - Name: Installing a cluster on Azure Stack Hub using ARM templates
+ File: installing-azure-stack-hub-user-infra
+- Name: Installing on GCP
+ Dir: installing_gcp
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Preparing to install on GCP
+ File: preparing-to-install-on-gcp
+ - Name: Configuring a GCP project
+ File: installing-gcp-account
+ - Name: Manually creating IAM
+ File: manually-creating-iam-gcp
+ - Name: Installing a cluster quickly on GCP
+ File: installing-gcp-default
+ - Name: Installing a cluster on GCP with customizations
+ File: installing-gcp-customizations
+ - Name: Installing a cluster on GCP with network customizations
+ File: installing-gcp-network-customizations
+ - Name: Installing a cluster on GCP in a restricted network
+ File: installing-restricted-networks-gcp-installer-provisioned
+ - Name: Installing a cluster on GCP into an existing VPC
+ File: installing-gcp-vpc
+ - Name: Installing a private cluster on GCP
+ File: installing-gcp-private
+ - Name: Installing a cluster on GCP using Deployment Manager templates
+ File: installing-gcp-user-infra
+ - Name: Installing a cluster into a shared VPC on GCP using Deployment Manager templates
+ File: installing-gcp-user-infra-vpc
+ - Name: Installing a cluster on GCP in a restricted network with user-provisioned infrastructure
+ File: installing-restricted-networks-gcp
+ - Name: Uninstalling a cluster on GCP
+ File: uninstalling-cluster-gcp
- Name: Installing on bare metal
Dir: installing_bare_metal
+ Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Installing a cluster on bare metal
+ - Name: Preparing to install on bare metal
+ File: preparing-to-install-on-bare-metal
+ - Name: Installing a user-provisioned cluster on bare metal
File: installing-bare-metal
+ - Name: Installing a user-provisioned bare metal cluster with network customizations
+ File: installing-bare-metal-network-customizations
+ - Name: Installing a user-provisioned bare metal cluster on a restricted network
+ File: installing-restricted-networks-bare-metal
+- Name: Installing on a single node
+ Dir: installing_sno
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Preparing to install on a single node
+ File: install-sno-preparing-to-install-sno
+ - Name: Installing on a single node
+ File: install-sno-installing-sno
+- Name: Deploying installer-provisioned clusters on bare metal
+ Dir: installing_bare_metal_ipi
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Overview
+ File: ipi-install-overview
+ - Name: Prerequisites
+ File: ipi-install-prerequisites
+ - Name: Setting up the environment for an OpenShift installation
+ File: ipi-install-installation-workflow
+ - Name: Post-installation configuration
+ File: ipi-install-post-installation-configuration
+ - Name: Expanding the cluster
+ File: ipi-install-expanding-the-cluster
+ - Name: Troubleshooting
+ File: ipi-install-troubleshooting
+- Name: Deploying installer-provisioned clusters on IBM Cloud
+ Dir: installing_ibm_cloud
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Prerequisites
+ File: install-ibm-cloud-prerequisites
+ - Name: Installation workflow
+ File: install-ibm-cloud-installation-workflow
+- Name: Installing with z/VM on IBM Z and LinuxONE
+ Dir: installing_ibm_z
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Preparing to install with z/VM on IBM Z and LinuxONE
+ File: preparing-to-install-on-ibm-z
+ - Name: Installing a cluster with z/VM on IBM Z and LinuxONE
+ File: installing-ibm-z
+ - Name: Restricted network IBM Z installation with z/VM
+ File: installing-restricted-networks-ibm-z
+- Name: Installing with RHEL KVM on IBM Z and LinuxONE
+ Dir: installing_ibm_z
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Preparing to install with RHEL KVM on IBM Z and LinuxONE
+ File: preparing-to-install-on-ibm-z-kvm
+ - Name: Installing a cluster with RHEL KVM on IBM Z and LinuxONE
+ File: installing-ibm-z-kvm
+ - Name: Restricted network IBM Z installation with RHEL KVM
+ File: installing-restricted-networks-ibm-z-kvm
+- Name: Installing on IBM Power Systems
+ Dir: installing_ibm_power
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Preparing to install on IBM Power Systems
+ File: preparing-to-install-on-ibm-power
+ - Name: Installing a cluster on IBM Power Systems
+ File: installing-ibm-power
+ - Name: Restricted network IBM Power Systems installation
+ File: installing-restricted-networks-ibm-power
+- Name: Installing on OpenStack
+ Dir: installing_openstack
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Preparing to install on OpenStack
+ File: preparing-to-install-on-openstack
+# - Name: Installing a cluster on OpenStack
+# File: installing-openstack-installer
+ - Name: Installing a cluster on OpenStack with customizations
+ File: installing-openstack-installer-custom
+ - Name: Installing a cluster on OpenStack with Kuryr
+ File: installing-openstack-installer-kuryr
+ - Name: Installing a cluster that supports SR-IOV compute machines on OpenStack
+ File: installing-openstack-installer-sr-iov
+ - Name: Installing a cluster on OpenStack on your own infrastructure
+ File: installing-openstack-user
+ - Name: Installing a cluster on OpenStack with Kuryr on your own infrastructure
+ File: installing-openstack-user-kuryr
+ - Name: Installing a cluster on OpenStack on your own SR-IOV infrastructure
+ File: installing-openstack-user-sr-iov
+ # - Name: Installing a cluster on OpenStack with Kuryr on your own SR-IOV infrastructure
+ # File: installing-openstack-user-sr-iov-kuryr
+ - Name: Installing a cluster on OpenStack in a restricted network
+ File: installing-openstack-installer-restricted
+ # - Name: Load balancing deployments on OpenStack
+ # File: installing-openstack-load-balancing
+ - Name: Uninstalling a cluster on OpenStack
+ File: uninstalling-cluster-openstack
+ - Name: Uninstalling a cluster on OpenStack from your own infrastructure
+ File: uninstalling-openstack-user
+- Name: Installing on RHV
+ Dir: installing_rhv
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Preparing to install on RHV
+ File: preparing-to-install-on-rhv
+ - Name: Installing a cluster quickly on RHV
+ File: installing-rhv-default
+ - Name: Installing a cluster on RHV with customizations
+ File: installing-rhv-customizations
+ - Name: Installing a cluster on RHV with user-provisioned infrastructure
+ File: installing-rhv-user-infra
+ - Name: Installing a cluster on RHV in a restricted network
+ File: installing-rhv-restricted-network
+ - Name: Uninstalling a cluster on RHV
+ File: uninstalling-cluster-rhv
+- Name: Installing on oVirt
+ Dir: installing_rhv
+ Distros: openshift-origin
+ Topics:
+ - Name: Installing a cluster quickly on oVirt
+ File: installing-rhv-default
+ - Name: Installing a cluster on oVirt with customizations
+ File: installing-rhv-customizations
+ - Name: Installing a cluster on oVirt with user-provisioned infrastructure
+ File: installing-rhv-user-infra
+ - Name: Uninstalling a cluster on oVirt
+ File: uninstalling-cluster-rhv
- Name: Installing on vSphere
Dir: installing_vsphere
+ Distros: openshift-origin,openshift-enterprise
Topics:
+ - Name: Preparing to install on vSphere
+ File: preparing-to-install-on-vsphere
- Name: Installing a cluster on vSphere
+ File: installing-vsphere-installer-provisioned
+ - Name: Installing a cluster on vSphere with customizations
+ File: installing-vsphere-installer-provisioned-customizations
+ - Name: Installing a cluster on vSphere with network customizations
+ File: installing-vsphere-installer-provisioned-network-customizations
+ - Name: Installing a cluster on vSphere with user-provisioned infrastructure
File: installing-vsphere
-- Name: Gathering installation logs
- File: installing-gather-logs
+ - Name: Installing a cluster on vSphere with user-provisioned infrastructure and network customizations
+ File: installing-vsphere-network-customizations
+ - Name: Installing a cluster on vSphere in a restricted network
+ File: installing-restricted-networks-installer-provisioned-vsphere
+ - Name: Installing a cluster on vSphere in a restricted network with user-provisioned infrastructure
+ File: installing-restricted-networks-vsphere
+ - Name: Uninstalling a cluster on vSphere that uses installer-provisioned infrastructure
+ File: uninstalling-cluster-vsphere-installer-provisioned
+ - Name: Using the vSphere Problem Detector Operator
+ File: using-vsphere-problem-detector-operator
+- Name: Installing on VMC
+ Dir: installing_vmc
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Preparing to install on VMC
+ File: preparing-to-install-on-vmc
+ - Name: Installing a cluster on VMC
+ File: installing-vmc
+ - Name: Installing a cluster on VMC with customizations
+ File: installing-vmc-customizations
+ - Name: Installing a cluster on VMC with network customizations
+ File: installing-vmc-network-customizations
+ - Name: Installing a cluster on VMC in a restricted network
+ File: installing-restricted-networks-vmc
+ - Name: Installing a cluster on VMC with user-provisioned infrastructure
+ File: installing-vmc-user-infra
+ - Name: Installing a cluster on VMC with user-provisioned infrastructure and network customizations
+ File: installing-vmc-network-customizations-user-infra
+ - Name: Installing a cluster on VMC in a restricted network with user-provisioned infrastructure
+ File: installing-restricted-networks-vmc-user-infra
+ - Name: Uninstalling a cluster on VMC
+ File: uninstalling-cluster-vmc
+- Name: Installing on any platform
+ Dir: installing_platform_agnostic
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Installing a cluster on any platform
+ File: installing-platform-agnostic
- Name: Installation configuration
Dir: install_config
+ Distros: openshift-origin,openshift-enterprise
Topics:
- - Name: Available cluster customizations
- File: customizations
- Distros: openshift-enterprise,openshift-origin
+ - Name: Customizing nodes
+ File: installing-customizing
- Name: Configuring your firewall
File: configuring-firewall
+- Name: Validating an installation
+ File: validating-an-installation
+ Distros: openshift-origin,openshift-enterprise
+- Name: Troubleshooting installation issues
+ File: installing-troubleshooting
+ Distros: openshift-origin,openshift-enterprise
+- Name: Support for FIPS cryptography
+ File: installing-fips
+ Distros: openshift-enterprise,openshift-dedicated,openshift-online
+---
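+# Each `---` above and below separates one top-level book in this topic map.
+# A minimal entry sketch (illustrative names only, not real topics; this
+# assumes the Name/Dir/File/Distros/Topics schema used throughout the file):
+#
+# - Name: Title shown in the navigation
+#   File: adoc-basename-without-extension
+#   Distros: openshift-enterprise,openshift-origin  # omit to inherit the book value
+#
+# Nested assemblies carry a Dir plus their own Topics list instead of a File.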
+Name: Post-installation configuration
+Dir: post_installation_configuration
+Distros: openshift-origin,openshift-enterprise
+Topics:
+- Name: Post-installation configuration overview
+ File: index
+- Name: Configuring a private cluster
+ Distros: openshift-enterprise,openshift-origin
+ File: configuring-private-cluster
+- Name: Machine configuration tasks
+ File: machine-configuration-tasks
+- Name: Cluster tasks
+ File: cluster-tasks
+- Name: Node tasks
+ File: node-tasks
+- Name: Network configuration
+ File: network-configuration
+- Name: Storage configuration
+ File: storage-configuration
+- Name: Preparing for users
+ File: preparing-for-users
+- Name: Configuring alert notifications
+ File: configuring-alert-notifications
---
Name: Updating clusters
Dir: updating
Distros: openshift-origin,openshift-enterprise
Topics:
-- Name: Updating a cluster to a minor version from the web console
+- Name: Understanding the OpenShift Update Service
+ File: understanding-the-update-service
+- Name: Installing and configuring the OpenShift Update Service
+ File: installing-update-service
+# TODO: Remove below assembly for 4.10:
+- Name: Preparing to update to OpenShift Container Platform 4.9
+ File: updating-cluster-prepare
+ Distros: openshift-enterprise
+# TODO: Remove below assembly for 4.10:
+- Name: Preparing to update to OKD 4.9
+ File: updating-cluster-prepare
+ Distros: openshift-origin
+- Name: Updating a cluster between minor versions
+ File: updating-cluster-between-minor
+- Name: Updating a cluster within a minor version from the web console
File: updating-cluster
-# - Name: Updating a cluster to a minor version by using the CLI
-# File: updating-cluster-cli
+- Name: Updating a cluster within a minor version by using the CLI
+ File: updating-cluster-cli
+- Name: Performing an update using the canary rollout strategy
+ File: update-using-custom-machine-config-pools
- Name: Updating a cluster that includes RHEL compute machines
File: updating-cluster-rhel-compute
+ Distros: openshift-enterprise
+- Name: Updating a restricted network cluster
+ File: updating-restricted-network-cluster
+ Distros: openshift-enterprise
+- Name: Updating hardware on nodes running on vSphere
+ File: updating-hardware-on-nodes-running-on-vsphere
# - Name: Troubleshooting an update
# File: updating-troubleshooting
---
+Name: Support
+Dir: support
+Distros: openshift-enterprise,openshift-online,openshift-dedicated,openshift-origin
+Topics:
+- Name: Getting support
+ File: getting-support
+ Distros: openshift-enterprise,openshift-dedicated
+- Name: Remote health monitoring with connected clusters
+ Dir: remote_health_monitoring
+ Distros: openshift-enterprise,openshift-dedicated,openshift-origin
+ Topics:
+ - Name: About remote health monitoring
+ File: about-remote-health-monitoring
+ - Name: Showing data collected by remote health monitoring
+ File: showing-data-collected-by-remote-health-monitoring
+ - Name: Opting out of remote health reporting
+ File: opting-out-of-remote-health-reporting
+ - Name: Using Insights to identify issues with your cluster
+ File: using-insights-to-identify-issues-with-your-cluster
+ - Name: Using remote health reporting in a restricted network
+ File: remote-health-reporting-from-restricted-network
+ - Name: Configuring RHEL Simple Content Access
+ File: insights-operator-simple-access
+- Name: Gathering data about your cluster
+ File: gathering-cluster-data
+ Distros: openshift-enterprise,openshift-origin
+- Name: Summarizing cluster specifications
+ File: summarizing-cluster-specifications
+ Distros: openshift-enterprise,openshift-origin
+- Name: Troubleshooting
+ Dir: troubleshooting
+ Distros: openshift-enterprise,openshift-dedicated,openshift-origin
+ Topics:
+ - Name: Troubleshooting installations
+ File: troubleshooting-installations
+ - Name: Verifying node health
+ File: verifying-node-health
+ - Name: Troubleshooting CRI-O container runtime issues
+ File: troubleshooting-crio-issues
+ - Name: Troubleshooting operating system issues
+ File: troubleshooting-operating-system-issues
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Troubleshooting network issues
+ File: troubleshooting-network-issues
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Troubleshooting Operator issues
+ File: troubleshooting-operator-issues
+ - Name: Investigating pod issues
+ File: investigating-pod-issues
+ - Name: Troubleshooting the Source-to-Image process
+ File: troubleshooting-s2i
+ - Name: Troubleshooting storage issues
+ File: troubleshooting-storage-issues
+ - Name: Troubleshooting Windows container workload issues
+ File: troubleshooting-windows-container-workload-issues
+ - Name: Investigating monitoring issues
+ File: investigating-monitoring-issues
+ - Name: Diagnosing OpenShift CLI (oc) issues
+ File: diagnosing-oc-issues
+---
Name: Web console
-Dir: web-console
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Dir: web_console
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
Topics:
- Name: Accessing the web console
File: web-console
+- Name: Viewing cluster information
+ File: using-dashboard-to-get-cluster-information
+- Name: Adding user preferences
+ File: adding-user-preferences
+ Distros: openshift-enterprise,openshift-origin
- Name: Configuring the web console
File: configuring-web-console
+ Distros: openshift-enterprise,openshift-origin
+- Name: Customizing the web console
+ File: customizing-the-web-console
+ Distros: openshift-enterprise,openshift-origin
+- Name: Developer perspective
+ File: odc-about-developer-perspective
+- Name: Web terminal
+ File: odc-about-web-terminal
- Name: Disabling the web console
File: disabling-web-console
-- Name: Working with projects
- File: working-with-projects
+ Distros: openshift-enterprise,openshift-origin
+- Name: Creating quick start tutorials
+ File: creating-quick-start-tutorials
+ Distros: openshift-enterprise,openshift-origin
+---
+Name: CLI tools
+Dir: cli_reference
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+Topics:
+- Name: CLI tools overview
+ File: index
+- Name: OpenShift CLI (oc)
+ Dir: openshift_cli
+ Topics:
+ - Name: Getting started with the OpenShift CLI
+ File: getting-started-cli
+ - Name: Configuring the OpenShift CLI
+ File: configuring-cli
+ - Name: Extending the OpenShift CLI with plug-ins
+ File: extending-cli-plugins
+ Distros: openshift-enterprise,openshift-origin
+ - Name: OpenShift CLI developer command reference
+ File: developer-cli-commands
+ - Name: OpenShift CLI administrator command reference
+ File: administrator-cli-commands
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Usage of oc and kubectl commands
+ File: usage-oc-kubectl
+- Name: Developer CLI (odo)
+ Dir: developer_cli_odo
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+ Topics:
+ - Name: odo release notes
+ File: odo-release-notes
+ - Name: Understanding odo
+ File: understanding-odo
+ - Name: Installing odo
+ File: installing-odo
+ - Name: Creating and deploying applications with odo
+ Dir: creating_and_deploying_applications_with_odo
+ Topics:
+ - Name: Working with projects
+ File: working-with-projects
+ - Name: Creating a single-component application with odo
+ File: creating-a-single-component-application-with-odo
+ - Name: Creating a multicomponent application with odo
+ File: creating-a-multicomponent-application-with-odo
+ - Name: Creating an application with a database
+ File: creating-an-application-with-a-database
+ - Name: Creating a Java application with a database
+ File: creating-a-java-application-with-a-database
+ - Name: Using devfiles in odo
+ File: using-devfiles-in-odo
+ - Name: Working with storage
+ File: working-with-storage
+ - Name: Deleting applications
+ File: deleting-applications
+ - Name: Debugging applications in odo
+ File: debugging-applications-in-odo
+ - Name: Sample applications
+ File: sample-applications
+ - Name: Using odo in a restricted environment
+ Dir: using_odo_in_a_restricted_environment
+ Topics:
+ - Name: About odo in a restricted environment
+ File: about-odo-in-a-restricted-environment
+ - Name: Pushing the odo init image to the restricted cluster registry
+ File: pushing-the-odo-init-image-to-the-restricted-cluster-registry
+ - Name: Creating and deploying a component to the disconnected cluster
+ File: creating-and-deploying-a-component-to-the-disconnected-cluster
+ - Name: Creating and deploying devfile components to the disconnected cluster
+ File: creating-and-deploying-devfile-components-to-the-disconnected-cluster
+ - Name: Creating instances of services managed by Operators
+ File: creating-instances-of-services-managed-by-operators
+ - Name: Managing environment variables in odo
+ File: managing-environment-variables-in-odo
+ - Name: Configuring the odo CLI
+ File: configuring-the-odo-cli
+ - Name: odo CLI reference
+ File: odo-cli-reference
+ - Name: odo architecture
+ File: odo-architecture
+- Name: Knative CLI (kn) for use with OpenShift Serverless
+ File: kn-cli-tools
+ Distros: openshift-enterprise,openshift-origin
+- Name: Pipelines CLI (tkn)
+ Dir: tkn_cli
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Installing tkn
+ File: installing-tkn
+ - Name: Configuring tkn
+ File: op-configuring-tkn
+ - Name: Basic tkn commands
+ File: op-tkn-reference
+- Name: opm CLI
+ Dir: opm
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Installing the opm CLI
+ File: cli-opm-install
+ - Name: opm CLI reference
+ File: cli-opm-ref
+- Name: Operator SDK
+ Dir: osdk
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Installing the Operator SDK CLI
+ File: cli-osdk-install
+ - Name: Operator SDK CLI reference
+ File: cli-osdk-ref
---
-Name: Authentication
+Name: Security and compliance
+Dir: security
+Distros: openshift-enterprise,openshift-origin,openshift-aro
+Topics:
+- Name: Security and compliance overview
+ File: index
+- Name: Container security
+ Dir: container_security
+ Topics:
+ - Name: Understanding container security
+ File: security-understanding
+ - Name: Understanding host and VM security
+ File: security-hosts-vms
+ - Name: Hardening Red Hat Enterprise Linux CoreOS
+ File: security-hardening
+ Distros: openshift-enterprise,openshift-aro
+ - Name: Container image signatures
+ File: security-container-signature
+ - Name: Hardening Fedora CoreOS
+ File: security-hardening
+ Distros: openshift-origin
+ - Name: Understanding compliance
+ File: security-compliance
+ - Name: Securing container content
+ File: security-container-content
+ - Name: Using container registries securely
+ File: security-registries
+ - Name: Securing the build process
+ File: security-build
+ - Name: Deploying containers
+ File: security-deploy
+ - Name: Securing the container platform
+ File: security-platform
+ - Name: Securing networks
+ File: security-network
+ - Name: Securing attached storage
+ File: security-storage
+ - Name: Monitoring cluster events and logs
+ File: security-monitoring
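+  # Note: the two "Hardening ..." entries above intentionally share
+  # File: security-hardening; the Distros key selects which title each
+  # distribution's build renders.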
+- Name: Configuring certificates
+ Dir: certificates
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Replacing the default ingress certificate
+ File: replacing-default-ingress-certificate
+ - Name: Adding API server certificates
+ File: api-server
+ - Name: Securing service traffic using service serving certificates
+ File: service-serving-certificate
+- Name: Certificate types and descriptions
+ Dir: certificate_types_descriptions
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: User-provided certificates for the API server
+ File: user-provided-certificates-for-api-server
+ - Name: Proxy certificates
+ File: proxy-certificates
+ - Name: Service CA certificates
+ File: service-ca-certificates
+ - Name: Node certificates
+ File: node-certificates
+ - Name: Bootstrap certificates
+ File: bootstrap-certificates
+ - Name: etcd certificates
+ File: etcd-certificates
+ - Name: OLM certificates
+ File: olm-certificates
+ - Name: User-provided certificates for default ingress
+ File: user-provided-certificates-for-default-ingress
+ - Name: Ingress certificates
+ File: ingress-certificates
+ - Name: Monitoring and cluster logging Operator component certificates
+ File: monitoring-and-cluster-logging-operator-component-certificates
+ - Name: Control plane certificates
+ File: control-plane-certificates
+- Name: Compliance Operator
+ Dir: compliance_operator
+ Topics:
+ - Name: Installing the Compliance Operator
+ File: compliance-operator-installation
+ - Name: Compliance Operator scans
+ File: compliance-scans
+ - Name: Understanding the Compliance Operator
+ File: compliance-operator-understanding
+ - Name: Managing the Compliance Operator
+ File: compliance-operator-manage
+ - Name: Tailoring the Compliance Operator
+ File: compliance-operator-tailor
+ - Name: Retrieving Compliance Operator raw results
+ File: compliance-operator-raw-results
+ - Name: Managing Compliance Operator remediation
+ File: compliance-operator-remediation
+ - Name: Performing advanced Compliance Operator tasks
+ File: compliance-operator-advanced
+ - Name: Troubleshooting the Compliance Operator
+ File: compliance-operator-troubleshooting
+ - Name: Uninstalling the Compliance Operator
+ File: compliance-operator-uninstallation
+ - Name: Using the oc-compliance plug-in
+ File: oc-compliance-plug-in-using
+- Name: File Integrity Operator
+ Dir: file_integrity_operator
+ Topics:
+ - Name: Installing the File Integrity Operator
+ File: file-integrity-operator-installation
+ - Name: Understanding the File Integrity Operator
+ File: file-integrity-operator-understanding
+ - Name: Configuring the File Integrity Operator
+ File: file-integrity-operator-configuring
+ - Name: Performing advanced File Integrity Operator tasks
+ File: file-integrity-operator-advanced-usage
+ - Name: Troubleshooting the File Integrity Operator
+ File: file-integrity-operator-troubleshooting
+- Name: Viewing audit logs
+ File: audit-log-view
+- Name: Configuring the audit log policy
+ File: audit-log-policy-config
+- Name: Configuring TLS security profiles
+ File: tls-security-profiles
+- Name: Configuring seccomp profiles
+ File: seccomp-profiles
+- Name: Allowing JavaScript-based access to the API server from additional hosts
+ File: allowing-javascript-access-api-server
+ Distros: openshift-enterprise,openshift-origin
+- Name: Encrypting etcd data
+ File: encrypting-etcd
+ Distros: openshift-enterprise,openshift-origin
+- Name: Scanning pods for vulnerabilities
+ File: pod-vulnerability-scan
+ Distros: openshift-enterprise,openshift-origin
+- Name: Network-Bound Disk Encryption (NBDE)
+ Dir: network_bound_disk_encryption
+ Topics:
+ - Name: About disk encryption technology
+ File: nbde-about-disk-encryption-technology
+ - Name: Tang server installation considerations
+ File: nbde-tang-server-installation-considerations
+ - Name: Tang server encryption key management
+ File: nbde-managing-encryption-keys
+ - Name: Disaster recovery considerations
+ File: nbde-disaster-recovery-considerations
+ Distros: openshift-enterprise,openshift-origin
+---
+Name: Authentication and authorization
Dir: authentication
-Distros: openshift-enterprise,openshift-origin
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated
Topics:
- Name: Understanding authentication
File: understanding-authentication
- Distros: openshift-enterprise,openshift-origin
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+- Name: Understanding identity provider configuration
+ File: dedicated-understanding-authentication
+ Distros: openshift-dedicated
- Name: Configuring the internal OAuth server
File: configuring-internal-oauth
+- Name: Configuring OAuth clients
+ File: configuring-oauth-clients
+- Name: Managing user-owned OAuth access tokens
+ File: managing-oauth-access-tokens
Distros: openshift-enterprise,openshift-origin
- Name: Understanding identity provider configuration
File: understanding-identity-provider
Distros: openshift-enterprise,openshift-origin
- Name: Configuring identity providers
Dir: identity_providers
- Distros: openshift-enterprise,openshift-origin
Topics:
- Name: Configuring an HTPasswd identity provider
File: configuring-htpasswd-identity-provider
+ Distros: openshift-enterprise,openshift-origin
- Name: Configuring a Keystone identity provider
File: configuring-keystone-identity-provider
+ Distros: openshift-enterprise,openshift-origin
- Name: Configuring an LDAP identity provider
File: configuring-ldap-identity-provider
- Name: Configuring a basic authentication identity provider
File: configuring-basic-authentication-identity-provider
+ Distros: openshift-enterprise,openshift-origin
- Name: Configuring a request header identity provider
File: configuring-request-header-identity-provider
+ Distros: openshift-enterprise,openshift-origin
- Name: Configuring a GitHub or GitHub Enterprise identity provider
File: configuring-github-identity-provider
- Name: Configuring a GitLab identity provider
File: configuring-gitlab-identity-provider
+ Distros: openshift-enterprise,openshift-origin
- Name: Configuring a Google identity provider
File: configuring-google-identity-provider
- Name: Configuring an OpenID Connect identity provider
File: configuring-oidc-identity-provider
-- Name: Configuring certificates
- Dir: certificates
- Topics:
- - Name: Replacing the default ingress certificate
- File: replacing-default-ingress-certificate
- - Name: Adding API server certificates
- File: api-server
- - Name: Securing service traffic using service serving certificates
- File: service-serving-certificate
- Name: Using RBAC to define and apply permissions
File: using-rbac
- Distros: openshift-enterprise,openshift-origin
- Name: Removing the kubeadmin user
File: remove-kubeadmin
Distros: openshift-enterprise,openshift-origin
#- Name: Configuring LDAP failover
# File: configuring-ldap-failover
- Distros: openshift-enterprise,openshift-origin
-- Name: Configuring the user agent
- File: configuring-user-agent
- Distros: openshift-enterprise,openshift-origin
- Name: Understanding and creating service accounts
File: understanding-and-creating-service-accounts
- Distros: openshift-enterprise,openshift-origin
- Name: Using service accounts in applications
File: using-service-accounts-in-applications
- Distros: openshift-enterprise,openshift-origin
- Name: Using a service account as an OAuth client
File: using-service-accounts-as-oauth-client
- Distros: openshift-enterprise,openshift-origin
- Name: Scoping tokens
File: tokens-scoping
-- Name: Managing Security Context Constraints
+- Name: Using bound service account tokens
+ File: bound-service-account-tokens
+- Name: Managing security context constraints
File: managing-security-context-constraints
Distros: openshift-enterprise,openshift-origin
- Name: Impersonating the system:admin user
File: impersonating-system-admin
Distros: openshift-enterprise,openshift-origin
-- Name: Creating a project as another user
- File: creating-project-other-user
- Distros: openshift-enterprise,openshift-origin
+- Name: Syncing LDAP groups
+ File: ldap-syncing
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+- Name: Managing cloud provider credentials
+ Dir: managing_cloud_provider_credentials
+ Topics:
+ - Name: About the Cloud Credential Operator
+ File: about-cloud-credential-operator
+ - Name: Using mint mode
+ File: cco-mode-mint
+ - Name: Using passthrough mode
+ File: cco-mode-passthrough
+ - Name: Using manual mode
+ File: cco-mode-manual
+ - Name: Using manual mode with STS
+ File: cco-mode-sts
---
Name: Networking
Dir: networking
-Distros: openshift-enterprise,openshift-origin
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated
Topics:
- Name: Understanding networking
File: understanding-networking
-- Name: Understanding the Cluster Network Operator (CNO)
+- Name: Accessing hosts
+ File: accessing-hosts
+- Name: Understanding the Cluster Network Operator
File: cluster-network-operator
+ Distros: openshift-enterprise,openshift-origin
- Name: Understanding the DNS Operator
File: dns-operator
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
- Name: Understanding the Ingress Operator
File: ingress-operator
+ Distros: openshift-enterprise,openshift-origin
+- Name: About the Contour Operator
+ File: about-contour-operator
+ Distros: openshift-origin
+- Name: Verifying connectivity to an endpoint
+ File: verifying-connectivity-endpoint
+- Name: Configuring the node port service range
+ File: configuring-node-port-service-range
+- Name: Configuring IP failover
+ File: configuring-ipfailover
+- Name: Using SCTP
+ File: using-sctp
+ Distros: openshift-enterprise,openshift-origin
+- Name: Using PTP hardware
+ File: using-ptp
+- Name: Network policy
+ Dir: network_policy
+ Topics:
+ - Name: About network policy
+ File: about-network-policy
+ - Name: Logging network policy
+ File: logging-network-policy
+ - Name: Creating a network policy
+ File: creating-network-policy
+ - Name: Viewing a network policy
+ File: viewing-network-policy
+ - Name: Editing a network policy
+ File: editing-network-policy
+ - Name: Deleting a network policy
+ File: deleting-network-policy
+ - Name: Defining a default network policy for projects
+ File: default-network-policy
+ - Name: Configuring multitenant network policy
+ File: multitenant-network-policy
- Name: Multiple networks
- File: managing-multinetworking
+ Dir: multiple_networks
Distros: openshift-enterprise,openshift-origin
-- Name: Configuring network policy
- File: configuring-networkpolicy
- Distros: openshift-origin,openshift-enterprise
-- Name: OpenShift SDN
- Dir: openshift-sdn
Topics:
- - Name: About OpenShift SDN
+ - Name: Understanding multiple networks
+ File: understanding-multiple-networks
+ - Name: About virtual routing and forwarding
+ File: about-virtual-routing-and-forwarding
+ - Name: Configuring multi-network policy
+ File: configuring-multi-network-policy
+ - Name: Attaching a pod to an additional network
+ File: attaching-pod
+ - Name: Removing a pod from an additional network
+ File: removing-pod
+ - Name: Configuring a bridge network
+ File: configuring-bridge
+ - Name: Configuring a host-device network
+ File: configuring-host-device
+ - Name: Configuring an ipvlan network
+ File: configuring-ipvlan
+ - Name: Configuring a macvlan network with basic customizations
+ File: configuring-macvlan-basic
+ - Name: Configuring a macvlan network
+ File: configuring-macvlan
+ - Name: Editing an additional network
+ File: edit-additional-network
+ - Name: Removing an additional network
+ File: remove-additional-network
+ - Name: Assigning a secondary network to a VRF
+ File: assigning-a-secondary-network-to-a-vrf
+- Name: Hardware networks
+ Dir: hardware_networks
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: About Single Root I/O Virtualization (SR-IOV) hardware networks
+ File: about-sriov
+ - Name: Installing the SR-IOV Operator
+ File: installing-sriov-operator
+ - Name: Configuring the SR-IOV Operator
+ File: configuring-sriov-operator
+ - Name: Configuring an SR-IOV network device
+ File: configuring-sriov-device
+ - Name: Configuring an SR-IOV Ethernet network attachment
+ File: configuring-sriov-net-attach
+ - Name: Configuring an SR-IOV InfiniBand network attachment
+ File: configuring-sriov-ib-attach
+ - Name: Adding a pod to an SR-IOV network
+ File: add-pod
+  - Name: Using high-performance multicast
+ File: using-sriov-multicast
+ - Name: Using DPDK and RDMA
+ File: using-dpdk-and-rdma
+- Name: OpenShift SDN default CNI network provider
+ Dir: openshift_sdn
+ Topics:
+ - Name: About the OpenShift SDN default CNI network provider
File: about-openshift-sdn
- - Name: Enabling egress IPs for a project
+ - Name: Configuring egress IPs for a project
File: assigning-egress-ips
Distros: openshift-origin,openshift-enterprise
- - Name: Using multicast
- File: using-multicast
+ - Name: Configuring an egress firewall for a project
+ File: configuring-egress-firewall
+ - Name: Viewing an egress firewall for a project
+ File: viewing-egress-firewall
+ - Name: Editing an egress firewall for a project
+ File: editing-egress-firewall
+ - Name: Removing an egress firewall from a project
+ File: removing-egress-firewall
+ - Name: Considerations for the use of an egress router pod
+ File: using-an-egress-router
+ - Name: Deploying an egress router pod in redirect mode
+ File: deploying-egress-router-layer3-redirection
+ - Name: Deploying an egress router pod in HTTP proxy mode
+ File: deploying-egress-router-http-redirection
+ - Name: Deploying an egress router pod in DNS proxy mode
+ File: deploying-egress-router-dns-redirection
+ - Name: Configuring an egress router pod destination list from a config map
+ File: configuring-egress-router-configmap
+ - Name: Enabling multicast for a project
+ File: enabling-multicast
+ Distros: openshift-origin,openshift-enterprise
+ - Name: Disabling multicast for a project
+ File: disabling-multicast
Distros: openshift-origin,openshift-enterprise
- Name: Configuring multitenant isolation
File: multitenant-isolation
@@ -247,6 +1061,44 @@ Topics:
- Name: Configuring kube-proxy
File: configuring-kube-proxy
Distros: openshift-enterprise,openshift-origin
+- Name: OVN-Kubernetes default CNI network provider
+ Dir: ovn_kubernetes_network_provider
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: About the OVN-Kubernetes network provider
+ File: about-ovn-kubernetes
+ - Name: Migrating from the OpenShift SDN cluster network provider
+ File: migrate-from-openshift-sdn
+ - Name: Rolling back to the OpenShift SDN cluster network provider
+ File: rollback-to-openshift-sdn
+ - Name: Converting to IPv4/IPv6 dual stack networking
+ File: converting-to-dual-stack
+ - Name: IPsec encryption configuration
+ File: about-ipsec-ovn
+ - Name: Configuring an egress firewall for a project
+ File: configuring-egress-firewall-ovn
+ - Name: Viewing an egress firewall for a project
+ File: viewing-egress-firewall-ovn
+ - Name: Editing an egress firewall for a project
+ File: editing-egress-firewall-ovn
+ - Name: Removing an egress firewall from a project
+ File: removing-egress-firewall-ovn
+ - Name: Configuring an egress IP address
+ File: configuring-egress-ips-ovn
+ - Name: Assigning an egress IP address
+ File: assigning-egress-ips-ovn
+ - Name: Considerations for the use of an egress router pod
+ File: using-an-egress-router-ovn
+ - Name: Deploying an egress router pod in redirect mode
+ File: deploying-egress-router-ovn-redirection
+ - Name: Enabling multicast for a project
+ File: enabling-multicast
+ - Name: Disabling multicast for a project
+ File: disabling-multicast
+ - Name: Tracking network flows
+ File: tracking-network-flows
+ - Name: Configuring hybrid networking
+ File: configuring-hybrid-networking
- Name: Configuring Routes
Dir: routes
Topics:
@@ -254,31 +1106,141 @@ Topics:
File: route-configuration
- Name: Secured routes
File: secured-routes
+- Name: Configuring ingress cluster traffic
+ Dir: configuring_ingress_cluster_traffic
+ Topics:
+ - Name: Overview
+ File: overview-traffic
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Configuring ExternalIPs for services
+ File: configuring-externalip
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Configuring ingress cluster traffic using an Ingress Controller
+ File: configuring-ingress-cluster-traffic-ingress-controller
+ - Name: Configuring ingress cluster traffic using a load balancer
+ File: configuring-ingress-cluster-traffic-load-balancer
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Configuring ingress cluster traffic on AWS using a Network Load Balancer
+ File: configuring-ingress-cluster-traffic-aws-network-load-balancer
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Configuring ingress cluster traffic using a service external IP
+ File: configuring-ingress-cluster-traffic-service-external-ip
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Configuring ingress cluster traffic using a NodePort
+ File: configuring-ingress-cluster-traffic-nodeport
+ Distros: openshift-enterprise,openshift-origin
+ # Kubernetes NMState (TECHNOLOGY PREVIEW)
+- Name: Kubernetes NMState
+ Dir: k8s_nmstate
+ Topics:
+ - Name: About the Kubernetes NMState Operator
+ File: k8s-nmstate-about-the-k8s-nmstate-operator
+ - Name: Observing node network state
+ File: k8s-nmstate-observing-node-network-state
+ - Name: Updating node network configuration
+ File: k8s-nmstate-updating-node-network-config
+ - Name: Troubleshooting node network configuration
+ File: k8s-nmstate-troubleshooting-node-network
+- Name: Configuring the cluster-wide proxy
+ File: enable-cluster-wide-proxy
+ Distros: openshift-enterprise,openshift-origin
+- Name: Configuring a custom PKI
+ File: configuring-a-custom-pki
+ Distros: openshift-enterprise,openshift-origin
+- Name: Load balancing on OpenStack
+ File: load-balancing-openstack
+- Name: Load balancing with MetalLB
+ Dir: metallb
+ Topics:
+ - Name: About MetalLB and the MetalLB Operator
+ File: about-metallb
+ - Name: Installing the MetalLB Operator
+ File: metallb-operator-install
+ - Name: Configuring MetalLB address pools
+ File: metallb-configure-address-pools
+ - Name: Configuring services to use MetalLB
+ File: metallb-configure-services
+- Name: Associating secondary interface metrics with network attachments
+ File: associating-secondary-interfaces-metrics-to-network-attachments
---
Name: Storage
Dir: storage
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
Topics:
+- Name: Storage overview
+ File: index
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+- Name: Understanding ephemeral storage
+ File: understanding-ephemeral-storage
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
- Name: Understanding persistent storage
File: understanding-persistent-storage
- Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
- Name: Configuring persistent storage
- Dir: persistent-storage
+ Dir: persistent_storage
Distros: openshift-enterprise,openshift-origin
Topics:
- Name: Persistent storage using AWS Elastic Block Store
File: persistent-storage-aws
- - Name: Persistent storage using NFS
- File: persistent-storage-nfs
+ - Name: Persistent storage using Azure Disk
+ File: persistent-storage-azure
+ - Name: Persistent storage using Azure File
+ File: persistent-storage-azure-file
+ - Name: Persistent storage using Cinder
+ File: persistent-storage-cinder
+ - Name: Persistent storage using Fibre Channel
+ File: persistent-storage-fibre
+ - Name: Persistent storage using FlexVolume
+ File: persistent-storage-flexvolume
+ - Name: Persistent storage using GCE Persistent Disk
+ File: persistent-storage-gce
+ - Name: Persistent storage using hostPath
+ File: persistent-storage-hostpath
- Name: Persistent Storage using iSCSI
File: persistent-storage-iscsi
- - Name: Persistent storage using Container Storage Interface (CSI)
+ - Name: Persistent storage using local volumes
+ File: persistent-storage-local
+ - Name: Persistent storage using NFS
+ File: persistent-storage-nfs
+ - Name: Persistent storage using Red Hat OpenShift Container Storage
+ File: persistent-storage-ocs
+ - Name: Persistent storage using VMware vSphere
+ File: persistent-storage-vsphere
+- Name: Using Container Storage Interface (CSI)
+ Dir: container_storage_interface
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Configuring CSI volumes
File: persistent-storage-csi
- - Name: Persistent storage using OpenStack Manila
- File: persistent-storage-manila
+ - Name: CSI inline ephemeral volumes
+ File: ephemeral-storage-csi-inline
+ - Name: CSI volume snapshots
+ File: persistent-storage-csi-snapshots
+ - Name: CSI volume cloning
+ File: persistent-storage-csi-cloning
+ - Name: CSI automatic migration
+ File: persistent-storage-csi-migration
+ - Name: AWS Elastic Block Store CSI Driver Operator
+ File: persistent-storage-csi-ebs
+  - Name: AWS Elastic File System CSI Driver Operator
+ File: persistent-storage-csi-aws-efs
+ - Name: Azure Disk CSI Driver Operator
+ File: persistent-storage-csi-azure
+ - Name: Azure Stack Hub CSI Driver Operator
+ File: persistent-storage-csi-azure-stack-hub
+ - Name: GCP PD CSI Driver Operator
+ File: persistent-storage-csi-gcp-pd
+ - Name: OpenStack Cinder CSI Driver Operator
+ File: persistent-storage-csi-cinder
+ - Name: OpenStack Manila CSI Driver Operator
+ File: persistent-storage-csi-manila
+ - Name: Red Hat Virtualization CSI Driver Operator
+ File: persistent-storage-csi-ovirt
+ - Name: VMware vSphere CSI Driver Operator
+ File: persistent-storage-csi-vsphere
- Name: Expanding persistent volumes
File: expanding-persistent-volumes
- Distros: openshift-enterprise,openshift-origin
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
- Name: Dynamic provisioning
File: dynamic-provisioning
Distros: openshift-enterprise,openshift-origin
@@ -289,72 +1251,326 @@ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
Topics:
- Name: Overview
File: architecture-component-imageregistry
-- Name: Image Registry Operator in Openshift Container Platform
+- Name: Image Registry Operator in OpenShift Container Platform
+ File: configuring-registry-operator
+ Distros: openshift-enterprise
+- Name: Image Registry Operator in OKD
File: configuring-registry-operator
+ Distros: openshift-origin
+- Name: Setting up and configuring the registry
+ Dir: configuring_registry_storage
Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Configuring the registry for AWS user-provisioned infrastructure
+ File: configuring-registry-storage-aws-user-infrastructure
+ - Name: Configuring the registry for GCP user-provisioned infrastructure
+ File: configuring-registry-storage-gcp-user-infrastructure
+# - Name: Configuring the registry for OpenStack user-provisioned infrastructure
+# File: configuring-registry-storage-openstack-user-infrastructure
+ - Name: Configuring the registry for Azure user-provisioned infrastructure
+ File: configuring-registry-storage-azure-user-infrastructure
+ - Name: Configuring the registry for OpenStack
+ File: configuring-registry-storage-osp
+ - Name: Configuring the registry for bare metal
+ File: configuring-registry-storage-baremetal
+ - Name: Configuring the registry for vSphere
+ File: configuring-registry-storage-vsphere
- Name: Registry options
File: registry-options
Distros: openshift-enterprise,openshift-origin
- Name: Accessing the registry
File: accessing-the-registry
- Distros: openshift-enterprise,openshift-origin
- Name: Exposing the registry
File: securing-exposing-registry
Distros: openshift-enterprise,openshift-origin
---
-Name: Builds
-Dir: builds
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Name: Operators
+Dir: operators
+Distros: openshift-enterprise,openshift-origin
Topics:
-- Name: Understanding image builds
- File: understanding-image-builds
-- Name: Understanding build configurations
- File: understanding-buildconfigs
-- Name: Creating build inputs
- File: creating-build-inputs
-- Name: Managing build output
- File: managing-build-output
-- Name: Using build strategies
- File: build-strategies
-#- Name: OpenShift Pipelines
-# Dir: openshift_piplines
-# Distros: openshift-enterprise,openshift-origin
-- Name: Custom image builds with Buildah
- File: custom-builds-buildah
- Distros: openshift-enterprise,openshift-origin
-- Name: Performing basic builds
- File: basic-build-operations
- Distros: openshift-enterprise,openshift-origin
-- Name: Triggering and modifying builds
- File: triggering-builds-build-hooks
- Distros: openshift-enterprise,openshift-origin
-- Name: Performing advanced builds
- File: advanced-build-operations
- Distros: openshift-enterprise,openshift-origin
-- Name: Securing builds by strategy
- File: securing-builds-by-strategy
- Distros: openshift-enterprise,openshift-origin
-- Name: Build configuration resources
- File: build-configuration
- Distros: openshift-enterprise,openshift-origin
-- Name: Troubleshooting builds
- File: troubleshooting-builds
- Distros: openshift-enterprise,openshift-origin
-- Name: Setting up additional trusted certifying authorities for builds
- File: setting-up-trusted-ca
+- Name: Understanding Operators
+ Dir: understanding
+ Topics:
+ - Name: What are Operators?
+ File: olm-what-operators-are
+ - Name: Packaging format
+ File: olm-packaging-format
+ - Name: Common terms
+ File: olm-common-terms
+ - Name: Operator Lifecycle Manager (OLM)
+ Dir: olm
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Concepts and resources
+ File: olm-understanding-olm
+ - Name: Architecture
+ File: olm-arch
+ - Name: Workflow
+ File: olm-workflow
+ - Name: Dependency resolution
+ File: olm-understanding-dependency-resolution
+ - Name: Operator groups
+ File: olm-understanding-operatorgroups
+ - Name: Operator conditions
+ File: olm-operatorconditions
+ - Name: Metrics
+ File: olm-understanding-metrics
+ - Name: Webhooks
+ File: olm-webhooks
+ - Name: OperatorHub
+ Distros: openshift-enterprise,openshift-origin
+ File: olm-understanding-operatorhub
+ - Name: Red Hat-provided Operator catalogs
+ Distros: openshift-enterprise
+ File: olm-rh-catalogs
+ - Name: CRDs
+ Dir: crds
+ Topics:
+ - Name: Extending the Kubernetes API with CRDs
+ File: crd-extending-api-with-crds
+ Distros: openshift-origin,openshift-enterprise
+ - Name: Managing resources from CRDs
+ File: crd-managing-resources-from-crds
+ Distros: openshift-origin,openshift-enterprise
+- Name: User tasks
+ Dir: user
+ Topics:
+ - Name: Creating applications from installed Operators
+ File: olm-creating-apps-from-installed-operators
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ - Name: Installing Operators in your namespace
+ File: olm-installing-operators-in-namespace
+ Distros: openshift-enterprise,openshift-origin
+- Name: Administrator tasks
+ Dir: admin
+ Topics:
+ - Name: Adding Operators to a cluster
+ File: olm-adding-operators-to-cluster
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ - Name: Upgrading installed Operators
+ File: olm-upgrading-operators
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ - Name: Deleting Operators from a cluster
+ File: olm-deleting-operators-from-cluster
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ - Name: Configuring proxy support
+ File: olm-configuring-proxy-support
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Viewing Operator status
+ File: olm-status
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+ - Name: Managing Operator conditions
+ File: olm-managing-operatorconditions
+ Distros: openshift-origin,openshift-enterprise
+ - Name: Allowing non-cluster administrators to install Operators
+ File: olm-creating-policy
+ Distros: openshift-origin,openshift-enterprise
+ - Name: Managing custom catalogs
+ File: olm-managing-custom-catalogs
+ Distros: openshift-origin,openshift-enterprise
+ - Name: Using OLM on restricted networks
+ File: olm-restricted-networks
+ Distros: openshift-origin,openshift-enterprise
+- Name: Developing Operators
+ Dir: operator_sdk
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: About the Operator SDK
+ File: osdk-about
+ - Name: Installing the Operator SDK CLI
+ File: osdk-installing-cli
+ - Name: Upgrading projects for newer Operator SDK versions
+ File: osdk-upgrading-projects
+ - Name: Go-based Operators
+ Dir: golang
+ Topics:
+ - Name: Getting started
+ File: osdk-golang-quickstart
+ - Name: Tutorial
+ File: osdk-golang-tutorial
+ - Name: Project layout
+ File: osdk-golang-project-layout
+ - Name: Ansible-based Operators
+ Dir: ansible
+ Topics:
+ - Name: Getting started
+ File: osdk-ansible-quickstart
+ - Name: Tutorial
+ File: osdk-ansible-tutorial
+ - Name: Project layout
+ File: osdk-ansible-project-layout
+ - Name: Ansible support
+ File: osdk-ansible-support
+ - Name: Kubernetes Collection for Ansible
+ File: osdk-ansible-k8s-collection
+ - Name: Using Ansible inside an Operator
+ File: osdk-ansible-inside-operator
+ - Name: Custom resource status management
+ File: osdk-ansible-cr-status
+ - Name: Helm-based Operators
+ Dir: helm
+ Topics:
+ - Name: Getting started
+ File: osdk-helm-quickstart
+ - Name: Tutorial
+ File: osdk-helm-tutorial
+ - Name: Project layout
+ File: osdk-helm-project-layout
+ - Name: Helm support
+ File: osdk-helm-support
+ - Name: Defining cluster service versions (CSVs)
+ File: osdk-generating-csvs
+ - Name: Working with bundle images
+ File: osdk-working-bundle-images
+ - Name: Validating Operators using the scorecard
+ File: osdk-scorecard
+  - Name: High-availability or single-node cluster detection and support
+ File: osdk-ha-sno
+ - Name: Configuring built-in monitoring with Prometheus
+ File: osdk-monitoring-prometheus
+ - Name: Configuring leader election
+ File: osdk-leader-election
+ - Name: Migrating package manifest projects to bundle format
+ File: osdk-pkgman-to-bundle
+ - Name: Operator SDK CLI reference
+ File: osdk-cli-ref
+ - Name: Migrating to Operator SDK v0.1.0
+ File: osdk-migrating-to-v0-1-0
+ Distros: openshift-origin
+- Name: Red Hat Operators reference
+ File: operator-reference
+---
+Name: CI/CD
+Dir: cicd
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+Topics:
+- Name: Builds
+ Dir: builds
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+ Topics:
+ - Name: Understanding image builds
+ File: understanding-image-builds
+ - Name: Understanding build configurations
+ File: understanding-buildconfigs
+ - Name: Creating build inputs
+ File: creating-build-inputs
+ - Name: Managing build output
+ File: managing-build-output
+ - Name: Using build strategies
+ File: build-strategies
+ - Name: Custom image builds with Buildah
+ File: custom-builds-buildah
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Performing and configuring basic builds
+ File: basic-build-operations
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+ - Name: Triggering and modifying builds
+ File: triggering-builds-build-hooks
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
+ - Name: Performing advanced builds
+ File: advanced-build-operations
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Using Red Hat subscriptions in builds
+ File: running-entitled-builds
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Securing builds by strategy
+ File: securing-builds-by-strategy
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Build configuration resources
+ File: build-configuration
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Troubleshooting builds
+ File: troubleshooting-builds
+ Distros: openshift-enterprise,openshift-origin
+ - Name: Setting up additional trusted certificate authorities for builds
+ File: setting-up-trusted-ca
+ Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+- Name: Migrating from Jenkins to Tekton
+ Dir: jenkins-tekton
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Migrating from Jenkins to Tekton
+ File: migrating-from-jenkins-to-tekton
+- Name: Pipelines
+ Dir: pipelines
+ Distros: openshift-enterprise
+ Topics:
+ - Name: OpenShift Pipelines release notes
+ File: op-release-notes
+ - Name: Understanding OpenShift Pipelines
+ File: understanding-openshift-pipelines
+ - Name: Installing OpenShift Pipelines
+ File: installing-pipelines
+ - Name: Uninstalling OpenShift Pipelines
+ File: uninstalling-pipelines
+ - Name: Creating CI/CD solutions for applications using OpenShift Pipelines
+ File: creating-applications-with-cicd-pipelines
+ - Name: Working with OpenShift Pipelines using the Developer perspective
+ File: working-with-pipelines-using-the-developer-perspective
+ - Name: Reducing resource consumption of OpenShift Pipelines
+ File: reducing-pipelines-resource-consumption
+ - Name: Using pods in a privileged security context
+ File: using-pods-in-a-privileged-security-context
+ - Name: Securing webhooks with event listeners
+ File: securing-webhooks-with-event-listeners
+ - Name: Viewing pipeline logs using the OpenShift Logging Operator
+ File: viewing-pipeline-logs-using-the-openshift-logging-operator
+- Name: GitOps
+ Dir: gitops
+ Distros: openshift-enterprise
+ Topics:
+ - Name: OpenShift GitOps release notes
+ File: gitops-release-notes
+ - Name: Understanding OpenShift GitOps
+ File: understanding-openshift-gitops
+ - Name: Installing OpenShift GitOps
+ File: installing-openshift-gitops
+ - Name: Uninstalling OpenShift GitOps
+ File: uninstalling-openshift-gitops
+ - Name: Configuring Argo CD to recursively sync a Git repository with your application
+ Dir: configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application
+ Topics:
+ - Name: Configuring an OpenShift cluster by deploying an application with cluster configurations
+ File: configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations
+ - Name: Deploying a Spring Boot application with Argo CD
+ File: deploying-a-spring-boot-application-with-argo-cd
+ - Name: Configuring SSO for Argo CD on OpenShift
+ File: configuring-sso-for-argo-cd-on-openshift
---
Name: Images
Dir: openshift_images
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated,openshift-online
Topics:
-- Name: Configuring the Samples Operator
+- Name: Configuring the Cluster Samples Operator
File: configuring-samples-operator
+ Distros: openshift-enterprise,openshift-origin
+- Name: Using the Cluster Samples Operator with an alternate registry
+ File: samples-operator-alt-registry
+ Distros: openshift-enterprise,openshift-origin
- Name: Understanding containers, images, and imagestreams
File: images-understand
- Name: Creating images
File: create-images
-- Name: Managing imagestreams
+- Name: Managing images
+ Dir: managing_images
+ Topics:
+ - Name: Managing images overview
+ File: managing-images-overview
+ - Name: Tagging images
+ File: tagging-images
+ - Name: Image pull policy
+ File: image-pull-policy
+ - Name: Using image pull secrets
+ File: using-image-pull-secrets
+- Name: Managing image streams
File: image-streams-manage
+ Distros: openshift-enterprise,openshift-origin
+- Name: Using image streams with Kubernetes resources
+ File: using-imagestreams-with-kube-resources
+ Distros: openshift-enterprise,openshift-origin
+- Name: Triggering updates on image stream changes
+ File: triggering-updates-on-imagestream-changes
+ Distros: openshift-enterprise,openshift-origin
- Name: Image configuration resources
File: image-configuration
Distros: openshift-enterprise,openshift-origin
@@ -362,48 +1578,75 @@ Topics:
File: using-templates
- Name: Using Ruby on Rails
File: templates-using-ruby-on-rails
+- Name: Using images
+ Dir: using_images
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: Using images overview
+ File: using-images-overview
+ - Name: Configuring Jenkins images
+ File: images-other-jenkins
+ - Name: Jenkins agent
+ File: images-other-jenkins-agent
+ - Name: Source-to-image
+ File: using-s21-images
+ - Name: Customizing source-to-image images
+ File: customizing-s2i-images
---
-Name: Applications
+Name: Building applications
Dir: applications
-Distros: openshift-*
+Distros: openshift-enterprise,openshift-origin
Topics:
-- Name: Operators
- Dir: operators
+- Name: Projects
+ Dir: projects
Topics:
- - Name: Understanding Operators
- File: olm-what-operators-are
- - Name: Understanding the Operator Lifecycle Manager (OLM)
- File: olm-understanding-olm
- - Name: Understanding the OperatorHub
- File: olm-understanding-operatorhub
- - Name: Adding Operators to a cluster
- File: olm-adding-operators-to-cluster
+ - Name: Working with projects
+ File: working-with-projects
+ - Name: Creating a project as another user
+ File: creating-project-other-user
Distros: openshift-enterprise,openshift-origin
- - Name: Deleting Operators from a cluster
- File: olm-deleting-operators-from-cluster
+ - Name: Configuring project creation
+ File: configuring-project-creation
Distros: openshift-enterprise,openshift-origin
+- Name: Creating applications
+ Dir: creating_applications
+ Topics:
+ - Name: Creating applications using the Developer perspective
+ File: odc-creating-applications-using-developer-perspective
- Name: Creating applications from installed Operators
- File: olm-creating-apps-from-installed-operators
-- Name: Service brokers
- Dir: service_brokers
- Distros: openshift-enterprise,openshift-origin
- Topics:
- - Name: Installing the service catalog
- File: installing-service-catalog
- - Name: Installing the Template service Broker
- File: installing-template-service-broker
- - Name: Provisioning template applications
- File: provisioning-template-application
- - Name: Uninstalling the Template Service Broker
- File: uninstalling-template-service-broker
- - Name: Installing the OpenShift Ansible Broker
- File: installing-ansible-service-broker
- - Name: Configuring the OpenShift Ansible Broker
- File: configuring-ansible-service-broker
- - Name: Provisioning service bundles
- File: provisioning-service-bundle
- - Name: Uninstalling the OpenShift Ansible Broker
- File: uninstalling-ansible-service-broker
+ File: creating-apps-from-installed-operators
+ - Name: Creating applications using the CLI
+ File: creating-applications-using-cli
+- Name: Viewing application composition using the Topology view
+ File: odc-viewing-application-composition-using-topology-view
+- Name: Connecting applications to services
+ Dir: connecting_applications_to_services
+ Topics:
+ - Name: Service Binding Operator release notes
+ File: sbo-release-notes
+ - Name: Understanding Service Binding Operator
+ File: understanding-service-binding-operator
+ - Name: Installing Service Binding Operator
+ File: installing-sbo
+ - Name: Getting started with service binding
+ File: getting-started-with-service-binding
+ - Name: Exposing binding data from a service
+ File: exposing-binding-data-from-a-service
+ - Name: Projecting binding data
+ File: projecting-binding-data
+ - Name: Connecting an application to a service using the Developer perspective
+ File: odc-connecting-an-application-to-a-service-using-the-developer-perspective
+- Name: Working with Helm charts
+ Dir: working_with_helm_charts
+ Topics:
+ - Name: Understanding Helm
+ File: understanding-helm
+ - Name: Installing Helm
+ File: installing-helm
+ - Name: Configuring custom Helm chart repositories
+ File: configuring-custom-helm-chart-repositories
+ - Name: Working with Helm releases
+ File: odc-working-with-helm-releases
- Name: Deployments
Dir: deployments
Topics:
@@ -411,82 +1654,102 @@ Topics:
File: what-deployments-are
- Name: Managing deployment processes
File: managing-deployment-processes
- - Name: Using DeploymentConfig strategies
+ - Name: Using deployment strategies
File: deployment-strategies
- Name: Using route-based deployment strategies
File: route-based-deployment-strategies
-- Name: CRDs
- Dir: crds
- Topics:
- - Name: Extending the Kubernetes API with CRDs
- File: crd-extending-api-with-crds
- Distros: openshift-origin,openshift-enterprise
- - Name: Managing resources from CRDs
- File: crd-managing-resources-from-crds
- Name: Quotas
Dir: quotas
- Distros: openshift-origin,openshift-enterprise,openshift-dedicated
+ Distros: openshift-origin,openshift-enterprise
Topics:
- Name: Resource quotas per project
File: quotas-setting-per-project
- Name: Resource quotas across multiple projects
File: quotas-setting-across-multiple-projects
+ Distros: openshift-enterprise,openshift-origin
+- Name: Using config maps with applications
+ File: config-maps
+- Name: Monitoring project and application metrics using the Developer perspective
+ File: odc-monitoring-project-and-application-metrics-using-developer-perspective
+- Name: Monitoring application health
+ File: application-health
+- Name: Editing applications
+ File: odc-editing-applications
- Name: Working with quotas
File: working-with-quotas
Distros: openshift-online,openshift-dedicated
-- Name: Idling applications
- File: idling-applications
- Distros: openshift-origin,openshift-enterprise
- Name: Pruning objects to reclaim resources
File: pruning-objects
Distros: openshift-origin,openshift-enterprise
-- Name: Operator SDK
- Dir: operator_sdk
+- Name: Idling applications
+ File: idling-applications
+ Distros: openshift-origin,openshift-enterprise
+- Name: Deleting applications
+ File: odc-deleting-applications
+- Name: Using the Red Hat Marketplace
+ File: red-hat-marketplace
Distros: openshift-origin,openshift-enterprise
- Topics:
- - Name: Getting started with the Operator SDK
- File: osdk-getting-started
- - Name: Creating Ansible-based Operators
- File: osdk-ansible
- - Name: Creating Helm-based Operators
- File: osdk-helm
- - Name: Generating a ClusterServiceVersion (CSV)
- File: osdk-generating-csvs
- - Name: Configuring built-in monitoring with Prometheus
- File: osdk-monitoring-prometheus
- - Name: Configuring leader election
- File: osdk-leader-election
- - Name: Operator SDK CLI reference
- File: osdk-cli-reference
- - Name: Migrating to Operator SDK v0.1.0
- File: osdk-migrating-to-v0-1-0
- Distros: openshift-origin
- - Name: Appendices
- File: osdk-appendices
---
Name: Machine management
Dir: machine_management
Distros: openshift-origin,openshift-enterprise
Topics:
-- Name: Creating a MachineSet
- File: creating-machineset
-- Name: Manually scaling a MachineSet
+- Name: Creating machine sets
+ Dir: creating_machinesets
+ Distros: openshift-origin,openshift-enterprise
+ Topics:
+ - Name: Creating a machine set on AWS
+ File: creating-machineset-aws
+ - Name: Creating a machine set on Azure
+ File: creating-machineset-azure
+ - Name: Creating a machine set on GCP
+ File: creating-machineset-gcp
+ - Name: Creating a machine set on OpenStack
+ File: creating-machineset-osp
+ - Name: Creating a machine set on RHV
+ File: creating-machineset-rhv
+ Distros: openshift-enterprise
+ - Name: Creating a machine set on oVirt
+ File: creating-machineset-rhv
+ Distros: openshift-origin
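+# The RHV and oVirt entries above intentionally share one source file; the
+# "Distros" key selects which display name appears in the enterprise (RHV)
+# and OKD (oVirt) navigation.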
+ - Name: Creating a machine set on vSphere
+ File: creating-machineset-vsphere
+- Name: Manually scaling a machine set
File: manually-scaling-machineset
+- Name: Modifying a machine set
+ File: modifying-machineset
+- Name: Deleting a machine
+ File: deleting-machine
- Name: Applying autoscaling to a cluster
File: applying-autoscaling
-- Name: Creating infrastructure MachineSets
+- Name: Creating infrastructure machine sets
File: creating-infrastructure-machinesets
- Name: Adding a RHEL compute machine
File: adding-rhel-compute
+ Distros: openshift-enterprise
- Name: Adding more RHEL compute machines
File: more-rhel-compute
+ Distros: openshift-enterprise
+- Name: User-provisioned infrastructure
+ Dir: user_infra
+ Topics:
+ - Name: Adding compute machines to user-provisioned infrastructure clusters
+ File: adding-compute-user-infra-general
+ - Name: Adding compute machines to AWS using CloudFormation templates
+ File: adding-aws-compute-user-infra
+ - Name: Adding compute machines to vSphere
+ File: adding-vsphere-compute-user-infra
+ - Name: Adding compute machines to bare metal
+ File: adding-bare-metal-compute-user-infra
- Name: Deploying machine health checks
File: deploying-machine-health-checks
---
Name: Nodes
Dir: nodes
-Distros: openshift-*
+Distros: openshift-enterprise,openshift-origin
Topics:
+- Name: Overview of nodes
+ File: index
- Name: Working with pods
Dir: pods
Topics:
@@ -497,10 +1760,14 @@ Topics:
- Name: Configuring a cluster for Pods
File: nodes-pods-configuring
Distros: openshift-enterprise,openshift-origin
- - Name: Automatically scaling pods
+ - Name: Automatically scaling pods with the horizontal pod autoscaler
File: nodes-pods-autoscaling
+ - Name: Automatically adjusting pod resource levels with the vertical pod autoscaler
+ File: nodes-pods-vertical-autoscaler
- Name: Providing sensitive data to Pods
File: nodes-pods-secrets
+ - Name: Creating and using config maps
+ File: nodes-pods-configmaps
- Name: Using Device Manager to make devices available to nodes
File: nodes-pods-plugins
Distros: openshift-enterprise,openshift-origin
@@ -518,6 +1785,8 @@ Topics:
File: nodes-scheduler-about
- Name: Configuring the default scheduler to control pod placement
File: nodes-scheduler-default
+ - Name: Scheduling pods using a scheduler profile
+ File: nodes-scheduler-profiles
- Name: Placing pods relative to other pods using pod affinity and anti-affinity rules
File: nodes-scheduler-pod-affinity
- Name: Controlling pod placement on nodes using node affinity rules
@@ -528,12 +1797,18 @@ Topics:
File: nodes-scheduler-taints-tolerations
- Name: Placing pods on specific nodes using node selectors
File: nodes-scheduler-node-selectors
+ - Name: Controlling pod placement using pod topology spread constraints
+ File: nodes-scheduler-pod-topology-spread-constraints
# - Name: Placing a pod on a specific node by name
# File: nodes-scheduler-node-names
# - Name: Placing a pod in a specific project
# File: nodes-scheduler-node-projects
# - Name: Keeping your cluster balanced using the descheduler
# File: nodes-scheduler-descheduler
+ - Name: Running a custom scheduler
+ File: nodes-custom-scheduler
+ - Name: Evicting pods using the descheduler
+ File: nodes-descheduler
- Name: Using Jobs and DaemonSets
Dir: jobs
Topics:
@@ -550,24 +1825,31 @@ Topics:
File: nodes-nodes-viewing
- Name: Working with nodes
File: nodes-nodes-working
- - Name: Managing Nodes
+ - Name: Managing nodes
File: nodes-nodes-managing
- Name: Managing the maximum number of Pods per Node
File: nodes-nodes-managing-max-pods
- Name: Using the Node Tuning Operator
File: nodes-node-tuning-operator
+ - Name: Remediating nodes with the Poison Pill Operator
+ File: eco-poison-pill-operator
+ - Name: Deploying node health checks by using the Node Health Check Operator
+ File: eco-node-health-check-operator
- Name: Understanding node rebooting
File: nodes-nodes-rebooting
- Name: Freeing node resources using garbage collection
File: nodes-nodes-garbage-collection
- Name: Allocating resources for nodes
File: nodes-nodes-resources-configuring
- - Name: Advertising hidden resources for nodes
- File: nodes-nodes-opaque-resources
+ - Name: Allocating specific CPUs for nodes in a cluster
+ File: nodes-nodes-resources-cpus
+ - Name: Configuring the TLS security profile for the kubelet
+ File: nodes-nodes-tls
+ Distros: openshift-enterprise,openshift-origin
# - Name: Monitoring for problems in your nodes
# File: nodes-nodes-problem-detector
- - Name: Viewing node audit logs
- File: nodes-nodes-audit-log
+ - Name: Machine Config Daemon metrics
+ File: nodes-nodes-machine-config-daemon-metrics
- Name: Working with containers
Dir: containers
Topics:
@@ -588,8 +1870,8 @@ Topics:
File: nodes-containers-remote-commands
- Name: Using port forwarding to access applications in a container
File: nodes-containers-port-forwarding
- - Name: Monitoring container health
- File: nodes-containers-health
+ - Name: Using sysctls in containers
+ File: nodes-containers-sysctls
- Name: Working with clusters
Dir: clusters
Topics:
@@ -598,6 +1880,8 @@ Topics:
- Name: Analyzing cluster resource levels
File: nodes-cluster-resource-levels
Distros: openshift-enterprise,openshift-origin
+ - Name: Setting limit ranges
+ File: nodes-cluster-limit-ranges
- Name: Configuring cluster memory to meet container memory and risk requirements
File: nodes-cluster-resource-configure
Distros: openshift-enterprise,openshift-origin
@@ -607,167 +1891,1466 @@ Topics:
- Name: Enabling features using FeatureGates
File: nodes-cluster-enabling-features
Distros: openshift-enterprise,openshift-origin
- - Name: Disabling features using FeatureGates
- File: nodes-cluster-disabling-features
- Distros: openshift-enterprise,openshift-origin
+- Name: Remote worker nodes on the network edge
+ Dir: edge
+ Distros: openshift-enterprise
+ Topics:
+ - Name: Using remote worker nodes at the network edge
+ File: nodes-edge-remote-workers
+---
+Name: Windows Container Support for OpenShift
+Dir: windows_containers
+Distros: openshift-origin,openshift-enterprise
+Topics:
+- Name: Windows Container Support for OpenShift release notes
+ File: windows-containers-release-notes-4-x
+- Name: Understanding Windows container workloads
+ File: understanding-windows-container-workloads
+- Name: Enabling Windows container workloads
+ File: enabling-windows-container-workloads
+- Name: Creating Windows MachineSet objects
+ Dir: creating_windows_machinesets
+ Topics:
+ - Name: Creating a Windows MachineSet object on AWS
+ File: creating-windows-machineset-aws
+ - Name: Creating a Windows MachineSet object on Azure
+ File: creating-windows-machineset-azure
+ - Name: Creating a Windows MachineSet object on vSphere
+ File: creating-windows-machineset-vsphere
+- Name: Scheduling Windows container workloads
+ File: scheduling-windows-workloads
+- Name: Windows node upgrades
+ File: windows-node-upgrades
+- Name: Using Bring-Your-Own-Host Windows instances as nodes
+ File: byoh-windows-instance
+- Name: Removing Windows nodes
+ File: removing-windows-nodes
+- Name: Disabling Windows container workloads
+ File: disabling-windows-container-workloads
+---
+Name: Sandboxed Containers Support for OpenShift
+Dir: sandboxed_containers
+Distros: openshift-enterprise
+Topics:
+- Name: Understanding OpenShift sandboxed containers
+ File: understanding-sandboxed-containers
+- Name: Deploying OpenShift sandboxed containers workloads
+ File: deploying-sandboxed-container-workloads
+- Name: Uninstalling OpenShift sandboxed containers workloads
+ File: uninstalling-sandboxed-containers
+- Name: Upgrading OpenShift sandboxed containers
+ File: upgrade-sandboxed-containers
+- Name: Collecting OpenShift sandboxed containers data for Red Hat Support
+ File: troubleshooting-sandboxed-containers
---
Name: Logging
Dir: logging
-Distros: openshift-enterprise,openshift-origin
+Distros: openshift-enterprise,openshift-origin,openshift-dedicated
Topics:
-- Name: About cluster logging
- File: efk-logging
-- Name: About deploying cluster logging
- File: efk-logging-deploying-about
-- Name: Deploying cluster logging
- File: efk-logging-deploying
-- Name: Deploying and Configuring the Event Router
- File: efk-logging-eventrouter
-- Name: Configuring your cluster logging deployment
+- Name: Release notes
+ File: cluster-logging-release-notes
+- Name: About Logging
+ File: cluster-logging
+- Name: Installing Logging
+ File: cluster-logging-deploying
+ Distros: openshift-enterprise,openshift-origin
+- Name: Installing the Logging and Elasticsearch Operators
+ File: dedicated-cluster-deploying
+ Distros: openshift-dedicated
+- Name: Configuring your Logging deployment
Dir: config
+ Distros: openshift-enterprise,openshift-origin
+ Topics:
+ - Name: About the Cluster Logging custom resource
+ File: cluster-logging-configuring-cr
+ - Name: Configuring the logging collector
+ File: cluster-logging-collector
+ - Name: Configuring the log store
+ File: cluster-logging-log-store
+ - Name: Configuring the log visualizer
+ File: cluster-logging-visualizer
+ - Name: Configuring Logging storage
+ File: cluster-logging-storage-considerations
+ - Name: Configuring CPU and memory limits for Logging components
+ File: cluster-logging-memory
+ - Name: Using tolerations to control Logging pod placement
+ File: cluster-logging-tolerations
+ - Name: Moving Logging resources with node selectors
+ File: cluster-logging-moving-nodes
+ - Name: Configuring systemd-journald for Logging
+ File: cluster-logging-systemd
+ - Name: Maintenance and support
+ File: cluster-logging-maintenance-support
+- Name: Viewing logs for a specific resource
+ File: viewing-resource-logs
+- Name: Viewing cluster logs in Kibana
+ File: cluster-logging-visualizer
+ Distros: openshift-enterprise,openshift-origin
+# TODO: This file doesn't exist anymore - update if necessary for dedicated
+# - Name: Viewing cluster logs using Kibana
+# File: cluster-logging-kibana-interface
+# Distros: openshift-dedicated
+- Name: Forwarding logs to third party systems
+ File: cluster-logging-external
+ Distros: openshift-enterprise,openshift-origin
+- Name: Enabling JSON logging
+ File: cluster-logging-enabling-json-logging
+- Name: Collecting and storing Kubernetes events
+ File: cluster-logging-eventrouter
+ Distros: openshift-enterprise,openshift-origin
+# - Name: Forwarding logs using ConfigMaps
+# File: cluster-logging-external-configmap
+# Distros: openshift-enterprise,openshift-origin
+- Name: Updating Logging
+ File: cluster-logging-upgrading
+- Name: Uninstalling Logging
+ File: cluster-logging-uninstall
+ Distros: openshift-dedicated
+- Name: Viewing cluster dashboards
+ File: cluster-logging-dashboards
+- Name: Troubleshooting Logging
+ Dir: troubleshooting
+ Distros: openshift-enterprise,openshift-origin
Topics:
- - Name: About configuring cluster logging
- File: efk-logging-configuring-about
- - Name: Changing cluster logging management state
- File: efk-logging-management
- - Name: Configuring cluster logging
- File: efk-logging-configuring
- - Name: Configuring Elasticsearch
- File: efk-logging-elasticsearch
- - Name: Configuring Kibana
- File: efk-logging-kibana
- - Name: Configuring Curator
- File: efk-logging-curator
- - Name: Configuring Fluentd
- File: efk-logging-fluentd
- - Name: Configuring systemd-journald
- File: efk-logging-systemd
- - Name: Sending logs to external devices
- File: efk-logging-external
-- Name: Viewing Elasticsearch status
- File: efk-logging-elasticsearch-status
-- Name: Manually rolling out Elasticsearch
- File: efk-logging-manual-rollout
-- Name: Troubleshooting Kibana
- File: efk-logging-troubleshooting
+ - Name: Viewing Logging status
+ File: cluster-logging-cluster-status
+ - Name: Viewing the status of the log store
+ File: cluster-logging-log-store-status
+ - Name: Understanding Logging alerts
+ File: cluster-logging-alerts
+ - Name: Collecting logging data for Red Hat Support
+ File: cluster-logging-must-gather
+ - Name: Troubleshooting for critical alerts
+ File: cluster-logging-troubleshooting-for-critical-alerts
+- Name: Uninstalling Logging
+ File: cluster-logging-uninstall
+ Distros: openshift-enterprise,openshift-origin
- Name: Exported fields
- File: efk-logging-exported-fields
-- Name: Uninstalling cluster logging
- File: efk-logging-uninstall
+ File: cluster-logging-exported-fields
+ Distros: openshift-enterprise,openshift-origin
---
Name: Monitoring
Dir: monitoring
Distros: openshift-enterprise,openshift-origin
Topics:
-- Name: Cluster monitoring
- Dir: cluster-monitoring
- Topics:
- - Name: About cluster monitoring
- File: about-cluster-monitoring
- - Name: Configuring the monitoring stack
- File: configuring-the-monitoring-stack
- - Name: Managing cluster alerts
- File: managing-cluster-alerts
- - Name: Accessing Prometheus, Alertmanager, and Grafana
- File: prometheus-alertmanager-and-grafana
-- Name: Exposing custom application metrics for autoscaling
- File: exposing-custom-application-metrics-for-autoscaling
----
-Name: Telemetry
-Dir: telemetry
-Distros: openshift-enterprise,openshift-origin
-Topics:
-- Name: About Telemetry
- File: about-telemetry
-- Name: Showing data collected by Telemetry
- File: showing-data-collected-by-telemetry
-- Name: Opting out of Telemetry
- File: opting-out-of-telemetry
+- Name: Understanding the monitoring stack
+ File: understanding-the-monitoring-stack
+- Name: Configuring the monitoring stack
+ File: configuring-the-monitoring-stack
+- Name: Enabling monitoring for user-defined projects
+ File: enabling-monitoring-for-user-defined-projects
+- Name: Managing metrics
+ File: managing-metrics
+- Name: Managing alerts
+ File: managing-alerts
+- Name: Reviewing monitoring dashboards
+ File: reviewing-monitoring-dashboards
+- Name: Accessing third-party UIs
+ File: accessing-third-party-uis
+- Name: Troubleshooting monitoring issues
+ File: troubleshooting-monitoring-issues
---
Name: Scalability and performance
Dir: scalability_and_performance
-Distros: openshift-origin,openshift-enterprise
+Distros: openshift-origin,openshift-enterprise,openshift-webscale
Topics:
+- Name: Recommended installation practices
+ File: recommended-install-practices
+ Distros: openshift-origin,openshift-enterprise
- Name: Recommended host practices
File: recommended-host-practices
+ Distros: openshift-origin,openshift-enterprise
+- Name: Recommended host practices for IBM Z & LinuxONE environments
+ File: ibm-z-recommended-host-practices
+ Distros: openshift-enterprise
+- Name: Recommended cluster scaling practices
+ File: recommended-cluster-scaling-practices
+ Distros: openshift-origin,openshift-enterprise
- Name: Using the Node Tuning Operator
File: using-node-tuning-operator
+ Distros: openshift-origin,openshift-enterprise
- Name: Using Cluster Loader
File: using-cluster-loader
+ Distros: openshift-origin,openshift-enterprise
- Name: Using CPU Manager
File: using-cpu-manager
+ Distros: openshift-origin,openshift-enterprise
+- Name: Using Topology Manager
+ File: using-topology-manager
+ Distros: openshift-origin,openshift-enterprise
- Name: Scaling the Cluster Monitoring Operator
File: scaling-cluster-monitoring-operator
-- Name: Planning your environment according to object limits
- File: planning-your-environment-according-to-object-limits
+ Distros: openshift-origin,openshift-enterprise
+- Name: Planning your environment according to object maximums
+ File: planning-your-environment-according-to-object-maximums
+ Distros: openshift-origin,openshift-enterprise
- Name: Optimizing storage
File: optimizing-storage
+ Distros: openshift-origin,openshift-enterprise
- Name: Optimizing routing
File: routing-optimization
+ Distros: openshift-origin,openshift-enterprise
+- Name: Optimizing networking
+ File: optimizing-networking
+- Name: Managing bare metal hosts
+ File: managing-bare-metal-hosts
+ Distros: openshift-origin,openshift-enterprise
- Name: What huge pages do and how they are consumed by apps
File: what-huge-pages-do-and-how-they-are-consumed-by-apps
+ Distros: openshift-origin,openshift-enterprise
+- Name: Performance Addon Operator for low latency nodes
+ File: cnf-performance-addon-operator-for-low-latency-nodes
+ Distros: openshift-origin,openshift-enterprise
+- Name: Creating a performance profile
+ File: cnf-create-performance-profiles
+ Distros: openshift-origin,openshift-enterprise
+- Name: Provisioning and deploying a distributed unit (DU)
+ File: cnf-provisioning-and-deploying-a-distributed-unit
+ Distros: openshift-webscale
+- Name: Deploying distributed units at scale in a disconnected environment
+ File: ztp-deploying-disconnected
+ Distros: openshift-origin,openshift-enterprise
---
-Name: Disaster recovery
-Dir: disaster_recovery
+Name: Specialized hardware and driver enablement
+Dir: hardware_enablement
Distros: openshift-origin,openshift-enterprise
Topics:
-- Name: Backing up etcd data
- File: backing-up-etcd
-- Name: Recovering from lost master hosts
- File: scenario-1-infra-recovery
-- Name: Restoring back to a previous cluster state
- File: scenario-2-restoring-cluster-state
-- Name: Recovering from expired control plane certificates
- File: scenario-3-expired-certs
+- Name: About specialized hardware and driver enablement
+ File: about-hardware-enablement
+- Name: Driver Toolkit
+ File: psap-driver-toolkit
+- Name: Special Resource Operator
+ File: psap-special-resource-operator
+- Name: Node Feature Discovery Operator
+ File: psap-node-feature-discovery-operator
---
-Name: CLI reference
-Dir: cli_reference
-Distros: openshift-enterprise,openshift-origin,openshift-dedicated
+Name: Backup and restore
+Dir: backup_and_restore
+Distros: openshift-origin,openshift-enterprise
+Topics:
+- Name: Shutting down a cluster gracefully
+ File: graceful-cluster-shutdown
+- Name: Restarting a cluster gracefully
+ File: graceful-cluster-restart
+# - Name: Application backup and restore
+# Dir: application_backup_and_restore
+# Topics:
+# - Name: Application backup and restore
+# File: placeholder
+- Name: Control plane backup and restore
+ Dir: control_plane_backup_and_restore
+ Topics:
+ - Name: Backing up etcd data
+ File: backing-up-etcd
+ - Name: Replacing an unhealthy etcd member
+ File: replacing-unhealthy-etcd-member
+ - Name: Disaster recovery
+ Dir: disaster_recovery
+ Topics:
+ - Name: About disaster recovery
+ File: about-disaster-recovery
+ - Name: Restoring to a previous cluster state
+ File: scenario-2-restoring-cluster-state
+ - Name: Recovering from expired control plane certificates
+ File: scenario-3-expired-certs
+---
+Name: Migrating from version 3 to 4
+Dir: migrating_from_ocp_3_to_4
+Distros: openshift-enterprise,openshift-origin
+Topics:
+- Name: About migrating from OpenShift Container Platform 3 to 4
+ File: about-migrating-from-3-to-4
+ Distros: openshift-enterprise
+- Name: About migrating from OKD 3 to 4
+ File: about-migrating-from-3-to-4
+ Distros: openshift-origin
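+# As with other distro pairs in this topic map, both entries above point at
+# the same file so each build renders its own product name (OpenShift
+# Container Platform or OKD).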
+- Name: Differences between OpenShift Container Platform 3 and 4
+ File: planning-migration-3-4
+ Distros: openshift-enterprise
+- Name: Differences between OKD 3 and 4
+ File: planning-migration-3-4
+ Distros: openshift-origin
+- Name: Planning considerations
+ File: planning-considerations-3-4
+- Name: About MTC
+ File: about-mtc-3-4
+- Name: Installing MTC
+ File: installing-3-4
+- Name: Installing MTC in a restricted network environment
+ File: installing-restricted-3-4
+- Name: Upgrading MTC
+ File: upgrading-3-4
+- Name: Premigration checklists
+ File: premigration-checklists-3-4
+- Name: Migrating your applications
+ File: migrating-applications-3-4
+- Name: Advanced migration options
+ File: advanced-migration-options-3-4
+- Name: Troubleshooting
+ File: troubleshooting-3-4
+---
+Name: Migration Toolkit for Containers
+Dir: migration_toolkit_for_containers
+Distros: openshift-enterprise,openshift-origin
+Topics:
+- Name: About MTC
+ File: about-mtc
+- Name: MTC release notes
+ File: mtc-release-notes
+- Name: Installing MTC
+ File: installing-mtc
+- Name: Installing MTC in a restricted network environment
+ File: installing-mtc-restricted
+- Name: Upgrading MTC
+ File: upgrading-mtc
+- Name: Premigration checklists
+ File: premigration-checklists-mtc
+- Name: Migrating your applications
+ File: migrating-applications-with-mtc
+- Name: Advanced migration options
+ File: advanced-migration-options-mtc
+- Name: Troubleshooting
+ File: troubleshooting-mtc
+---
+Name: API reference
+Dir: rest_api
+Distros: openshift-enterprise,openshift-origin
Topics:
-- Name: Getting started with the CLI
- File: getting-started-cli
-- Name: Configuring the CLI
- File: configuring-cli
-- Name: Extending the CLI with plug-ins
- File: extending-cli-plugins
- Distros: openshift-enterprise,openshift-origin
-- Name: Developer CLI commands
- File: developer-cli-commands
-- Name: Administrator CLI commands
- File: administrator-cli-commands
- Distros: openshift-enterprise,openshift-origin
-#---
-#Name: Container-native Virtualization
-#Dir: cnv
-#Distros: openshift-enterprise
-#Topics:
-#- Name: Container-native Virtualization Installation
-# Dir: cnv_install
-# Topics:
-# - Name: CNV Install Assemblies Placeholder
-# File: cnv-install-placeholder
-#- Name: Container-native Virtualization User's Guide
-# Dir: cnv_users_guide
-# Topics:
-# - Name: CNV User's Guide Assemblies Placeholder
-# File: cnv-users-guide-placeholder
-#- Name: Container-native Virtualization Release Notes
-# Dir: cnv_release_notes
-# Topics:
-# - Name: CNV Release Notes Placeholder
-# File: cnv-release-notes-placeholder
-#---
-#Name: Service Mesh
-#Dir: service_mesh
-#Distros: openshift-enterprise
-#Topics:
-#- Name: Service Mesh Installation
-# Dir: service_mesh_install
-# Topics:
-# - Name: Service Mesh Insatll Assemblies Placeholder
-# File: service-mesh-install-placeholder
-#- Name: Service Mesh Release Notes
-# Dir: service_mesh_release_notes
-# Topics:
-# - Name: Service Mesh Release Notes Placeholder
-# File: service-mesh-release-notes-placeholder
+- Name: Understanding API tiers
+ File: understanding-api-support-tiers
+- Name: API compatibility guidelines
+ File: understanding-compatibility-guidelines
+- Name: Editing kubelet log level verbosity and gathering logs
+ File: editing-kubelet-log-level-verbosity
+- Name: API list
+ File: index
+- Name: Common object reference
+ Dir: objects
+ Topics:
+ - Name: Index
+ File: index
+- Name: Authorization APIs
+ Dir: authorization_apis
+ Topics:
+ - Name: About Authorization APIs
+ File: authorization-apis-index
+ - Name: 'LocalResourceAccessReview [authorization.openshift.io/v1]'
+ File: localresourceaccessreview-authorization-openshift-io-v1
+ - Name: 'LocalSubjectAccessReview [authorization.openshift.io/v1]'
+ File: localsubjectaccessreview-authorization-openshift-io-v1
+ - Name: 'ResourceAccessReview [authorization.openshift.io/v1]'
+ File: resourceaccessreview-authorization-openshift-io-v1
+ - Name: 'SelfSubjectRulesReview [authorization.openshift.io/v1]'
+ File: selfsubjectrulesreview-authorization-openshift-io-v1
+ - Name: 'SubjectAccessReview [authorization.openshift.io/v1]'
+ File: subjectaccessreview-authorization-openshift-io-v1
+ - Name: 'SubjectRulesReview [authorization.openshift.io/v1]'
+ File: subjectrulesreview-authorization-openshift-io-v1
+ - Name: 'TokenReview [authentication.k8s.io/v1]'
+ File: tokenreview-authentication-k8s-io-v1
+ - Name: 'LocalSubjectAccessReview [authorization.k8s.io/v1]'
+ File: localsubjectaccessreview-authorization-k8s-io-v1
+ - Name: 'SelfSubjectAccessReview [authorization.k8s.io/v1]'
+ File: selfsubjectaccessreview-authorization-k8s-io-v1
+ - Name: 'SelfSubjectRulesReview [authorization.k8s.io/v1]'
+ File: selfsubjectrulesreview-authorization-k8s-io-v1
+ - Name: 'SubjectAccessReview [authorization.k8s.io/v1]'
+ File: subjectaccessreview-authorization-k8s-io-v1
+- Name: Autoscale APIs
+ Dir: autoscale_apis
+ Topics:
+ - Name: About Autoscale APIs
+ File: autoscale-apis-index
+ - Name: 'ClusterAutoscaler [autoscaling.openshift.io/v1]'
+ File: clusterautoscaler-autoscaling-openshift-io-v1
+ - Name: 'MachineAutoscaler [autoscaling.openshift.io/v1beta1]'
+ File: machineautoscaler-autoscaling-openshift-io-v1beta1
+ - Name: 'HorizontalPodAutoscaler [autoscaling/v1]'
+ File: horizontalpodautoscaler-autoscaling-v1
+- Name: Config APIs
+ Dir: config_apis
+ Topics:
+ - Name: About Config APIs
+ File: config-apis-index
+ - Name: 'APIServer [config.openshift.io/v1]'
+ File: apiserver-config-openshift-io-v1
+ - Name: 'Authentication [config.openshift.io/v1]'
+ File: authentication-config-openshift-io-v1
+ - Name: 'Build [config.openshift.io/v1]'
+ File: build-config-openshift-io-v1
+ - Name: 'ClusterOperator [config.openshift.io/v1]'
+ File: clusteroperator-config-openshift-io-v1
+ - Name: 'ClusterVersion [config.openshift.io/v1]'
+ File: clusterversion-config-openshift-io-v1
+ - Name: 'Console [config.openshift.io/v1]'
+ File: console-config-openshift-io-v1
+ - Name: 'DNS [config.openshift.io/v1]'
+ File: dns-config-openshift-io-v1
+ - Name: 'FeatureGate [config.openshift.io/v1]'
+ File: featuregate-config-openshift-io-v1
+ - Name: 'HelmChartRepository [helm.openshift.io/v1beta1]'
+ File: helmchartrepository-helm-openshift-io-v1beta1
+ - Name: 'Image [config.openshift.io/v1]'
+ File: image-config-openshift-io-v1
+ - Name: 'Infrastructure [config.openshift.io/v1]'
+ File: infrastructure-config-openshift-io-v1
+ - Name: 'Ingress [config.openshift.io/v1]'
+ File: ingress-config-openshift-io-v1
+ - Name: 'Network [config.openshift.io/v1]'
+ File: network-config-openshift-io-v1
+ - Name: 'OAuth [config.openshift.io/v1]'
+ File: oauth-config-openshift-io-v1
+ - Name: 'OperatorHub [config.openshift.io/v1]'
+ File: operatorhub-config-openshift-io-v1
+ - Name: 'Project [config.openshift.io/v1]'
+ File: project-config-openshift-io-v1
+ - Name: 'Proxy [config.openshift.io/v1]'
+ File: proxy-config-openshift-io-v1
+ - Name: 'Scheduler [config.openshift.io/v1]'
+ File: scheduler-config-openshift-io-v1
+- Name: Console APIs
+ Dir: console_apis
+ Topics:
+ - Name: About Console APIs
+ File: console-apis-index
+ - Name: 'ConsoleCLIDownload [console.openshift.io/v1]'
+ File: consoleclidownload-console-openshift-io-v1
+ - Name: 'ConsoleExternalLogLink [console.openshift.io/v1]'
+ File: consoleexternalloglink-console-openshift-io-v1
+ - Name: 'ConsoleLink [console.openshift.io/v1]'
+ File: consolelink-console-openshift-io-v1
+ - Name: 'ConsoleNotification [console.openshift.io/v1]'
+ File: consolenotification-console-openshift-io-v1
+ - Name: 'ConsolePlugin [console.openshift.io/v1alpha1]'
+ File: consoleplugin-console-openshift-io-v1alpha1
+ - Name: 'ConsoleQuickStart [console.openshift.io/v1]'
+ File: consolequickstart-console-openshift-io-v1
+ - Name: 'ConsoleYAMLSample [console.openshift.io/v1]'
+ File: consoleyamlsample-console-openshift-io-v1
+- Name: Extension APIs
+ Dir: extension_apis
+ Topics:
+ - Name: About Extension APIs
+ File: extension-apis-index
+ - Name: 'APIService [apiregistration.k8s.io/v1]'
+ File: apiservice-apiregistration-k8s-io-v1
+ - Name: 'CustomResourceDefinition [apiextensions.k8s.io/v1]'
+ File: customresourcedefinition-apiextensions-k8s-io-v1
+ - Name: 'MutatingWebhookConfiguration [admissionregistration.k8s.io/v1]'
+ File: mutatingwebhookconfiguration-admissionregistration-k8s-io-v1
+ - Name: 'ValidatingWebhookConfiguration [admissionregistration.k8s.io/v1]'
+ File: validatingwebhookconfiguration-admissionregistration-k8s-io-v1
+- Name: Image APIs
+ Dir: image_apis
+ Topics:
+ - Name: About Image APIs
+ File: image-apis-index
+ - Name: 'Image [image.openshift.io/v1]'
+ File: image-image-openshift-io-v1
+ - Name: 'ImageSignature [image.openshift.io/v1]'
+ File: imagesignature-image-openshift-io-v1
+ - Name: 'ImageStreamImage [image.openshift.io/v1]'
+ File: imagestreamimage-image-openshift-io-v1
+ - Name: 'ImageStreamImport [image.openshift.io/v1]'
+ File: imagestreamimport-image-openshift-io-v1
+ - Name: 'ImageStreamMapping [image.openshift.io/v1]'
+ File: imagestreammapping-image-openshift-io-v1
+ - Name: 'ImageStream [image.openshift.io/v1]'
+ File: imagestream-image-openshift-io-v1
+ - Name: 'ImageStreamTag [image.openshift.io/v1]'
+ File: imagestreamtag-image-openshift-io-v1
+ - Name: 'ImageTag [image.openshift.io/v1]'
+ File: imagetag-image-openshift-io-v1
+- Name: Machine APIs
+ Dir: machine_apis
+ Topics:
+ - Name: About Machine APIs
+ File: machine-apis-index
+ - Name: 'ContainerRuntimeConfig [machineconfiguration.openshift.io/v1]'
+ File: containerruntimeconfig-machineconfiguration-openshift-io-v1
+ - Name: 'ControllerConfig [machineconfiguration.openshift.io/v1]'
+ File: controllerconfig-machineconfiguration-openshift-io-v1
+ - Name: 'KubeletConfig [machineconfiguration.openshift.io/v1]'
+ File: kubeletconfig-machineconfiguration-openshift-io-v1
+ - Name: 'MachineConfigPool [machineconfiguration.openshift.io/v1]'
+ File: machineconfigpool-machineconfiguration-openshift-io-v1
+ - Name: 'MachineConfig [machineconfiguration.openshift.io/v1]'
+ File: machineconfig-machineconfiguration-openshift-io-v1
+ - Name: 'MachineHealthCheck [machine.openshift.io/v1beta1]'
+ File: machinehealthcheck-machine-openshift-io-v1beta1
+ - Name: 'Machine [machine.openshift.io/v1beta1]'
+ File: machine-machine-openshift-io-v1beta1
+ - Name: 'MachineSet [machine.openshift.io/v1beta1]'
+ File: machineset-machine-openshift-io-v1beta1
+- Name: Metadata APIs
+ Dir: metadata_apis
+ Topics:
+ - Name: About Metadata APIs
+ File: metadata-apis-index
+ - Name: 'APIRequestCount [apiserver.openshift.io/v1]'
+ File: apirequestcount-apiserver-openshift-io-v1
+ - Name: 'Binding [core/v1]'
+ File: binding-core-v1
+ - Name: 'ComponentStatus [core/v1]'
+ File: componentstatus-core-v1
+ - Name: 'ConfigMap [core/v1]'
+ File: configmap-core-v1
+ - Name: 'ControllerRevision [apps/v1]'
+ File: controllerrevision-apps-v1
+ - Name: 'Event [events.k8s.io/v1]'
+ File: event-events-k8s-io-v1
+ - Name: 'Event [core/v1]'
+ File: event-core-v1
+ - Name: 'Lease [coordination.k8s.io/v1]'
+ File: lease-coordination-k8s-io-v1
+ - Name: 'Namespace [core/v1]'
+ File: namespace-core-v1
+- Name: Monitoring APIs
+ Dir: monitoring_apis
+ Topics:
+ - Name: About Monitoring APIs
+ File: monitoring-apis-index
+ - Name: 'Alertmanager [monitoring.coreos.com/v1]'
+ File: alertmanager-monitoring-coreos-com-v1
+ - Name: 'AlertmanagerConfig [monitoring.coreos.com/v1alpha1]'
+ File: alertmanagerconfig-monitoring-coreos-com-v1alpha1
+ - Name: 'PodMonitor [monitoring.coreos.com/v1]'
+ File: podmonitor-monitoring-coreos-com-v1
+ - Name: 'Probe [monitoring.coreos.com/v1]'
+ File: probe-monitoring-coreos-com-v1
+ - Name: 'Prometheus [monitoring.coreos.com/v1]'
+ File: prometheus-monitoring-coreos-com-v1
+ - Name: 'PrometheusRule [monitoring.coreos.com/v1]'
+ File: prometheusrule-monitoring-coreos-com-v1
+ - Name: 'ServiceMonitor [monitoring.coreos.com/v1]'
+ File: servicemonitor-monitoring-coreos-com-v1
+ - Name: 'ThanosRuler [monitoring.coreos.com/v1]'
+ File: thanosruler-monitoring-coreos-com-v1
+- Name: Network APIs
+ Dir: network_apis
+ Topics:
+ - Name: About Network APIs
+ File: network-apis-index
+ - Name: 'ClusterNetwork [network.openshift.io/v1]'
+ File: clusternetwork-network-openshift-io-v1
+ - Name: 'Endpoints [core/v1]'
+ File: endpoints-core-v1
+ - Name: 'EndpointSlice [discovery.k8s.io/v1]'
+ File: endpointslice-discovery-k8s-io-v1
+ - Name: 'EgressNetworkPolicy [network.openshift.io/v1]'
+ File: egressnetworkpolicy-network-openshift-io-v1
+ - Name: 'EgressRouter [network.operator.openshift.io/v1]'
+ File: egressrouter-network-operator-openshift-io-v1
+ - Name: 'HostSubnet [network.openshift.io/v1]'
+ File: hostsubnet-network-openshift-io-v1
+ - Name: 'Ingress [networking.k8s.io/v1]'
+ File: ingress-networking-k8s-io-v1
+ - Name: 'IngressClass [networking.k8s.io/v1]'
+ File: ingressclass-networking-k8s-io-v1
+ - Name: 'IPPool [whereabouts.cni.cncf.io/v1alpha1]'
+ File: ippool-whereabouts-cni-cncf-io-v1alpha1
+ - Name: 'NetNamespace [network.openshift.io/v1]'
+ File: netnamespace-network-openshift-io-v1
+ - Name: 'NetworkAttachmentDefinition [k8s.cni.cncf.io/v1]'
+ File: networkattachmentdefinition-k8s-cni-cncf-io-v1
+ - Name: 'NetworkPolicy [networking.k8s.io/v1]'
+ File: networkpolicy-networking-k8s-io-v1
+ - Name: 'PodNetworkConnectivityCheck [controlplane.operator.openshift.io/v1alpha1]'
+ File: podnetworkconnectivitycheck-controlplane-operator-openshift-io-v1alpha1
+ - Name: 'Route [route.openshift.io/v1]'
+ File: route-route-openshift-io-v1
+ - Name: 'Service [core/v1]'
+ File: service-core-v1
+- Name: Node APIs
+ Dir: node_apis
+ Topics:
+ - Name: About Node APIs
+ File: node-apis-index
+ - Name: 'Node [core/v1]'
+ File: node-core-v1
+ - Name: 'Profile [tuned.openshift.io/v1]'
+ File: profile-tuned-openshift-io-v1
+ - Name: 'RuntimeClass [node.k8s.io/v1]'
+ File: runtimeclass-node-k8s-io-v1
+ - Name: 'Tuned [tuned.openshift.io/v1]'
+ File: tuned-tuned-openshift-io-v1
+- Name: OAuth APIs
+ Dir: oauth_apis
+ Topics:
+ - Name: About OAuth APIs
+ File: oauth-apis-index
+ - Name: 'OAuthAccessToken [oauth.openshift.io/v1]'
+ File: oauthaccesstoken-oauth-openshift-io-v1
+ - Name: 'OAuthAuthorizeToken [oauth.openshift.io/v1]'
+ File: oauthauthorizetoken-oauth-openshift-io-v1
+ - Name: 'OAuthClientAuthorization [oauth.openshift.io/v1]'
+ File: oauthclientauthorization-oauth-openshift-io-v1
+ - Name: 'OAuthClient [oauth.openshift.io/v1]'
+ File: oauthclient-oauth-openshift-io-v1
+ - Name: 'UserOAuthAccessToken [oauth.openshift.io/v1]'
+ File: useroauthaccesstoken-oauth-openshift-io-v1
+- Name: Operator APIs
+ Dir: operator_apis
+ Topics:
+ - Name: About Operator APIs
+ File: operator-apis-index
+ - Name: 'Authentication [operator.openshift.io/v1]'
+ File: authentication-operator-openshift-io-v1
+ - Name: 'CloudCredential [operator.openshift.io/v1]'
+ File: cloudcredential-operator-openshift-io-v1
+ - Name: 'ClusterCSIDriver [operator.openshift.io/v1]'
+ File: clustercsidriver-operator-openshift-io-v1
+ - Name: 'Console [operator.openshift.io/v1]'
+ File: console-operator-openshift-io-v1
+ - Name: 'Config [operator.openshift.io/v1]'
+ File: config-operator-openshift-io-v1
+ - Name: 'Config [imageregistry.operator.openshift.io/v1]'
+ File: config-imageregistry-operator-openshift-io-v1
+ - Name: 'Config [samples.operator.openshift.io/v1]'
+ File: config-samples-operator-openshift-io-v1
+ - Name: 'CSISnapshotController [operator.openshift.io/v1]'
+ File: csisnapshotcontroller-operator-openshift-io-v1
+ - Name: 'DNS [operator.openshift.io/v1]'
+ File: dns-operator-openshift-io-v1
+ - Name: 'DNSRecord [ingress.operator.openshift.io/v1]'
+ File: dnsrecord-ingress-operator-openshift-io-v1
+ - Name: 'Etcd [operator.openshift.io/v1]'
+ File: etcd-operator-openshift-io-v1
+ - Name: 'ImageContentSourcePolicy [operator.openshift.io/v1alpha1]'
+ File: imagecontentsourcepolicy-operator-openshift-io-v1alpha1
+ - Name: 'ImagePruner [imageregistry.operator.openshift.io/v1]'
+ File: imagepruner-imageregistry-operator-openshift-io-v1
+ - Name: 'IngressController [operator.openshift.io/v1]'
+ File: ingresscontroller-operator-openshift-io-v1
+ - Name: 'KubeAPIServer [operator.openshift.io/v1]'
+ File: kubeapiserver-operator-openshift-io-v1
+ - Name: 'KubeControllerManager [operator.openshift.io/v1]'
+ File: kubecontrollermanager-operator-openshift-io-v1
+ - Name: 'KubeScheduler [operator.openshift.io/v1]'
+ File: kubescheduler-operator-openshift-io-v1
+ - Name: 'KubeStorageVersionMigrator [operator.openshift.io/v1]'
+ File: kubestorageversionmigrator-operator-openshift-io-v1
+ - Name: 'Network [operator.openshift.io/v1]'
+ File: network-operator-openshift-io-v1
+ - Name: 'OpenShiftAPIServer [operator.openshift.io/v1]'
+ File: openshiftapiserver-operator-openshift-io-v1
+ - Name: 'OpenShiftControllerManager [operator.openshift.io/v1]'
+ File: openshiftcontrollermanager-operator-openshift-io-v1
+ - Name: 'OperatorPKI [network.operator.openshift.io/v1]'
+ File: operatorpki-network-operator-openshift-io-v1
+ - Name: 'ServiceCA [operator.openshift.io/v1]'
+ File: serviceca-operator-openshift-io-v1
+ - Name: 'Storage [operator.openshift.io/v1]'
+ File: storage-operator-openshift-io-v1
+- Name: OperatorHub APIs
+ Dir: operatorhub_apis
+ Topics:
+ - Name: About OperatorHub APIs
+ File: operatorhub-apis-index
+ - Name: 'CatalogSource [operators.coreos.com/v1alpha1]'
+ File: catalogsource-operators-coreos-com-v1alpha1
+ - Name: 'ClusterServiceVersion [operators.coreos.com/v1alpha1]'
+ File: clusterserviceversion-operators-coreos-com-v1alpha1
+ - Name: 'InstallPlan [operators.coreos.com/v1alpha1]'
+ File: installplan-operators-coreos-com-v1alpha1
+ - Name: 'Operator [operators.coreos.com/v1]'
+ File: operator-operators-coreos-com-v1
+ - Name: 'OperatorCondition [operators.coreos.com/v2]'
+ File: operatorcondition-operators-coreos-com-v2
+ - Name: 'OperatorGroup [operators.coreos.com/v1]'
+ File: operatorgroup-operators-coreos-com-v1
+ - Name: 'PackageManifest [packages.operators.coreos.com/v1]'
+ File: packagemanifest-packages-operators-coreos-com-v1
+ - Name: 'Subscription [operators.coreos.com/v1alpha1]'
+ File: subscription-operators-coreos-com-v1alpha1
+- Name: Policy APIs
+ Dir: policy_apis
+ Topics:
+ - Name: About Policy APIs
+ File: policy-apis-index
+ - Name: 'PodDisruptionBudget [policy/v1]'
+ File: poddisruptionbudget-policy-v1
+- Name: Project APIs
+ Dir: project_apis
+ Topics:
+ - Name: About Project APIs
+ File: project-apis-index
+ - Name: 'Project [project.openshift.io/v1]'
+ File: project-project-openshift-io-v1
+ - Name: 'ProjectRequest [project.openshift.io/v1]'
+ File: projectrequest-project-openshift-io-v1
+- Name: Provisioning APIs
+ Dir: provisioning_apis
+ Topics:
+ - Name: About Provisioning APIs
+ File: provisioning-apis-index
+ - Name: 'BareMetalHost [metal3.io/v1alpha1]'
+ File: baremetalhost-metal3-io-v1alpha1
+ - Name: 'Provisioning [metal3.io/v1alpha1]'
+ File: provisioning-metal3-io-v1alpha1
+- Name: RBAC APIs
+ Dir: rbac_apis
+ Topics:
+ - Name: About RBAC APIs
+ File: rbac-apis-index
+ - Name: 'ClusterRoleBinding [rbac.authorization.k8s.io/v1]'
+ File: clusterrolebinding-rbac-authorization-k8s-io-v1
+ - Name: 'ClusterRole [rbac.authorization.k8s.io/v1]'
+ File: clusterrole-rbac-authorization-k8s-io-v1
+ - Name: 'RoleBinding [rbac.authorization.k8s.io/v1]'
+ File: rolebinding-rbac-authorization-k8s-io-v1
+ - Name: 'Role [rbac.authorization.k8s.io/v1]'
+ File: role-rbac-authorization-k8s-io-v1
+- Name: Role APIs
+ Dir: role_apis
+ Topics:
+ - Name: About Role APIs
+ File: role-apis-index
+ - Name: 'ClusterRoleBinding [authorization.openshift.io/v1]'
+ File: clusterrolebinding-authorization-openshift-io-v1
+ - Name: 'ClusterRole [authorization.openshift.io/v1]'
+ File: clusterrole-authorization-openshift-io-v1
+ - Name: 'RoleBindingRestriction [authorization.openshift.io/v1]'
+ File: rolebindingrestriction-authorization-openshift-io-v1
+ - Name: 'RoleBinding [authorization.openshift.io/v1]'
+ File: rolebinding-authorization-openshift-io-v1
+ - Name: 'Role [authorization.openshift.io/v1]'
+ File: role-authorization-openshift-io-v1
+- Name: Schedule and quota APIs
+ Dir: schedule_and_quota_apis
+ Topics:
+ - Name: About Schedule and quota APIs
+ File: schedule-and-quota-apis-index
+ - Name: 'AppliedClusterResourceQuota [quota.openshift.io/v1]'
+ File: appliedclusterresourcequota-quota-openshift-io-v1
+ - Name: 'ClusterResourceQuota [quota.openshift.io/v1]'
+ File: clusterresourcequota-quota-openshift-io-v1
+ - Name: 'FlowSchema [flowcontrol.apiserver.k8s.io/v1beta1]'
+ File: flowschema-flowcontrol-apiserver-k8s-io-v1beta1
+ - Name: 'LimitRange [core/v1]'
+ File: limitrange-core-v1
+ - Name: 'PriorityClass [scheduling.k8s.io/v1]'
+ File: priorityclass-scheduling-k8s-io-v1
+ - Name: 'PriorityLevelConfiguration [flowcontrol.apiserver.k8s.io/v1beta1]'
+ File: prioritylevelconfiguration-flowcontrol-apiserver-k8s-io-v1beta1
+ - Name: 'ResourceQuota [core/v1]'
+ File: resourcequota-core-v1
+- Name: Security APIs
+ Dir: security_apis
+ Topics:
+ - Name: About Security APIs
+ File: security-apis-index
+ - Name: 'CertificateSigningRequest [certificates.k8s.io/v1]'
+ File: certificatesigningrequest-certificates-k8s-io-v1
+ - Name: 'CredentialsRequest [cloudcredential.openshift.io/v1]'
+ File: credentialsrequest-cloudcredential-openshift-io-v1
+ - Name: 'PodSecurityPolicyReview [security.openshift.io/v1]'
+ File: podsecuritypolicyreview-security-openshift-io-v1
+ - Name: 'PodSecurityPolicySelfSubjectReview [security.openshift.io/v1]'
+ File: podsecuritypolicyselfsubjectreview-security-openshift-io-v1
+ - Name: 'PodSecurityPolicySubjectReview [security.openshift.io/v1]'
+ File: podsecuritypolicysubjectreview-security-openshift-io-v1
+ - Name: 'RangeAllocation [security.openshift.io/v1]'
+ File: rangeallocation-security-openshift-io-v1
+ - Name: 'Secret [core/v1]'
+ File: secret-core-v1
+ - Name: 'SecurityContextConstraints [security.openshift.io/v1]'
+ File: securitycontextconstraints-security-openshift-io-v1
+ - Name: 'ServiceAccount [core/v1]'
+ File: serviceaccount-core-v1
+- Name: Storage APIs
+ Dir: storage_apis
+ Topics:
+ - Name: About Storage APIs
+ File: storage-apis-index
+ - Name: 'CSIDriver [storage.k8s.io/v1]'
+ File: csidriver-storage-k8s-io-v1
+ - Name: 'CSINode [storage.k8s.io/v1]'
+ File: csinode-storage-k8s-io-v1
+ - Name: 'CSIStorageCapacity [storage.k8s.io/v1beta1]'
+ File: csistoragecapacity-storage-k8s-io-v1beta1
+ - Name: 'PersistentVolumeClaim [core/v1]'
+ File: persistentvolumeclaim-core-v1
+ - Name: 'StorageClass [storage.k8s.io/v1]'
+ File: storageclass-storage-k8s-io-v1
+ - Name: 'StorageState [migration.k8s.io/v1alpha1]'
+ File: storagestate-migration-k8s-io-v1alpha1
+ - Name: 'StorageVersionMigration [migration.k8s.io/v1alpha1]'
+ File: storageversionmigration-migration-k8s-io-v1alpha1
+ - Name: 'VolumeAttachment [storage.k8s.io/v1]'
+ File: volumeattachment-storage-k8s-io-v1
+ - Name: 'VolumeSnapshot [snapshot.storage.k8s.io/v1]'
+ File: volumesnapshot-snapshot-storage-k8s-io-v1
+ - Name: 'VolumeSnapshotClass [snapshot.storage.k8s.io/v1]'
+ File: volumesnapshotclass-snapshot-storage-k8s-io-v1
+ - Name: 'VolumeSnapshotContent [snapshot.storage.k8s.io/v1]'
+ File: volumesnapshotcontent-snapshot-storage-k8s-io-v1
+- Name: Template APIs
+ Dir: template_apis
+ Topics:
+ - Name: About Template APIs
+ File: template-apis-index
+ - Name: 'BrokerTemplateInstance [template.openshift.io/v1]'
+ File: brokertemplateinstance-template-openshift-io-v1
+ - Name: 'PodTemplate [core/v1]'
+ File: podtemplate-core-v1
+ - Name: 'Template [template.openshift.io/v1]'
+ File: template-template-openshift-io-v1
+ - Name: 'TemplateInstance [template.openshift.io/v1]'
+ File: templateinstance-template-openshift-io-v1
+- Name: User and group APIs
+ Dir: user_and_group_apis
+ Topics:
+ - Name: About User and group APIs
+ File: user-and-group-apis-index
+ - Name: 'Group [user.openshift.io/v1]'
+ File: group-user-openshift-io-v1
+ - Name: 'Identity [user.openshift.io/v1]'
+ File: identity-user-openshift-io-v1
+ - Name: 'UserIdentityMapping [user.openshift.io/v1]'
+ File: useridentitymapping-user-openshift-io-v1
+ - Name: 'User [user.openshift.io/v1]'
+ File: user-user-openshift-io-v1
+- Name: Workloads APIs
+ Dir: workloads_apis
+ Topics:
+ - Name: About Workloads APIs
+ File: workloads-apis-index
+ - Name: 'BuildConfig [build.openshift.io/v1]'
+ File: buildconfig-build-openshift-io-v1
+ - Name: 'Build [build.openshift.io/v1]'
+ File: build-build-openshift-io-v1
+ - Name: 'CronJob [batch/v1]'
+ File: cronjob-batch-v1
+ - Name: 'DaemonSet [apps/v1]'
+ File: daemonset-apps-v1
+ - Name: 'Deployment [apps/v1]'
+ File: deployment-apps-v1
+ - Name: 'DeploymentConfig [apps.openshift.io/v1]'
+ File: deploymentconfig-apps-openshift-io-v1
+ - Name: 'Job [batch/v1]'
+ File: job-batch-v1
+ - Name: 'Pod [core/v1]'
+ File: pod-core-v1
+ - Name: 'ReplicationController [core/v1]'
+ File: replicationcontroller-core-v1
+ - Name: 'PersistentVolume [core/v1]'
+ File: persistentvolume-core-v1
+ - Name: 'ReplicaSet [apps/v1]'
+ File: replicaset-apps-v1
+ - Name: 'StatefulSet [apps/v1]'
+ File: statefulset-apps-v1
+---
+Name: Service Mesh
+Dir: service_mesh
+Distros: openshift-enterprise
+Topics:
+- Name: Service Mesh 2.x
+ Dir: v2x
+ Topics:
+ - Name: About OpenShift Service Mesh
+ File: ossm-about
+ - Name: Service Mesh 2.x release notes
+ File: servicemesh-release-notes
+ - Name: Service Mesh architecture
+ File: ossm-architecture
+ - Name: Service Mesh and Istio differences
+ File: ossm-vs-community
+ - Name: Preparing to install Service Mesh
+ File: preparing-ossm-installation
+ - Name: Installing the Operators
+ File: installing-ossm
+ - Name: Creating the ServiceMeshControlPlane
+ File: ossm-create-smcp
+ - Name: Adding workloads to a service mesh
+ File: ossm-create-mesh
+ - Name: Enabling sidecar injection
+ File: prepare-to-deploy-applications-ossm
+ - Name: Upgrading from 1.1 to 2.0
+ File: upgrading-ossm
+ - Name: Managing users and profiles
+ File: ossm-profiles-users
+ - Name: Security
+ File: ossm-security
+ - Name: Traffic management
+ File: ossm-traffic-manage
+ - Name: Metrics and traces
+ File: ossm-observability
+ - Name: Performance and scalability
+ File: ossm-performance-scalability
+ - Name: Deploying to production
+ File: ossm-deploy-production
+ - Name: Extensions
+ File: ossm-extensions
+ - Name: Using the 3scale Istio adapter
+ File: threescale-adapter
+ - Name: Troubleshooting Service Mesh
+ File: ossm-troubleshooting-istio
+ - Name: Service Mesh configuration reference
+ File: ossm-reference-smcp
+ - Name: Jaeger configuration reference
+ File: ossm-reference-jaeger
+ - Name: Uninstalling Service Mesh
+ File: removing-ossm
+- Name: Service Mesh 1.x
+ Dir: v1x
+ Topics:
+ - Name: Service Mesh 1.x release notes
+ File: servicemesh-release-notes
+ - Name: Service Mesh architecture
+ File: ossm-architecture
+ - Name: Service Mesh and Istio differences
+ File: ossm-vs-community
+ - Name: Preparing to install Service Mesh
+ File: preparing-ossm-installation
+ - Name: Installing Service Mesh
+ File: installing-ossm
+ - Name: Security
+ File: ossm-security
+ - Name: Traffic management
+ File: ossm-traffic-manage
+ - Name: Deploying applications on Service Mesh
+ File: prepare-to-deploy-applications-ossm
+ - Name: Data visualization and observability
+ File: ossm-observability
+ - Name: Using the 3scale Istio adapter
+ File: threescale-adapter
+ - Name: Removing Service Mesh
+ File: removing-ossm
+---
+Name: Jaeger
+Dir: jaeger
+Distros: openshift-enterprise
+Topics:
+- Name: Jaeger release notes
+ File: rhbjaeger-release-notes
+- Name: Jaeger architecture
+ Dir: jaeger_arch
+ Topics:
+ - Name: Jaeger architecture
+ File: rhbjaeger-architecture
+- Name: Jaeger installation
+ Dir: jaeger_install
+ Topics:
+ - Name: Installing Jaeger
+ File: rhbjaeger-installation
+ - Name: Configuring Jaeger
+ File: rhbjaeger-deploying
+ - Name: Upgrading Jaeger
+ File: rhbjaeger-updating
+ - Name: Removing Jaeger
+ File: rhbjaeger-removing
+---
+Name: Virtualization
+Dir: virt
+Distros: openshift-enterprise,openshift-origin
+Topics:
+- Name: About OpenShift Virtualization
+ File: about-virt
+ Distros: openshift-enterprise
+- Name: About OKD Virtualization
+ File: about-virt
+ Distros: openshift-origin
+- Name: Start here with OpenShift Virtualization
+ File: virt-learn-more-about-openshift-virtualization
+ Distros: openshift-enterprise
+- Name: Start here with OKD Virtualization
+ File: virt-learn-more-about-openshift-virtualization
+ Distros: openshift-origin
+- Name: OpenShift Virtualization release notes
+ File: virt-4-8-release-notes
+ Distros: openshift-enterprise
+#- Name: OKD Virtualization release notes
+# File: virt-4-8-release-notes
+# Distros: openshift-origin
+- Name: Installing
+ Dir: install
+ Topics:
+ - Name: Preparing your OpenShift cluster for OpenShift Virtualization
+ File: preparing-cluster-for-virt
+ Distros: openshift-enterprise
+ - Name: Preparing your OKD cluster for OKD Virtualization
+ File: preparing-cluster-for-virt
+ Distros: openshift-origin
+ - Name: Planning your environment according to OpenShift Virtualization object maximums
+ File: virt-planning-environment-object-maximums
+ Distros: openshift-enterprise
+ - Name: Planning your environment according to OKD Virtualization object maximums
+ File: virt-planning-environment-object-maximums
+ Distros: openshift-origin
+ - Name: Specifying nodes for OpenShift Virtualization components
+ File: virt-specifying-nodes-for-virtualization-components
+ Distros: openshift-enterprise
+ - Name: Specifying nodes for OKD Virtualization components
+ File: virt-specifying-nodes-for-virtualization-components
+ Distros: openshift-origin
+ - Name: Installing OpenShift Virtualization using the web console
+ File: installing-virt-web
+ Distros: openshift-enterprise
+ - Name: Installing OKD Virtualization using the web console
+ File: installing-virt-web
+ Distros: openshift-origin
+ - Name: Installing OpenShift Virtualization using the CLI
+ File: installing-virt-cli
+ Distros: openshift-enterprise
+ - Name: Installing OKD Virtualization using the CLI
+ File: installing-virt-cli
+ Distros: openshift-origin
+ - Name: Enabling the virtctl client
+ File: virt-enabling-virtctl
+ - Name: Uninstalling OpenShift Virtualization using the web console
+ File: uninstalling-virt-web
+ Distros: openshift-enterprise
+ - Name: Uninstalling OKD Virtualization using the web console
+ File: uninstalling-virt-web
+ Distros: openshift-origin
+ - Name: Uninstalling OpenShift Virtualization using the CLI
+ File: uninstalling-virt-cli
+ Distros: openshift-enterprise
+ - Name: Uninstalling OKD Virtualization using the CLI
+ File: uninstalling-virt-cli
+ Distros: openshift-origin
+- Name: Upgrading OpenShift Virtualization
+ File: upgrading-virt
+ Distros: openshift-enterprise
+- Name: Upgrading OKD Virtualization
+ File: upgrading-virt
+ Distros: openshift-origin
+- Name: Additional security privileges granted for kubevirt-controller and virt-launcher
+ File: virt-additional-security-privileges-controller-and-launcher
+- Name: Using the CLI tools
+ File: virt-using-the-cli-tools
+- Name: Virtual machines
+ Dir: virtual_machines
+ Topics:
### Virtual machines: the commented-out assemblies below need to be checked against merged filenames
+ - Name: Creating virtual machines
+ File: virt-create-vms
+ - Name: Editing virtual machines
+ File: virt-edit-vms
+ - Name: Editing boot order
+ File: virt-edit-boot-order
+ - Name: Deleting virtual machines
+ File: virt-delete-vms
+ - Name: Managing virtual machine instances
+ File: virt-manage-vmis
+ - Name: Controlling virtual machine states
+ File: virt-controlling-vm-states
+ - Name: Accessing virtual machine consoles
+ File: virt-accessing-vm-consoles
+ - Name: Triggering virtual machine failover by resolving a failed node
+ File: virt-triggering-vm-failover-resolving-failed-node
+ - Name: Installing the QEMU guest agent on virtual machines
+ File: virt-installing-qemu-guest-agent
+ - Name: Viewing the QEMU guest agent information for virtual machines
+ File: virt-viewing-qemu-guest-agent-web
+ - Name: Managing config maps, secrets, and service accounts in virtual machines
+ File: virt-managing-configmaps-secrets-service-accounts
+ - Name: Installing the VirtIO driver on an existing Windows virtual machine
+ File: virt-installing-virtio-drivers-on-existing-windows-vm
+ - Name: Installing the VirtIO driver on a new Windows virtual machine
+ File: virt-installing-virtio-drivers-on-new-windows-vm
+ - Name: Advanced virtual machine management
+ Dir: advanced_vm_management
+ Topics:
+#Advanced virtual machine configuration
+ - Name: Specifying nodes for virtual machines
+ File: virt-specifying-nodes-for-vms
+ - Name: Automating management tasks
+ File: virt-automating-management-tasks
+ - Name: EFI mode for virtual machines
+ File: virt-efi-mode-for-vms
+ - Name: Configuring PXE booting for virtual machines
+ File: virt-configuring-pxe-booting
+ - Name: Managing guest memory
+ File: virt-managing-guest-memory
+ - Name: Using huge pages with virtual machines
+ File: virt-using-huge-pages-with-vms
+ - Name: Enabling dedicated resources for a virtual machine
+ File: virt-dedicated-resources-vm
+ - Name: Scheduling virtual machines
+ File: virt-schedule-vms
+ - Name: Configuring PCI passthrough
+ File: virt-configuring-pci-passthrough
+ - Name: Configuring a watchdog device
+ File: virt-configuring-a-watchdog
+# Importing virtual machines
+ - Name: Importing virtual machines
+ Dir: importing_vms
+ Topics:
+ - Name: TLS certificates for data volume imports
+ File: virt-tls-certificates-for-dv-imports
+ - Name: Importing virtual machine images with data volumes
+ File: virt-importing-virtual-machine-images-datavolumes
+ - Name: Importing virtual machine images to block storage with data volumes
+ File: virt-importing-virtual-machine-images-datavolumes-block
+# Cloning virtual machines
+ - Name: Cloning virtual machines
+ Dir: cloning_vms
+ Topics:
+ - Name: Enabling user permissions to clone data volumes across namespaces
+ File: virt-enabling-user-permissions-to-clone-datavolumes
+ - Name: Cloning a virtual machine disk into a new data volume
+ File: virt-cloning-vm-disk-into-new-datavolume
+ - Name: Cloning a virtual machine by using a data volume template
+ File: virt-cloning-vm-using-datavolumetemplate
+ - Name: Cloning a virtual machine disk into a new block storage data volume
+ File: virt-cloning-vm-disk-into-new-datavolume-block
+# Virtual machine networking
+ - Name: Virtual machine networking
+ Dir: vm_networking
+ Topics:
+ - Name: Using the default pod network with OpenShift Virtualization
+ File: virt-using-the-default-pod-network-with-virt
+ Distros: openshift-enterprise
+ - Name: Using the default pod network with OKD Virtualization
+ File: virt-using-the-default-pod-network-with-virt
+ Distros: openshift-origin
+ - Name: Attaching a virtual machine to multiple networks
+ File: virt-attaching-vm-multiple-networks
+ - Name: Configuring IP addresses for virtual machines
+ File: virt-configuring-ip-for-vms
+ - Name: Configuring an SR-IOV network device for virtual machines
+ File: virt-configuring-sriov-device-for-vms
+ - Name: Configuring certificate rotation
+ File: virt-configuring-certificate-rotation
+ # TODO: Add the assembly back for 4.10
+# - Name: Connecting virtual machines to a service mesh
+# File: virt-connecting-vm-to-service-mesh
+ - Name: Defining an SR-IOV network
+ File: virt-defining-an-sriov-network
+ - Name: Attaching a virtual machine to an SR-IOV network
+ File: virt-attaching-vm-to-sriov-network
+ - Name: Viewing the IP address of NICs on a virtual machine
+ File: virt-viewing-ip-of-vm-nic
+ - Name: Using a MAC address pool for virtual machines
+ File: virt-using-mac-address-pool-for-vms
+#A BETTER NAME THAN 'STORAGE 4 U'
+ - Name: Virtual machine disks
+ Dir: virtual_disks
+ Topics:
+ - Name: Features for storage
+ File: virt-features-for-storage
+ - Name: Configuring local storage for virtual machines
+ File: virt-configuring-local-storage-for-vms
+ - Name: Creating data volumes
+ File: virt-creating-data-volumes
+ - Name: Reserving PVC space for file system overhead
+ File: virt-reserving-pvc-space-fs-overhead
+ - Name: Configuring CDI to work with namespaces that have a compute resource quota
+ File: virt-configuring-cdi-for-namespace-resourcequota
+ - Name: Managing data volume annotations
+ File: virt-managing-data-volume-annotations
+ - Name: Using preallocation for data volumes
+ File: virt-using-preallocation-for-datavolumes
+ - Name: Uploading local disk images by using the web console
+ File: virt-uploading-local-disk-images-web
+ - Name: Uploading local disk images by using the virtctl tool
+ File: virt-uploading-local-disk-images-virtctl
+ - Name: Uploading a local disk image to a block storage data volume
+ File: virt-uploading-local-disk-images-block
+ - Name: Managing virtual machine snapshots
+ File: virt-managing-vm-snapshots
+ - Name: Moving a local virtual machine disk to a different node
+ File: virt-moving-local-vm-disk-to-different-node
+ - Name: Expanding virtual storage by adding blank disk images
+ File: virt-expanding-virtual-storage-with-blank-disk-images
+ - Name: Cloning a data volume using smart-cloning
+ File: virt-cloning-a-datavolume-using-smart-cloning
+ - Name: Storage defaults for data volumes
+ File: virt-storage-defaults-for-datavolumes
+ - Name: Creating and using boot sources
+ File: virt-creating-and-using-boot-sources
+ - Name: Hot-plugging virtual disks
+ File: virt-hot-plugging-virtual-disks
+ - Name: Using container disks with virtual machines
+ File: virt-using-container-disks-with-vms
+ - Name: Preparing CDI scratch space
+ File: virt-preparing-cdi-scratch-space
+ - Name: Re-using statically provisioned persistent volumes
+ File: virt-reusing-statically-provisioned-persistent-volumes
+ - Name: Deleting data volumes
+ File: virt-deleting-datavolumes
+# Templates
+- Name: Virtual machine templates
+ Dir: vm_templates
+ Topics:
+ - Name: Creating virtual machine templates
+ File: virt-creating-vm-template
+ - Name: Editing a virtual machine template
+ File: virt-editing-vm-template
+ - Name: Enabling dedicated resources for a virtual machine template
+ File: virt-dedicated-resources-vm-template
+ - Name: Deleting a virtual machine template
+ File: virt-deleting-vm-template
+# Virtual machine live migration
+- Name: Live migration
+ Dir: live_migration
+ Topics:
+ - Name: Virtual machine live migration
+ File: virt-live-migration
+ - Name: Live migration limits and timeouts
+ File: virt-live-migration-limits
+ - Name: Migrating a virtual machine instance to another node
+ File: virt-migrate-vmi
+ - Name: Monitoring live migration of a virtual machine instance
+ File: virt-monitor-vmi-migration
+ - Name: Cancelling the live migration of a virtual machine instance
+ File: virt-cancel-vmi-migration
+ - Name: Configuring virtual machine eviction strategy
+ File: virt-configuring-vmi-eviction-strategy
+# Node maintenance mode
+- Name: Node maintenance
+ Dir: node_maintenance
+ Topics:
+ - Name: About node maintenance
+ File: virt-about-node-maintenance
+ - Name: Setting a node to maintenance mode
+ File: virt-setting-node-maintenance
+ - Name: Resuming a node from maintenance mode
+ File: virt-resuming-node
+ - Name: Automatic renewal of TLS certificates
+ File: virt-automatic-certificates
+ - Name: Managing node labeling for obsolete CPU models
+ File: virt-managing-node-labeling-obsolete-cpu-models
+ - Name: Preventing node reconciliation
+ File: virt-preventing-node-reconciliation
+# Node Networking
+- Name: Node networking
+ Dir: node_network
+ Topics:
+ - Name: Observing node network state
+ File: virt-observing-node-network-state
+ - Name: Updating node network configuration
+ File: virt-updating-node-network-config
+ - Name: Troubleshooting node network configuration
+ File: virt-troubleshooting-node-network
+# Logging, events, and monitoring
+- Name: Logging, events, and monitoring
+ Dir: logging_events_monitoring
+ Topics:
+ - Name: Viewing logs
+ File: virt-logs
+ - Name: Viewing events
+ File: virt-events
+ - Name: Diagnosing data volumes using events and conditions
+ File: virt-diagnosing-datavolumes-using-events-and-conditions
+ - Name: Viewing information about virtual machine workloads
+ File: virt-viewing-information-about-vm-workloads
+ - Name: Monitoring virtual machine health
+ File: virt-monitoring-vm-health
+ - Name: Viewing cluster information
+ File: virt-using-dashboard-to-get-cluster-info
+ - Name: Reviewing resource usage by virtual machines
+ File: virt-reviewing-vm-dashboard
+ - Name: OpenShift cluster monitoring, logging, and Telemetry
+ File: virt-openshift-cluster-monitoring
+ - Name: Prometheus queries for virtual resources
+ File: virt-prometheus-queries
+ - Name: Collecting OpenShift Virtualization data for Red Hat Support
+ File: virt-collecting-virt-data
+ Distros: openshift-enterprise
+  - Name: Collecting OKD Virtualization data for a community report
+ File: virt-collecting-virt-data
+ Distros: openshift-origin
+---
+# OpenShift Serverless
+Name: Serverless
+Dir: serverless
+Distros: openshift-enterprise
+Topics:
+# Release notes
+- Name: Release Notes
+ File: serverless-release-notes
+# Support
+- Name: Support
+ File: serverless-support
+# Intro / getting started
+- Name: Getting started
+ File: serverless-getting-started
+#
+# Admin guide
+- Name: Administration guide
+ Dir: admin_guide
+ Topics:
+ #Installation
+ - Name: Installing the OpenShift Serverless Operator
+ File: install-serverless-operator
+ - Name: Installing Knative Serving
+ File: installing-knative-serving
+ - Name: Installing Knative Eventing
+ File: installing-knative-eventing
+ - Name: Upgrading the OpenShift Serverless Operator
+ File: upgrading-serverless
+ - Name: Removing OpenShift Serverless
+ File: removing-openshift-serverless
+ # Ingress options
+ - Name: Integrating Service Mesh with OpenShift Serverless
+ File: serverless-ossm-setup
+ # Eventing
+ - Name: Creating Knative Eventing components in the Administrator perspective
+ File: serverless-cluster-admin-eventing
+# - Name: Configuring the Knative Eventing custom resource
+# File: knative-eventing-CR-config
+# Uncomment once we add some configs to this section, adding now for consistency with Serving docs as a placeholder
+ # Serving
+ - Name: Creating Knative Serving components in the Administrator perspective
+ File: serverless-cluster-admin-serving
+ - Name: Configuring the Knative Serving custom resource
+ File: knative-serving-CR-config
+ # Monitoring
+ - Name: Monitoring serverless components
+ File: serverless-admin-monitoring
+ # Metrics
+ - Name: Metrics
+ File: serverless-admin-metrics
+ # HA
+ - Name: High availability on OpenShift Serverless
+ File: serverless-ha
+# Security
+- Name: Security
+ Dir: security
+ Topics:
+ - Name: Configuring JSON Web Token authentication for Knative services
+ File: serverless-ossm-with-kourier-jwt
+ - Name: Configuring a custom domain for a Knative service
+ File: serverless-custom-domains
+ - Name: Configuring TLS for a custom domain using Kourier
+ File: serverless-ossm-tls-with-kourier
+ - Name: Using a custom TLS certificate for domain mapping
+ File: serverless-custom-tls-cert-domain-mapping
+#
+# TODO: Add developer guide
+#
+# Knative Serving
+- Name: Knative Serving
+ Dir: knative_serving
+ Topics:
+ # Understanding serving
+ - Name: Understanding Knative Serving
+ File: serverless-understanding-serving
+ # Knative services
+ - Name: Serverless applications
+ File: serverless-applications
+ # Autoscaling
+ - Name: Configuring Knative Serving autoscaling
+ File: configuring-knative-serving-autoscaling
+ - Name: Traffic management
+ File: serverless-traffic-management
+ - Name: Cluster logging with OpenShift Serverless
+ File: cluster-logging-serverless
+ # Tracing
+ - Name: Tracing requests using Jaeger
+ File: serverless-tracing
+ # Routes
+ - Name: Configuring routes for Knative services
+ File: serverless-configuring-routes
+ - Name: Monitoring Knative services
+ File: serverless-service-monitoring
+ # Metrics
+ - Name: Metrics
+ File: serverless-serving-metrics
+#
+# Knative Eventing
+- Name: Knative Eventing
+ Dir: knative_eventing
+ Topics:
+# Understanding eventing
+ - Name: Understanding Knative Eventing
+ File: serverless-understanding-eventing
+# Event sinks
+ - Name: Event sinks
+ File: serverless-event-sinks
+# Brokers
+ - Name: Brokers
+ File: serverless-using-brokers
+# Triggers
+ - Name: Triggers
+ File: serverless-triggers
+# Event delivery
+ - Name: Event delivery
+ File: serverless-event-delivery
+# Knative Kafka
+ - Name: Knative Kafka
+ File: serverless-kafka
+# Event sources
+- Name: Event sources
+ Dir: event_sources
+ Topics:
+ - Name: Understanding event sources
+ File: knative-event-sources
+ - Name: Listing event sources and event source types
+ File: serverless-listing-event-sources
+ - Name: Using the API server source
+ File: serverless-apiserversource
+ - Name: Using a ping source
+ File: serverless-pingsource
+ - Name: Using a Kafka source
+ File: serverless-kafka-source
+  - Name: Custom event sources
+    File: serverless-custom-event-sources
+# Channels
+- Name: Channels
+ Dir: channels
+ Topics:
+ - Name: Understanding channels
+ File: serverless-channels
+ - Name: Creating and deleting channels
+ File: serverless-creating-channels
+ - Name: Subscriptions
+ File: serverless-subs
+ - Name: Configuring channel defaults
+ File: serverless-configuring-channels
+#
+# Functions
+- Name: Functions
+ Dir: functions
+ Topics:
+ - Name: About OpenShift Serverless Functions
+ File: serverless-functions-about
+ - Name: Setting up OpenShift Serverless Functions
+ File: serverless-functions-setup
+ - Name: Getting started with functions
+ File: serverless-functions-getting-started
+ - Name: Developing Node.js functions
+ File: serverless-developing-nodejs-functions
+ - Name: Developing TypeScript functions
+ File: serverless-developing-typescript-functions
+ - Name: Developing Golang functions
+ File: serverless-developing-go-functions
+ - Name: Developing Python functions
+ File: serverless-developing-python-functions
+ - Name: Developing Quarkus functions
+ File: serverless-developing-quarkus-functions
+ - Name: Using functions with Knative Eventing
+ File: serverless-functions-eventing
+ - Name: Function project configuration in func.yaml
+ File: serverless-functions-yaml
+ - Name: Accessing secrets and config maps from Serverless functions
+ File: serverless-functions-accessing-secrets-configmaps
+ - Name: Adding annotations to functions
+ File: serverless-functions-annotations
+ - Name: Functions development reference guide
+ File: serverless-functions-reference-guide
+# Integrations
+- Name: Integrations
+ Dir: integrations
+ Topics:
+ - Name: Using NVIDIA GPU resources with serverless applications
+ File: gpu-resources
+#
+# CLI tools
+- Name: CLI tools
+ Dir: cli_tools
+ Topics:
+ - Name: Installing the Knative CLI
+ File: installing-kn
+ - Name: Knative CLI advanced configuration
+ File: advanced-kn-config
+ - Name: kn flags reference
+ File: kn-flags-reference
+ - Name: Knative Serving CLI commands
+ File: kn-serving-ref
+ - Name: Knative Eventing CLI commands
+ File: kn-eventing-ref
+ - Name: kn func
+ File: kn-func-ref
diff --git a/_unused_topics/README b/_unused_topics/README
new file mode 100644
index 000000000000..5636d8245a15
--- /dev/null
+++ b/_unused_topics/README
@@ -0,0 +1,2 @@
+Placeholder file. Any modules that are not included will be placed here
+by the `scripts/find_unused.py` script.
diff --git a/modules/adding-new-devices.adoc b/_unused_topics/adding-new-devices.adoc
similarity index 100%
rename from modules/adding-new-devices.adoc
rename to _unused_topics/adding-new-devices.adoc
diff --git a/modules/architecture-new-content.adoc b/_unused_topics/architecture-new-content.adoc
similarity index 100%
rename from modules/architecture-new-content.adoc
rename to _unused_topics/architecture-new-content.adoc
diff --git a/modules/builds-output-image-digest.adoc b/_unused_topics/builds-output-image-digest.adoc
similarity index 89%
rename from modules/builds-output-image-digest.adoc
rename to _unused_topics/builds-output-image-digest.adoc
index 4e7501084532..ee610a5fda0b 100644
--- a/modules/builds-output-image-digest.adoc
+++ b/_unused_topics/builds-output-image-digest.adoc
@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
-// * assembly/builds
+// * unused_topics/builds-output-image-digest
[id="builds-output-image-digest_{context}"]
= Output image digest
@@ -8,7 +8,7 @@
Built images can be uniquely identified by their digest, which can
later be used to pull the image by digest regardless of its current tag.
-ifdef::openshift-enterprise,openshift-origin,openshift-dedicated[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated[]
`Docker` and
endif::[]
`Source-to-Image (S2I)` builds store the digest in
diff --git a/_unused_topics/cluster-logging-collector-envvar.adoc b/_unused_topics/cluster-logging-collector-envvar.adoc
new file mode 100644
index 000000000000..d1a96e696399
--- /dev/null
+++ b/_unused_topics/cluster-logging-collector-envvar.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-collector.adoc
+
+[id="cluster-logging-collector-envvar_{context}"]
+= Configuring the logging collector using environment variables
+
+You can use environment variables to modify the configuration of the Fluentd log
+collector.
+
+See the link:https://github.com/openshift/origin-aggregated-logging/blob/master/fluentd/README.md[Fluentd README] in GitHub for a list of the
+available environment variables.
+
+.Prerequisites
+
+* Set OpenShift Logging to the unmanaged state. Operators in an unmanaged state are unsupported and the cluster administrator assumes full control of the individual component configurations and upgrades.
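++
+For example, as a minimal sketch (assuming the default `ClusterLogging` custom resource is named `instance`, as elsewhere in these modules), you can set the unmanaged state by patching the resource:
++
+----
+$ oc patch ClusterLogging instance -n openshift-logging \
+  --type merge -p '{"spec":{"managementState":"Unmanaged"}}'
+----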
+
+.Procedure
+
+Set any of the Fluentd environment variables as needed:
+
+----
+$ oc set env ds/fluentd <env-var>=<value>
+----
+
+For example:
+
+----
+$ oc set env ds/fluentd BUFFER_SIZE_LIMIT=24
+----
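+
+To verify the change, you can list the environment variables that are currently set on the daemon set. For example:
+
+----
+$ oc set env ds/fluentd --list
+----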
diff --git a/_unused_topics/cluster-logging-configuring-node-selector.adoc b/_unused_topics/cluster-logging-configuring-node-selector.adoc
new file mode 100644
index 000000000000..be955156efaa
--- /dev/null
+++ b/_unused_topics/cluster-logging-configuring-node-selector.adoc
@@ -0,0 +1,57 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-elasticsearch.adoc
+
+[id="cluster-logging-configuring-node-selector_{context}"]
+= Specifying a node for OpenShift Logging components using node selectors
+
+Each component specification allows the component to target a specific node.
+
+.Procedure
+
+. Edit the Cluster Logging custom resource (CR) in the `openshift-logging` project:
++
+----
+$ oc edit ClusterLogging instance -n openshift-logging
+----
++
+[source,yaml]
+----
+apiVersion: "logging.openshift.io/v1"
+kind: "ClusterLogging"
+metadata:
+ name: "nodeselector"
+spec:
+ managementState: "Managed"
+ logStore:
+ type: "elasticsearch"
+ elasticsearch:
+ nodeSelector: <1>
+ logging: es
+ nodeCount: 1
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: 200m
+ memory: 2Gi
+ storage:
+ size: "20G"
+ storageClassName: "gp2"
+ redundancyPolicy: "ZeroRedundancy"
+ visualization:
+ type: "kibana"
+ kibana:
+ nodeSelector: <2>
+ logging: kibana
+ replicas: 1
+ collection:
+ logs:
+ type: "fluentd"
+ fluentd:
+ nodeSelector: <3>
+ logging: fluentd
+----
+<1> Node selector for Elasticsearch.
+<2> Node selector for Kibana.
+<3> Node selector for Fluentd.
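+
+As a verification sketch, assuming a node that you want to dedicate to Elasticsearch, you can apply the matching label from the example above and then confirm where the pods are scheduled (`<node-name>` is a placeholder):
+
+----
+$ oc label node <node-name> logging=es
+$ oc get pods -n openshift-logging -o wide
+----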
diff --git a/_unused_topics/cluster-logging-elasticsearch-admin.adoc b/_unused_topics/cluster-logging-elasticsearch-admin.adoc
new file mode 100644
index 000000000000..b1b3843deb19
--- /dev/null
+++ b/_unused_topics/cluster-logging-elasticsearch-admin.adoc
@@ -0,0 +1,43 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-elasticsearch.adoc
+
+[id="cluster-logging-elasticsearch-admin_{context}"]
+= Performing administrative Elasticsearch operations
+
+An administrator certificate, key, and CA that can be used to communicate with and perform administrative operations on Elasticsearch are provided within the *elasticsearch* secret in the `openshift-logging` project.
+
+[NOTE]
+====
+To confirm whether your OpenShift Logging installation provides these, run:
+----
+$ oc describe secret elasticsearch -n openshift-logging
+----
+====
+
+. Connect to an Elasticsearch pod that is in the cluster on which you are attempting to perform maintenance.
+
+. To find a pod in a cluster, use:
++
+----
+$ oc get pods -l component=elasticsearch -o name -n openshift-logging | head -1
+----
+
+. Connect to a pod:
++
+----
+$ oc rsh <pod_name>
+----
+
+. Once connected to an Elasticsearch container, you can use the certificates mounted from the secret to communicate with Elasticsearch per its link:https://www.elastic.co/guide/en/elasticsearch/reference/2.3/indices.html[Indices APIs documentation].
++
+Fluentd sends its logs to Elasticsearch using the index format *infra-00000x* or *app-00000x*.
++
+For example, to delete all logs in the *app-000001* index, run:
++
+----
+$ curl --key /etc/elasticsearch/secret/admin-key \
+--cert /etc/elasticsearch/secret/admin-cert \
+--cacert /etc/elasticsearch/secret/admin-ca -XDELETE \
+"https://localhost:9200/app-000001"
+----
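++
+Before deleting an index, you can use the same certificates to list the indices that exist. For example, using the standard Elasticsearch `_cat/indices` API:
++
+----
+$ curl --key /etc/elasticsearch/secret/admin-key \
+--cert /etc/elasticsearch/secret/admin-cert \
+--cacert /etc/elasticsearch/secret/admin-ca \
+"https://localhost:9200/_cat/indices?v"
+----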
diff --git a/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc
new file mode 100644
index 000000000000..3223ed28b26e
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-aushape.2021-06-04.adoc
@@ -0,0 +1,63 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-aushape_{context}"]
+= Aushape exported fields
+
+These are the Aushape fields exported by OpenShift Logging that are available for
+searching from Elasticsearch and Kibana.
+
+These fields contain audit events converted with Aushape. For more information, see
+link:https://github.com/Scribery/aushape[Aushape].
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `aushape.serial`
+|Audit event serial number.
+
+| `aushape.node`
+|Name of the host where the audit event occurred.
+
+| `aushape.error`
+|The error Aushape encountered while converting the event.
+
+| `aushape.trimmed`
+|An array of JSONPath expressions relative to the event object, specifying
+objects or arrays with the content removed as the result of event size limiting.
+An empty string means the content was removed from the event itself, and an
+empty array means the trimming occurred on unspecified objects and arrays.
+
+| `aushape.text`
+|An array of log record strings representing the original audit event.
+|===
+
+[discrete]
+[id="exported-fields-aushape.data_{context}"]
+=== `aushape.data` Fields
+
+Parsed audit event data related to Aushape.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `aushape.data.avc`
+|type: nested
+
+| `aushape.data.execve`
+|type: string
+
+| `aushape.data.netfilter_cfg`
+|type: nested
+
+| `aushape.data.obj_pid`
+|type: nested
+
+| `aushape.data.path`
+|type: nested
+|===
diff --git a/_unused_topics/cluster-logging-exported-fields-collectd.adoc b/_unused_topics/cluster-logging-exported-fields-collectd.adoc
new file mode 100644
index 000000000000..75dfb4c71428
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-collectd.adoc
@@ -0,0 +1,993 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-collectd_{context}"]
+= `collectd` exported fields
+
+These are the `collectd` and `collectd-*` fields exported by the logging system and available for searching
+from Elasticsearch and Kibana.
+
+[discrete]
+[id="exported-fields-collectd_{context}"]
+=== `collectd` Fields
+
+The following fields represent namespace metrics metadata.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interval`
+|type: float
+
+The `collectd` interval.
+
+| `collectd.plugin`
+|type: string
+
+The `collectd` plug-in.
+
+| `collectd.plugin_instance`
+|type: string
+
+The `collectd` plugin_instance.
+
+| `collectd.type_instance`
+|type: string
+
+The `collectd` `type_instance`.
+
+| `collectd.type`
+|type: string
+
+The `collectd` type.
+
+| `collectd.dstypes`
+|type: string
+
+The `collectd` dstypes.
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes_{context}"]
+=== `collectd.processes` Fields
+
+The following field corresponds to the `collectd` processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_state`
+|type: integer
+
+The `collectd` `ps_state` type of processes plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_disk_ops_{context}"]
+=== `collectd.processes.ps_disk_ops` Fields
+
+The `collectd` `ps_disk_ops` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_disk_ops.read`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_disk_ops.write`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_vm`
+|type: integer
+
+The `collectd` `ps_vm` type of processes plug-in.
+
+| `collectd.processes.ps_rss`
+|type: integer
+
+The `collectd` `ps_rss` type of processes plug-in.
+
+| `collectd.processes.ps_data`
+|type: integer
+
+The `collectd` `ps_data` type of processes plug-in.
+
+| `collectd.processes.ps_code`
+|type: integer
+
+The `collectd` `ps_code` type of processes plug-in.
+
+| `collectd.processes.ps_stacksize`
+| type: integer
+
+The `collectd` `ps_stacksize` type of processes plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_cputime_{context}"]
+=== `collectd.processes.ps_cputime` Fields
+
+The `collectd` `ps_cputime` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_cputime.user`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_cputime.syst`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_count_{context}"]
+=== `collectd.processes.ps_count` Fields
+
+The `collectd` `ps_count` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_count.processes`
+|type: integer
+
+`TODO`
+
+| `collectd.processes.ps_count.threads`
+|type: integer
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_pagefaults_{context}"]
+=== `collectd.processes.ps_pagefaults` Fields
+
+The `collectd` `ps_pagefaults` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_pagefaults.majflt`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_pagefaults.minflt`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_disk_octets_{context}"]
+=== `collectd.processes.ps_disk_octets` Fields
+
+The `collectd` `ps_disk_octets` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_disk_octets.read`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_disk_octets.write`
+|type: float
+
+`TODO`
+
+| `collectd.processes.fork_rate`
+|type: float
+
+The `collectd` `fork_rate` type of processes plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk_{context}"]
+=== `collectd.disk` Fields
+
+Corresponds to `collectd` disk plug-in.
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_merged_{context}"]
+=== `collectd.disk.disk_merged` Fields
+
+The `collectd` `disk_merged` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_merged.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_merged.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_octets_{context}"]
+=== `collectd.disk.disk_octets` Fields
+
+The `collectd` `disk_octets` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_octets.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_octets.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_time_{context}"]
+=== `collectd.disk.disk_time` Fields
+
+The `collectd` `disk_time` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_time.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_time.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_ops_{context}"]
+=== `collectd.disk.disk_ops` Fields
+
+The `collectd` `disk_ops` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_ops.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_ops.write`
+|type: float
+
+`TODO`
+
+| `collectd.disk.pending_operations`
+|type: integer
+
+The `collectd` `pending_operations` type of disk plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_io_time_{context}"]
+=== `collectd.disk.disk_io_time` Fields
+
+The `collectd` `disk_io_time` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_io_time.io_time`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_io_time.weighted_io_time`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface_{context}"]
+=== `collectd.interface` Fields
+
+Corresponds to the `collectd` interface plug-in.
+
+[discrete]
+[id="exported-fields-collectd.interface.if_octets_{context}"]
+=== `collectd.interface.if_octets` Fields
+
+The `collectd` `if_octets` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_octets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_octets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface.if_packets_{context}"]
+=== `collectd.interface.if_packets` Fields
+
+The `collectd` `if_packets` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_packets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_packets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface.if_errors_{context}"]
+=== `collectd.interface.if_errors` Fields
+
+The `collectd` `if_errors` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_errors.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_errors.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface.if_dropped_{context}"]
+=== `collectd.interface.if_dropped` Fields
+
+The `collectd` `if_dropped` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_dropped.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_dropped.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt_{context}"]
+=== `collectd.virt` Fields
+
+Corresponds to `collectd` virt plug-in.
+
+[discrete]
+[id="exported-fields-collectd.virt.if_octets_{context}"]
+=== `collectd.virt.if_octets` Fields
+
+The `collectd` `if_octets` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_octets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_octets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.if_packets_{context}"]
+=== `collectd.virt.if_packets` Fields
+
+The `collectd` `if_packets` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_packets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_packets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.if_errors_{context}"]
+=== `collectd.virt.if_errors` Fields
+
+The `collectd` `if_errors` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_errors.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_errors.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.if_dropped_{context}"]
+=== `collectd.virt.if_dropped` Fields
+
+The `collectd` `if_dropped` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_dropped.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_dropped.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.disk_ops_{context}"]
+=== `collectd.virt.disk_ops` Fields
+
+The `collectd` `disk_ops` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.disk_ops.read`
+|type: float
+
+`TODO`
+
+| `collectd.virt.disk_ops.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.disk_octets_{context}"]
+=== `collectd.virt.disk_octets` Fields
+
+The `collectd` `disk_octets` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.disk_octets.read`
+|type: float
+
+`TODO`
+
+| `collectd.virt.disk_octets.write`
+|type: float
+
+`TODO`
+
+| `collectd.virt.memory`
+|type: float
+
+The `collectd` memory type of virt plug-in.
+
+| `collectd.virt.virt_vcpu`
+|type: float
+
+The `collectd` `virt_vcpu` type of virt plug-in.
+
+| `collectd.virt.virt_cpu_total`
+|type: float
+
+The `collectd` `virt_cpu_total` type of virt plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.CPU_{context}"]
+=== `collectd.CPU` Fields
+
+Corresponds to the `collectd` CPU plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.CPU.percent`
+|type: float
+
+The `collectd` type percent of plug-in CPU.
+|===
+
+[discrete]
+[id="exported-fields-collectd.df_{context}"]
+=== `collectd.df` Fields
+
+Corresponds to the `collectd` `df` plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.df.df_complex`
+|type: float
+
+The `collectd` type `df_complex` of plug-in `df`.
+
+| `collectd.df.percent_bytes`
+|type: float
+
+The `collectd` type `percent_bytes` of plug-in `df`.
+|===
+
+[discrete]
+[id="exported-fields-collectd.entropy_{context}"]
+=== `collectd.entropy` Fields
+
+Corresponds to the `collectd` entropy plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.entropy.entropy`
+|type: integer
+
+The `collectd` entropy type of entropy plug-in.
+|===
+
+////
+[discrete]
+[id="exported-fields-collectd.nfs_{context}"]
+=== `collectd.nfs` Fields
+
+Corresponds to the `collectd` NFS plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.nfs.nfs_procedure`
+|type: integer
+
+The `collectd` `nfs_procedure` type of nfs plug-in.
+|===
+////
+
+[discrete]
+[id="exported-fields-collectd.memory_{context}"]
+=== `collectd.memory` Fields
+
+Corresponds to the `collectd` memory plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.memory.memory`
+|type: float
+
+The `collectd` memory type of memory plug-in.
+
+| `collectd.memory.percent`
+|type: float
+
+The `collectd` percent type of memory plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.swap_{context}"]
+=== `collectd.swap` Fields
+
+Corresponds to the `collectd` swap plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.swap.swap`
+|type: integer
+
+The `collectd` swap type of swap plug-in.
+
+| `collectd.swap.swap_io`
+|type: integer
+
+The `collectd` `swap_io` type of swap plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.load_{context}"]
+=== `collectd.load` Fields
+
+Corresponds to the `collectd` load plug-in.
+
+[discrete]
+[id="exported-fields-collectd.load.load_{context}"]
+=== `collectd.load.load` Fields
+
+The `collectd` load type of load plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.load.load.shortterm`
+|type: float
+
+`TODO`
+
+| `collectd.load.load.midterm`
+|type: float
+
+`TODO`
+
+| `collectd.load.load.longterm`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.aggregation_{context}"]
+=== `collectd.aggregation` Fields
+
+Corresponds to `collectd` aggregation plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.aggregation.percent`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.statsd_{context}"]
+=== `collectd.statsd` Fields
+
+Corresponds to `collectd` `statsd` plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.statsd.host_cpu`
+|type: integer
+
+The `collectd` CPU type of `statsd` plug-in.
+
+| `collectd.statsd.host_elapsed_time`
+|type: integer
+
+The `collectd` `elapsed_time` type of `statsd` plug-in.
+
+| `collectd.statsd.host_memory`
+|type: integer
+
+The `collectd` memory type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_speed`
+|type: integer
+
+The `collectd` `nic_speed` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_rx`
+|type: integer
+
+The `collectd` `nic_rx` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_tx`
+|type: integer
+
+The `collectd` `nic_tx` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_rx_dropped`
+|type: integer
+
+The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_tx_dropped`
+|type: integer
+
+The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_rx_errors`
+|type: integer
+
+The `collectd` `nic_rx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_tx_errors`
+|type: integer
+
+The `collectd` `nic_tx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.host_storage`
+|type: integer
+
+The `collectd` storage type of `statsd` plug-in.
+
+| `collectd.statsd.host_swap`
+|type: integer
+
+The `collectd` swap type of `statsd` plug-in.
+
+| `collectd.statsd.host_vdsm`
+|type: integer
+
+The `collectd` VDSM type of `statsd` plug-in.
+
+| `collectd.statsd.host_vms`
+|type: integer
+
+The `collectd` VMS type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_tx_dropped`
+|type: integer
+
+The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_rx_bytes`
+|type: integer
+
+The `collectd` `nic_rx_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_tx_bytes`
+|type: integer
+
+The `collectd` `nic_tx_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_min`
+|type: integer
+
+The `collectd` `balloon_min` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_max`
+|type: integer
+
+The `collectd` `balloon_max` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_target`
+|type: integer
+
+The `collectd` `balloon_target` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_cur`
+| type: integer
+
+The `collectd` `balloon_cur` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_cpu_sys`
+|type: integer
+
+The `collectd` `cpu_sys` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_cpu_usage`
+|type: integer
+
+The `collectd` `cpu_usage` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_ops`
+|type: integer
+
+The `collectd` `disk_read_ops` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_ops`
+|type: integer
+
+The `collectd` `disk_write_ops` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_flush_latency`
+|type: integer
+
+The `collectd` `disk_flush_latency` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_apparent_size`
+|type: integer
+
+The `collectd` `disk_apparent_size` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_bytes`
+|type: integer
+
+The `collectd` `disk_write_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_rate`
+|type: integer
+
+The `collectd` `disk_write_rate` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_true_size`
+|type: integer
+
+The `collectd` `disk_true_size` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_rate`
+|type: integer
+
+The `collectd` `disk_read_rate` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_latency`
+|type: integer
+
+The `collectd` `disk_write_latency` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_latency`
+|type: integer
+
+The `collectd` `disk_read_latency` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_bytes`
+|type: integer
+
+The `collectd` `disk_read_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_rx_dropped`
+|type: integer
+
+The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_cpu_user`
+|type: integer
+
+The `collectd` `cpu_user` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_rx_errors`
+|type: integer
+
+The `collectd` `nic_rx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_tx_errors`
+|type: integer
+
+The `collectd` `nic_tx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_speed`
+|type: integer
+
+The `collectd` `nic_speed` type of `statsd` plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.postgresql_{context}"]
+=== `collectd.postgresql` Fields
+
+Corresponds to `collectd` `postgresql` plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.postgresql.pg_n_tup_g`
+|type: integer
+
+The `collectd` type `pg_n_tup_g` of plug-in postgresql.
+
+| `collectd.postgresql.pg_n_tup_c`
+|type: integer
+
+The `collectd` type `pg_n_tup_c` of plug-in postgresql.
+
+| `collectd.postgresql.pg_numbackends`
+|type: integer
+
+The `collectd` type `pg_numbackends` of plug-in postgresql.
+
+| `collectd.postgresql.pg_xact`
+|type: integer
+
+The `collectd` type `pg_xact` of plug-in postgresql.
+
+| `collectd.postgresql.pg_db_size`
+|type: integer
+
+The `collectd` type `pg_db_size` of plug-in postgresql.
+
+| `collectd.postgresql.pg_blks`
+|type: integer
+
+The `collectd` type `pg_blks` of plug-in postgresql.
+|===
diff --git a/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc
new file mode 100644
index 000000000000..d893b804f0cc
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-container.2021-06-04.adoc
@@ -0,0 +1,89 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-container_{context}"]
+= Container exported fields
+
+These are the Docker fields exported by OpenShift Logging that are available for searching from Elasticsearch and Kibana.
+They form the namespace for Docker container-specific metadata; the `docker.container_id` field is the Docker container ID.
+
+
+[discrete]
+[id="exported-fields-pipeline_metadata.collector_{context}"]
+=== `pipeline_metadata.collector` Fields
+
+This section contains metadata specific to the collector.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `pipeline_metadata.collector.hostname`
+|FQDN of the collector. It might be different from the FQDN of the actual emitter
+of the logs.
+
+| `pipeline_metadata.collector.name`
+|Name of the collector.
+
+| `pipeline_metadata.collector.version`
+|Version of the collector.
+
+| `pipeline_metadata.collector.ipaddr4`
+|IP address v4 of the collector server, can be an array.
+
+| `pipeline_metadata.collector.ipaddr6`
+|IP address v6 of the collector server, can be an array.
+
+| `pipeline_metadata.collector.inputname`
+|How the log message was received by the collector, whether it was TCP/UDP or
+imjournal/imfile.
+
+| `pipeline_metadata.collector.received_at`
+|Time when the message was received by the collector.
+
+| `pipeline_metadata.collector.original_raw_message`
+|The original non-parsed log message, collected by the collector or as close to the
+source as possible.
+|===
+
+[discrete]
+[id="exported-fields-pipeline_metadata.normalizer_{context}"]
+=== `pipeline_metadata.normalizer` Fields
+
+This section contains metadata specific to the normalizer.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `pipeline_metadata.normalizer.hostname`
+|FQDN of the normalizer.
+
+| `pipeline_metadata.normalizer.name`
+|Name of the normalizer.
+
+| `pipeline_metadata.normalizer.version`
+|Version of the normalizer.
+
+| `pipeline_metadata.normalizer.ipaddr4`
+|IP address v4 of the normalizer server, can be an array.
+
+| `pipeline_metadata.normalizer.ipaddr6`
+|IP address v6 of the normalizer server, can be an array.
+
+| `pipeline_metadata.normalizer.inputname`
+|How the log message was received by the normalizer, whether it was TCP/UDP.
+
+| `pipeline_metadata.normalizer.received_at`
+|Time when the message was received by the normalizer.
+
+| `pipeline_metadata.normalizer.original_raw_message`
+|The original non-parsed log message as it is received by the normalizer.
+
+| `pipeline_metadata.trace`
+|The field records the trace of the message. Each collector and normalizer appends
+information about itself and the date and time when the message was processed.
+|===
diff --git a/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc
new file mode 100644
index 000000000000..e26b60808513
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-default.2021-06-04.adoc
@@ -0,0 +1,1100 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-default_{context}"]
+= Default exported fields
+
+These are the default fields exported by the logging system and available for searching
+from Elasticsearch and Kibana. The default fields are Top Level and `collectd*`.
+
+[discrete]
+=== Top Level Fields
+
+The top level fields are common to every application and can be present in
+every record. For the Elasticsearch template, top level fields populate the actual
+mappings of `default` in the template's mapping section.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `@timestamp`
+| The UTC value marking when the log payload was created, or when the log payload
+was first collected if the creation time is not known. This is the log
+processing pipeline's best effort determination of when the log payload was
+generated. The `@` prefix convention denotes a field that is reserved for a
+particular use. With Elasticsearch, most tools look for `@timestamp` by default.
+For example, the format would be 2015-01-24 14:06:05.071000.
+
+| `geoip`
+|This is the geo-IP of the machine.
+
+| `hostname`
+|The `hostname` is the fully qualified domain name (FQDN) of the entity
+generating the original payload. This field is an attempt to derive this
+context. Sometimes the entity generating it knows the context, while at other
+times that entity has a restricted namespace of its own that is known to the
+collector or normalizer.
+
+| `ipaddr4`
+|The IP address V4 of the source server, which can be an array.
+
+| `ipaddr6`
+|The IP address V6 of the source server, if available.
+
+| `level`
+|The logging level as provided by rsyslog (severitytext property), python's
+logging module. Possible values are as listed at
+link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l74[`misc/sys/syslog.h`]
+plus `trace` and `unknown`. For example, _alert crit debug emerg err info notice
+trace unknown warning_. Note that `trace` is not in the `syslog.h` list but many
+applications use it.
+
+* You should only use `unknown` when the logging system gets a value it does not
+understand, and note that it is the highest level.
+
+* Consider `trace` as higher or more verbose, than `debug`.
+
+* `error` is deprecated, use `err`.
+
+* Convert `panic` to `emerg`.
+
+* Convert `warn` to `warning`.
+
+Numeric values from `syslog/journal PRIORITY` can usually be mapped using the
+priority values as listed at
+link:http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/sys/syslog.h;h=ee01478c4b19a954426a96448577c5a76e6647c0;hb=HEAD#l51[misc/sys/syslog.h].
+
+Log levels and priorities from other logging systems should be mapped to the
+nearest match. See
+link:https://docs.python.org/2.7/library/logging.html#logging-levels[python
+logging] for an example.
+
+| `message`
+|A typical log entry message, or payload. It can be stripped of metadata that the
+collector or normalizer pulled out of it, and it is UTF-8 encoded.
+
+| `pid`
+|This is the process ID of the logging entity, if available.
+
+| `service`
+|The name of the service associated with the logging entity, if available. For
+example, the `syslog APP-NAME` property is mapped to
+the service field.
+
+| `tags`
+|An optional, operator-defined list of tags placed on each log by the collector
+or normalizer. The payload can be a string with whitespace-delimited string
+tokens, or a JSON list of string tokens.
+
+| `file`
+|Optional path to the file containing the log entry, local to the collector.
+`TODO`: analyzer for file paths.
+
+| `offset`
+|The offset value can represent bytes to the start of the log line in the file
+(zero or one based), or log line numbers (zero or one based), as long as the
+values are strictly monotonically increasing in the context of a single log
+file. The values are allowed to wrap, representing a new version of the log file
+(rotation).
+
+| `namespace_name`
+|Associate this record with the `namespace` that shares its name. This value
+will not be stored, but it is used to associate the record with the appropriate
+`namespace` for access control and visualization. Normally this value will be
+given in the tag, but if the protocol does not support sending a tag, this field
+can be used. If this field is present, it will override the `namespace` given in
+the tag or in `kubernetes.namespace_name`.
+
+| `namespace_uuid`
+|This is the `uuid` associated with the `namespace_name`. This value will not be
+stored, but is used to associate the record with the appropriate namespace for
+access control and visualization. If this field is present, it will override the
+`uuid` given in `kubernetes.namespace_uuid`. This will also cause the Kubernetes
+metadata lookup to be skipped for this log record.
+|===
+
+[discrete]
+=== `collectd` Fields
+
+The following fields represent namespace metrics metadata.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interval`
+|type: float
+
+The `collectd` interval.
+
+| `collectd.plugin`
+|type: string
+
+The `collectd` plug-in.
+
+| `collectd.plugin_instance`
+|type: string
+
+The `collectd` plugin_instance.
+
+| `collectd.type_instance`
+|type: string
+
+The `collectd` `type_instance`.
+
+| `collectd.type`
+|type: string
+
+The `collectd` type.
+
+| `collectd.dstypes`
+|type: string
+
+The `collectd` dstypes.
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes_{context}"]
+=== `collectd.processes` Fields
+
+The following field corresponds to the `collectd` processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_state`
+|type: integer
+
+The `collectd` `ps_state` type of processes plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_disk_ops_{context}"]
+=== `collectd.processes.ps_disk_ops` Fields
+
+The `collectd` `ps_disk_ops` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_disk_ops.read`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_disk_ops.write`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_vm`
+|type: integer
+
+The `collectd` `ps_vm` type of processes plug-in.
+
+| `collectd.processes.ps_rss`
+|type: integer
+
+The `collectd` `ps_rss` type of processes plug-in.
+
+| `collectd.processes.ps_data`
+|type: integer
+
+The `collectd` `ps_data` type of processes plug-in.
+
+| `collectd.processes.ps_code`
+|type: integer
+
+The `collectd` `ps_code` type of processes plug-in.
+
+| `collectd.processes.ps_stacksize`
+| type: integer
+
+The `collectd` `ps_stacksize` type of processes plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_cputime_{context}"]
+=== `collectd.processes.ps_cputime` Fields
+
+The `collectd` `ps_cputime` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_cputime.user`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_cputime.syst`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_count_{context}"]
+=== `collectd.processes.ps_count` Fields
+
+The `collectd` `ps_count` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_count.processes`
+|type: integer
+
+`TODO`
+
+| `collectd.processes.ps_count.threads`
+|type: integer
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_pagefaults_{context}"]
+=== `collectd.processes.ps_pagefaults` Fields
+
+The `collectd` `ps_pagefaults` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_pagefaults.majflt`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_pagefaults.minflt`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.processes.ps_disk_octets_{context}"]
+=== `collectd.processes.ps_disk_octets` Fields
+
+The `collectd` `ps_disk_octets` type of processes plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.processes.ps_disk_octets.read`
+|type: float
+
+`TODO`
+
+| `collectd.processes.ps_disk_octets.write`
+|type: float
+
+`TODO`
+
+| `collectd.processes.fork_rate`
+|type: float
+
+The `collectd` `fork_rate` type of processes plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk_{context}"]
+=== `collectd.disk` Fields
+
+Corresponds to `collectd` disk plug-in.
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_merged_{context}"]
+=== `collectd.disk.disk_merged` Fields
+
+The `collectd` `disk_merged` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_merged.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_merged.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_octets_{context}"]
+=== `collectd.disk.disk_octets` Fields
+
+The `collectd` `disk_octets` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_octets.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_octets.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_time_{context}"]
+=== `collectd.disk.disk_time` Fields
+
+The `collectd` `disk_time` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_time.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_time.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_ops_{context}"]
+=== `collectd.disk.disk_ops` Fields
+
+The `collectd` `disk_ops` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_ops.read`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_ops.write`
+|type: float
+
+`TODO`
+
+| `collectd.disk.pending_operations`
+|type: integer
+
+The `collectd` `pending_operations` type of disk plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.disk.disk_io_time_{context}"]
+=== `collectd.disk.disk_io_time` Fields
+
+The `collectd` `disk_io_time` type of disk plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.disk.disk_io_time.io_time`
+|type: float
+
+`TODO`
+
+| `collectd.disk.disk_io_time.weighted_io_time`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface_{context}"]
+=== `collectd.interface` Fields
+
+Corresponds to the `collectd` interface plug-in.
+
+[discrete]
+[id="exported-fields-collectd.interface.if_octets_{context}"]
+=== `collectd.interface.if_octets` Fields
+
+The `collectd` `if_octets` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_octets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_octets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface.if_packets_{context}"]
+=== `collectd.interface.if_packets` Fields
+
+The `collectd` `if_packets` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_packets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_packets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface.if_errors_{context}"]
+=== `collectd.interface.if_errors` Fields
+
+The `collectd` `if_errors` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_errors.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_errors.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.interface.if_dropped_{context}"]
+=== `collectd.interface.if_dropped` Fields
+
+The `collectd` `if_dropped` type of interface plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.interface.if_dropped.rx`
+|type: float
+
+`TODO`
+
+| `collectd.interface.if_dropped.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt_{context}"]
+=== `collectd.virt` Fields
+
+Corresponds to `collectd` virt plug-in.
+
+[discrete]
+[id="exported-fields-collectd.virt.if_octets_{context}"]
+=== `collectd.virt.if_octets` Fields
+
+The `collectd` `if_octets` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_octets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_octets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.if_packets_{context}"]
+=== `collectd.virt.if_packets` Fields
+
+The `collectd` `if_packets` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_packets.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_packets.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.if_errors_{context}"]
+=== `collectd.virt.if_errors` Fields
+
+The `collectd` `if_errors` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_errors.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_errors.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.if_dropped_{context}"]
+=== `collectd.virt.if_dropped` Fields
+
+The `collectd` `if_dropped` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.if_dropped.rx`
+|type: float
+
+`TODO`
+
+| `collectd.virt.if_dropped.tx`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.disk_ops_{context}"]
+=== `collectd.virt.disk_ops` Fields
+
+The `collectd` `disk_ops` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.disk_ops.read`
+|type: float
+
+`TODO`
+
+| `collectd.virt.disk_ops.write`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.virt.disk_octets_{context}"]
+=== `collectd.virt.disk_octets` Fields
+
+The `collectd` `disk_octets` type of virt plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.virt.disk_octets.read`
+|type: float
+
+`TODO`
+
+| `collectd.virt.disk_octets.write`
+|type: float
+
+`TODO`
+
+| `collectd.virt.memory`
+|type: float
+
+The `collectd` memory type of virt plug-in.
+
+| `collectd.virt.virt_vcpu`
+|type: float
+
+The `collectd` `virt_vcpu` type of virt plug-in.
+
+| `collectd.virt.virt_cpu_total`
+|type: float
+
+The `collectd` `virt_cpu_total` type of virt plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.CPU_{context}"]
+=== `collectd.CPU` Fields
+
+Corresponds to the `collectd` CPU plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.CPU.percent`
+|type: float
+
+The `collectd` `percent` type of the CPU plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.df_{context}"]
+=== `collectd.df` Fields
+
+Corresponds to the `collectd` `df` plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.df.df_complex`
+|type: float
+
+The `collectd` `df_complex` type of the `df` plug-in.
+
+| `collectd.df.percent_bytes`
+|type: float
+
+The `collectd` `percent_bytes` type of the `df` plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.entropy_{context}"]
+=== `collectd.entropy` Fields
+
+Corresponds to the `collectd` entropy plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.entropy.entropy`
+|type: integer
+
+The `collectd` entropy type of entropy plug-in.
+|===
+
+////
+[discrete]
+[id="exported-fields-collectd.nfs_{context}"]
+=== `collectd.nfs` Fields
+
+Corresponds to the `collectd` NFS plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.nfs.nfs_procedure`
+|type: integer
+
+The `collectd` `nfs_procedure` type of nfs plug-in.
+|===
+////
+
+[discrete]
+[id="exported-fields-collectd.memory_{context}"]
+=== `collectd.memory` Fields
+
+Corresponds to the `collectd` memory plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.memory.memory`
+|type: float
+
+The `collectd` memory type of memory plug-in.
+
+| `collectd.memory.percent`
+|type: float
+
+The `collectd` percent type of memory plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.swap_{context}"]
+=== `collectd.swap` Fields
+
+Corresponds to the `collectd` swap plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.swap.swap`
+|type: integer
+
+The `collectd` swap type of swap plug-in.
+
+| `collectd.swap.swap_io`
+|type: integer
+
+The `collectd` `swap_io` type of swap plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.load_{context}"]
+=== `collectd.load` Fields
+
+Corresponds to the `collectd` load plug-in.
+
+[discrete]
+[id="exported-fields-collectd.load.load_{context}"]
+=== `collectd.load.load` Fields
+
+The `collectd` load type of load plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.load.load.shortterm`
+|type: float
+
+`TODO`
+
+| `collectd.load.load.midterm`
+|type: float
+
+`TODO`
+
+| `collectd.load.load.longterm`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.aggregation_{context}"]
+=== `collectd.aggregation` Fields
+
+Corresponds to the `collectd` aggregation plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.aggregation.percent`
+|type: float
+
+`TODO`
+|===
+
+[discrete]
+[id="exported-fields-collectd.statsd_{context}"]
+=== `collectd.statsd` Fields
+
+Corresponds to the `collectd` `statsd` plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.statsd.host_cpu`
+|type: integer
+
+The `collectd` CPU type of `statsd` plug-in.
+
+| `collectd.statsd.host_elapsed_time`
+|type: integer
+
+The `collectd` `elapsed_time` type of `statsd` plug-in.
+
+| `collectd.statsd.host_memory`
+|type: integer
+
+The `collectd` memory type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_speed`
+|type: integer
+
+The `collectd` `nic_speed` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_rx`
+|type: integer
+
+The `collectd` `nic_rx` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_tx`
+|type: integer
+
+The `collectd` `nic_tx` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_rx_dropped`
+|type: integer
+
+The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_tx_dropped`
+|type: integer
+
+The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_rx_errors`
+|type: integer
+
+The `collectd` `nic_rx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.host_nic_tx_errors`
+|type: integer
+
+The `collectd` `nic_tx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.host_storage`
+|type: integer
+
+The `collectd` storage type of `statsd` plug-in.
+
+| `collectd.statsd.host_swap`
+|type: integer
+
+The `collectd` swap type of `statsd` plug-in.
+
+| `collectd.statsd.host_vdsm`
+|type: integer
+
+The `collectd` VDSM type of `statsd` plug-in.
+
+| `collectd.statsd.host_vms`
+|type: integer
+
+The `collectd` VMS type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_tx_dropped`
+|type: integer
+
+The `collectd` `nic_tx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_rx_bytes`
+|type: integer
+
+The `collectd` `nic_rx_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_tx_bytes`
+|type: integer
+
+The `collectd` `nic_tx_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_min`
+|type: integer
+
+The `collectd` `balloon_min` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_max`
+|type: integer
+
+The `collectd` `balloon_max` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_target`
+|type: integer
+
+The `collectd` `balloon_target` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_balloon_cur`
+|type: integer
+
+The `collectd` `balloon_cur` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_cpu_sys`
+|type: integer
+
+The `collectd` `cpu_sys` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_cpu_usage`
+|type: integer
+
+The `collectd` `cpu_usage` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_ops`
+|type: integer
+
+The `collectd` `disk_read_ops` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_ops`
+|type: integer
+
+The `collectd` `disk_write_ops` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_flush_latency`
+|type: integer
+
+The `collectd` `disk_flush_latency` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_apparent_size`
+|type: integer
+
+The `collectd` `disk_apparent_size` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_bytes`
+|type: integer
+
+The `collectd` `disk_write_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_rate`
+|type: integer
+
+The `collectd` `disk_write_rate` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_true_size`
+|type: integer
+
+The `collectd` `disk_true_size` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_rate`
+|type: integer
+
+The `collectd` `disk_read_rate` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_write_latency`
+|type: integer
+
+The `collectd` `disk_write_latency` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_latency`
+|type: integer
+
+The `collectd` `disk_read_latency` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_disk_read_bytes`
+|type: integer
+
+The `collectd` `disk_read_bytes` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_rx_dropped`
+|type: integer
+
+The `collectd` `nic_rx_dropped` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_cpu_user`
+|type: integer
+
+The `collectd` `cpu_user` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_rx_errors`
+|type: integer
+
+The `collectd` `nic_rx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_tx_errors`
+|type: integer
+
+The `collectd` `nic_tx_errors` type of `statsd` plug-in.
+
+| `collectd.statsd.vm_nic_speed`
+|type: integer
+
+The `collectd` `nic_speed` type of `statsd` plug-in.
+|===
+
+[discrete]
+[id="exported-fields-collectd.postgresql_{context}"]
+=== `collectd.postgresql` Fields
+
+Corresponds to the `collectd` `postgresql` plug-in.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `collectd.postgresql.pg_n_tup_g`
+|type: integer
+
+The `collectd` `pg_n_tup_g` type of the `postgresql` plug-in.
+
+| `collectd.postgresql.pg_n_tup_c`
+|type: integer
+
+The `collectd` `pg_n_tup_c` type of the `postgresql` plug-in.
+
+| `collectd.postgresql.pg_numbackends`
+|type: integer
+
+The `collectd` `pg_numbackends` type of the `postgresql` plug-in.
+
+| `collectd.postgresql.pg_xact`
+|type: integer
+
+The `collectd` `pg_xact` type of the `postgresql` plug-in.
+
+| `collectd.postgresql.pg_db_size`
+|type: integer
+
+The `collectd` `pg_db_size` type of the `postgresql` plug-in.
+
+| `collectd.postgresql.pg_blks`
+|type: integer
+
+The `collectd` `pg_blks` type of the `postgresql` plug-in.
+|===
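+
+The following is a minimal, hypothetical sketch of how the `collectd.memory` fields described above might appear as a nested JSON record in Elasticsearch. The values are illustrative only:
+
+[source,json]
+----
+{
+  "collectd": {
+    "memory": {
+      "memory": 2048576.0,
+      "percent": 42.5
+    }
+  }
+}
+----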
diff --git a/_unused_topics/cluster-logging-exported-fields-docker.adoc b/_unused_topics/cluster-logging-exported-fields-docker.adoc
new file mode 100644
index 000000000000..26d77f062ca0
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-docker.adoc
@@ -0,0 +1,89 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-container_{context}"]
+= Container exported fields
+
+These are the Docker fields exported by OpenShift Logging and available for searching from Elasticsearch and Kibana.
+These fields populate the namespace for Docker container-specific metadata. The `docker.container_id` field is the Docker container ID.
+
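+The following is a minimal, hypothetical sketch of this namespace in an exported record. The container ID value is illustrative only:
+
+[source,json]
+----
+{
+  "docker": {
+    "container_id": "f57f57dfcf2c0b3b26c22c3f1f8ab1a23d28fd04ec3b03ed20b6b1754a6c1e1b"
+  }
+}
+----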
+
+[discrete]
+[id="pipeline_metadata.collector_{context}"]
+=== `pipeline_metadata.collector` Fields
+
+This section contains metadata specific to the collector.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `pipeline_metadata.collector.hostname`
+|FQDN of the collector. It might be different from the FQDN of the actual emitter
+of the logs.
+
+| `pipeline_metadata.collector.name`
+|Name of the collector.
+
+| `pipeline_metadata.collector.version`
+|Version of the collector.
+
+| `pipeline_metadata.collector.ipaddr4`
+|The IPv4 address of the collector server. This can be an array.
+
+| `pipeline_metadata.collector.ipaddr6`
+|The IPv6 address of the collector server. This can be an array.
+
+| `pipeline_metadata.collector.inputname`
+|How the log message was received by the collector, whether it was TCP/UDP or
+imjournal/imfile.
+
+| `pipeline_metadata.collector.received_at`
+|Time when the message was received by the collector.
+
+| `pipeline_metadata.collector.original_raw_message`
+|The original non-parsed log message, collected by the collector or as close to the
+source as possible.
+|===
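+
+The following is a minimal, hypothetical sketch of these fields in an exported record. The host name, address, version, and timestamp are illustrative only:
+
+[source,json]
+----
+{
+  "pipeline_metadata": {
+    "collector": {
+      "hostname": "fluentd-7xk2p.example.com",
+      "name": "fluentd",
+      "version": "1.7.4",
+      "ipaddr4": "10.128.2.12",
+      "inputname": "imjournal",
+      "received_at": "2021-06-04T12:00:00.000000+00:00"
+    }
+  }
+}
+----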
+
+[discrete]
+[id="exported-fields-pipeline_metadata.normalizer_{context}"]
+=== `pipeline_metadata.normalizer` Fields
+
+This section contains metadata specific to the normalizer.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `pipeline_metadata.normalizer.hostname`
+|FQDN of the normalizer.
+
+| `pipeline_metadata.normalizer.name`
+|Name of the normalizer.
+
+| `pipeline_metadata.normalizer.version`
+|Version of the normalizer.
+
+| `pipeline_metadata.normalizer.ipaddr4`
+|The IPv4 address of the normalizer server. This can be an array.
+
+| `pipeline_metadata.normalizer.ipaddr6`
+|The IPv6 address of the normalizer server. This can be an array.
+
+| `pipeline_metadata.normalizer.inputname`
+|How the log message was received by the normalizer, whether it was TCP/UDP.
+
+| `pipeline_metadata.normalizer.received_at`
+|Time when the message was received by the normalizer.
+
+| `pipeline_metadata.normalizer.original_raw_message`
+|The original non-parsed log message as it is received by the normalizer.
+
+| `pipeline_metadata.trace`
+|The field records the trace of the message. Each collector and normalizer appends
+information about itself and the date and time when the message was processed.
+|===
diff --git a/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc
new file mode 100644
index 000000000000..88fb91344743
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-kubernetes.2021-06-04.adoc
@@ -0,0 +1,83 @@
+[id="cluster-logging-exported-fields-kubernetes_{context}"]
+= Kubernetes
+
+The following fields can be present in the namespace for Kubernetes-specific metadata.
+
+== kubernetes.pod_name
+
+The name of the pod.
+
+[horizontal]
+Data type:: keyword
+
+
+== kubernetes.pod_id
+
+Kubernetes ID of the pod.
+
+[horizontal]
+Data type:: keyword
+
+
+== kubernetes.namespace_name
+
+The name of the namespace in Kubernetes.
+
+[horizontal]
+Data type:: keyword
+
+
+== kubernetes.namespace_id
+
+ID of the namespace in Kubernetes.
+
+[horizontal]
+Data type:: keyword
+
+
+== kubernetes.host
+
+The Kubernetes node name.
+
+[horizontal]
+Data type:: keyword
+
+
+== kubernetes.master_url
+
+The Kubernetes master URL.
+
+[horizontal]
+Data type:: keyword
+
+
+== kubernetes.container_name
+
+The name of the container in Kubernetes.
+
+[horizontal]
+Data type:: text
+
+
+== kubernetes.annotations
+
+Annotations associated with the Kubernetes object.
+
+[horizontal]
+Data type:: group
+
+
+== kubernetes.labels
+
+Labels attached to the Kubernetes object. Each label name is a subfield of the `labels` field, and each label name is de-dotted: dots in the name are replaced with underscores.
+
+[horizontal]
+Data type:: group
+
+
+== kubernetes.event
+
+The Kubernetes event obtained from the Kubernetes master API. The event is already a JSON object and is nested as a whole under the `kubernetes` field. This description loosely follows `type Event` in https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#event-v1-core.
+
+[horizontal]
+Data type:: group
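+
+The following is a minimal, hypothetical sketch of the `kubernetes` namespace in a log record, including a de-dotted label name as described above. All values are illustrative:
+
+[source,json]
+----
+{
+  "kubernetes": {
+    "pod_name": "my-app-6b8c5f7d9-abcde",
+    "namespace_name": "my-project",
+    "container_name": "my-app",
+    "host": "worker-0",
+    "labels": {
+      "app_kubernetes_io/name": "my-app"
+    }
+  }
+}
+----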
diff --git a/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc
new file mode 100644
index 000000000000..6c5dcd5b4470
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-ovirt.2021-06-04.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-ovirt_{context}"]
+= oVirt exported fields
+
+These are the oVirt fields exported by OpenShift Logging and available for searching
+from Elasticsearch and Kibana.
+
+Namespace for oVirt metadata.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `ovirt.entity`
+|The type of the data source: hosts, VMs, or engine.
+
+| `ovirt.host_id`
+|The oVirt host UUID.
+|===
+
+[discrete]
+[id="exported-fields-ovirt.engine_{context}"]
+=== `ovirt.engine` Fields
+
+Namespace for metadata related to the {rh-virtualization-engine-name}. The FQDN of the {rh-virtualization-engine-name} is
+`ovirt.engine.fqdn`.
diff --git a/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc
new file mode 100644
index 000000000000..fec43d97ad1a
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-rsyslog.2021-06-04.adoc
@@ -0,0 +1,34 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-rsyslog_{context}"]
+= `rsyslog` exported fields
+
+These are the `rsyslog` fields exported by the logging system and available for searching
+from Elasticsearch and Kibana.
+
+The following fields are RFC 5424-based metadata.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `rsyslog.facility`
+|See the `syslog` specification for more information on `rsyslog`.
+
+| `rsyslog.protocol-version`
+|This is the `rsyslog` protocol version.
+
+| `rsyslog.structured-data`
+|See the `syslog` specification for more information on `syslog` structured data.
+
+| `rsyslog.msgid`
+|This is the `syslog` msgid field.
+
+| `rsyslog.appname`
+|If `app-name` is the same as `programname`, only the top-level `service` field is filled.
+If `app-name` is not equal to `programname`, this field holds `app-name`.
+See the `syslog` specification for more information.
+|===
diff --git a/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc
new file mode 100644
index 000000000000..19e1d6a4cdca
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-systemd.2021-06-04.adoc
@@ -0,0 +1,195 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-systemd_{context}"]
+= systemd exported fields
+
+These are the `systemd` fields exported by OpenShift Logging and available for searching
+from Elasticsearch and Kibana.
+
+This namespace contains common fields specific to the `systemd` journal.
+link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html[Applications]
+can write their own fields to the journal. These are available under the
+`systemd.u` namespace. `RESULT` and `UNIT` are two such fields.
+
+[discrete]
+[id="exported-fields-systemd.k_{context}"]
+=== `systemd.k` Fields
+
+The following table contains `systemd` kernel-specific metadata.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `systemd.k.KERNEL_DEVICE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_DEVICE=[`systemd.k.KERNEL_DEVICE`]
+is the kernel device name.
+
+| `systemd.k.KERNEL_SUBSYSTEM`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_KERNEL_SUBSYSTEM=[`systemd.k.KERNEL_SUBSYSTEM`]
+is the kernel subsystem name.
+
+| `systemd.k.UDEV_DEVLINK`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVLINK=[`systemd.k.UDEV_DEVLINK`]
+includes additional symlink names that point to the node.
+
+| `systemd.k.UDEV_DEVNODE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_DEVNODE=[`systemd.k.UDEV_DEVNODE`]
+is the node path of the device.
+
+| `systemd.k.UDEV_SYSNAME`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_UDEV_SYSNAME=[`systemd.k.UDEV_SYSNAME`]
+is the kernel device name.
+
+|===
+
+[discrete]
+[id="exported-fields-systemd.t_{context}"]
+=== `systemd.t` Fields
+
+`systemd.t` fields are trusted journal fields: fields that are implicitly added
+by the journal and cannot be altered by client code.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `systemd.t.AUDIT_LOGINUID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_LOGINUID`]
+is the user ID for the journal entry process.
+
+| `systemd.t.BOOT_ID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_BOOT_ID=[`systemd.t.BOOT_ID`]
+is the kernel boot ID.
+
+| `systemd.t.AUDIT_SESSION`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_AUDIT_SESSION=[`systemd.t.AUDIT_SESSION`]
+is the session for the journal entry process.
+
+| `systemd.t.CAP_EFFECTIVE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_CAP_EFFECTIVE=[`systemd.t.CAP_EFFECTIVE`]
+represents the capabilities of the journal entry process.
+
+| `systemd.t.CMDLINE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.CMDLINE`]
+is the command line of the journal entry process.
+
+| `systemd.t.COMM`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.COMM`]
+is the name of the journal entry process.
+
+| `systemd.t.EXE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_COMM=[`systemd.t.EXE`]
+is the executable path of the journal entry process.
+
+| `systemd.t.GID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.GID`]
+is the group ID for the journal entry process.
+
+| `systemd.t.HOSTNAME`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_HOSTNAME=[`systemd.t.HOSTNAME`]
+is the name of the host.
+
+| `systemd.t.MACHINE_ID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_MACHINE_ID=[`systemd.t.MACHINE_ID`]
+is the machine ID of the host.
+
+| `systemd.t.PID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.PID`]
+is the process ID for the journal entry process.
+
+| `systemd.t.SELINUX_CONTEXT`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SELINUX_CONTEXT=[`systemd.t.SELINUX_CONTEXT`]
+is the security context, or label, for the journal entry process.
+
+| `systemd.t.SOURCE_REALTIME_TIMESTAMP`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SOURCE_REALTIME_TIMESTAMP=[`systemd.t.SOURCE_REALTIME_TIMESTAMP`]
+is the earliest and most reliable timestamp of the message. This is converted to RFC 3339 NS format.
+
+| `systemd.t.SYSTEMD_CGROUP`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_CGROUP`]
+is the `systemd` control group path.
+
+| `systemd.t.SYSTEMD_OWNER_UID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_OWNER_UID`]
+is the owner ID of the session.
+
+| `systemd.t.SYSTEMD_SESSION`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SESSION`],
+if applicable, is the `systemd` session ID.
+
+| `systemd.t.SYSTEMD_SLICE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_SLICE`]
+is the slice unit of the journal entry process.
+
+| `systemd.t.SYSTEMD_UNIT`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_UNIT`]
+is the unit name for a session.
+
+| `systemd.t.SYSTEMD_USER_UNIT`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_SYSTEMD_CGROUP=[`systemd.t.SYSTEMD_USER_UNIT`],
+if applicable, is the user unit name for a session.
+
+| `systemd.t.TRANSPORT`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_TRANSPORT=[`systemd.t.TRANSPORT`]
+is the method of entry by the journal service. This includes `audit`, `driver`,
+`syslog`, `journal`, `stdout`, and `kernel`.
+
+| `systemd.t.UID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#_PID=[`systemd.t.UID`]
+is the user ID for the journal entry process.
+
+| `systemd.t.SYSLOG_FACILITY`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_FACILITY`]
+is the field containing the facility, formatted as a decimal string, for `syslog`.
+
+| `systemd.t.SYSLOG_IDENTIFIER`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`systemd.t.SYSLOG_IDENTIFIER`]
+is the identifier for `syslog`.
+
+| `systemd.t.SYSLOG_PID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#SYSLOG_FACILITY=[`SYSLOG_PID`]
+is the client process ID for `syslog`.
+|===
+
+[discrete]
+[id="exported-fields-systemd.u_{context}"]
+=== `systemd.u` Fields
+
+`systemd.u` fields are passed directly from clients and stored in the journal.
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `systemd.u.CODE_FILE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FILE`]
+is the code location containing the filename of the source.
+
+| `systemd.u.CODE_FUNCTION`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_FUNCTION`]
+is the code location containing the function of the source.
+
+| `systemd.u.CODE_LINE`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#CODE_FILE=[`systemd.u.CODE_LINE`]
+is the code location containing the line number of the source.
+
+| `systemd.u.ERRNO`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#ERRNO=[`systemd.u.ERRNO`],
+if present, is the low-level error number, formatted as a decimal string.
+
+| `systemd.u.MESSAGE_ID`
+|link:https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html#MESSAGE_ID=[`systemd.u.MESSAGE_ID`]
+is the message identifier ID for recognizing message types.
+
+| `systemd.u.RESULT`
+|For private use only.
+
+| `systemd.u.UNIT`
+|For private use only.
+|===
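+
+The following is a minimal, hypothetical sketch of how trusted (`systemd.t`) and user (`systemd.u`) journal fields might appear in an exported record. All values are illustrative:
+
+[source,json]
+----
+{
+  "systemd": {
+    "t": {
+      "BOOT_ID": "0937011437e44850b3cb5a615345b50f",
+      "COMM": "crio",
+      "PID": "1423",
+      "TRANSPORT": "journal"
+    },
+    "u": {
+      "MESSAGE_ID": "ec387f577b844b8fa948f33cad9a75e6"
+    }
+  }
+}
+----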
diff --git a/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc b/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc
new file mode 100644
index 000000000000..82724afc1591
--- /dev/null
+++ b/_unused_topics/cluster-logging-exported-fields-tlog.2021-06-04.adoc
@@ -0,0 +1,51 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-exported-fields.adoc
+
+[id="cluster-logging-exported-fields-tlog_{context}"]
+= Tlog exported fields
+
+These are the Tlog fields exported by the OpenShift Logging system and available for searching
+from Elasticsearch and Kibana.
+
+These fields represent Tlog terminal I/O recording messages. For more information, see
+link:https://github.com/Scribery/tlog[Tlog].
+
+[cols="3,7",options="header"]
+|===
+|Parameter
+|Description
+
+| `tlog.ver`
+|Message format version number.
+
+| `tlog.user`
+|Recorded user name.
+
+| `tlog.term`
+|Terminal type name.
+
+| `tlog.session`
+|Audit session ID of the recorded session.
+
+| `tlog.id`
+|ID of the message within the session.
+
+| `tlog.pos`
+|Message position in the session, in milliseconds.
+
+| `tlog.timing`
+|Distribution of this message's events in time.
+
+| `tlog.in_txt`
+|Input text with invalid characters scrubbed.
+
+| `tlog.in_bin`
+|Scrubbed invalid input characters as bytes.
+
+| `tlog.out_txt`
+|Output text with invalid characters scrubbed.
+
+| `tlog.out_bin`
+|Scrubbed invalid output characters as bytes.
+|===
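+
+The following is a minimal, hypothetical sketch of a Tlog recording message using the fields above. All values are illustrative:
+
+[source,json]
+----
+{
+  "tlog": {
+    "ver": "2.2",
+    "user": "developer",
+    "term": "xterm-256color",
+    "session": 7,
+    "id": 1,
+    "pos": 0,
+    "out_txt": "$ "
+  }
+}
+----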
diff --git a/_unused_topics/cluster-logging-kibana-console-launch.adoc b/_unused_topics/cluster-logging-kibana-console-launch.adoc
new file mode 100644
index 000000000000..d7a36efb42a9
--- /dev/null
+++ b/_unused_topics/cluster-logging-kibana-console-launch.adoc
@@ -0,0 +1,28 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-kibana-console.adoc
+// * logging/cluster-logging-visualizer.adoc
+
+[id="cluster-logging-kibana-visualize_{context}"]
+= Launching the Kibana interface
+
+The Kibana interface is a browser-based console
+to query, discover, and visualize your Elasticsearch data through histograms, line graphs,
+pie charts, heat maps, built-in geospatial support, and other visualizations.
+
+.Procedure
+
+To launch the Kibana interface:
+
+. In the {product-title} console, click *Monitoring* -> *Logging*.
+
+. Log in using the same credentials you use to log in to the {product-title} console.
++
+The Kibana interface launches. You can now:
++
+* Search and browse your data using the Discover page.
+* Chart and map your data using the Visualize page.
+* Create and view custom dashboards using the Dashboard page.
++
+Use and configuration of the Kibana interface are beyond the scope of this documentation.
+For more information on using the interface, see the link:https://www.elastic.co/guide/en/kibana/5.6/connect-to-elasticsearch.html[Kibana documentation].
diff --git a/_unused_topics/cluster-logging-log-forwarding-disable.adoc b/_unused_topics/cluster-logging-log-forwarding-disable.adoc
new file mode 100644
index 000000000000..680ea9b95686
--- /dev/null
+++ b/_unused_topics/cluster-logging-log-forwarding-disable.adoc
@@ -0,0 +1,47 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-external.adoc
+
+[id="cluster-logging-log-forwarding-disable_{context}"]
+= Disabling the Log Forwarding feature
+
+To disable the Log Forwarding feature, remove the `clusterlogging.openshift.io/logforwardingtechpreview: enabled` annotation from the Cluster Logging custom resource (CR) and delete the `ClusterLogForwarder` CR. Container and node logs are then forwarded to the internal {product-title} Elasticsearch instance.
+
+[IMPORTANT]
+====
+You cannot disable Log Forwarding by setting the `disableDefaultForwarding` parameter to `false` in the `ClusterLogForwarder` CR. Doing so prevents OpenShift Logging from sending logs to the specified endpoints *and* to the default internal {product-title} Elasticsearch instance.
+====
+
+.Procedure
+
+To disable the Log Forwarding feature:
+
+. Edit the OpenShift Logging CR in the `openshift-logging` project:
++
+[source,terminal]
+----
+$ oc edit ClusterLogging instance
+----
+
+. Remove the `clusterlogging.openshift.io/logforwardingtechpreview` annotation:
++
+[source,yaml]
+----
+apiVersion: "logging.openshift.io/v1"
+kind: "ClusterLogging"
+metadata:
+ annotations:
+ clusterlogging.openshift.io/logforwardingtechpreview: enabled <1>
+ name: "instance"
+ namespace: "openshift-logging"
+...
+----
+<1> Remove this annotation.
+
+. Delete the `ClusterLogForwarder` CR:
++
+[source,terminal]
+----
+$ oc delete ClusterLogForwarder instance -n openshift-logging
+----
+
diff --git a/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc
new file mode 100644
index 000000000000..ec4c0d37eac0
--- /dev/null
+++ b/_unused_topics/cluster-logging-uninstall-cluster-ops.adoc
@@ -0,0 +1,19 @@
+// Module included in the following assemblies:
+//
+// * logging/cluster-logging-uninstall.adoc
+
+[id="cluster-logging-uninstall-ops_{context}"]
+= Uninstalling the infra cluster
+
+You can uninstall the infra cluster from OpenShift Logging.
+After uninstalling, Fluentd no longer splits logs.
+
+.Procedure
+
+To uninstall the infra cluster:
+
+.
+
+.
+
+.
diff --git a/_unused_topics/cnv-accessing-vmi-web.adoc b/_unused_topics/cnv-accessing-vmi-web.adoc
new file mode 100644
index 000000000000..f733d2873fd5
--- /dev/null
+++ b/_unused_topics/cnv-accessing-vmi-web.adoc
@@ -0,0 +1,18 @@
+// Module included in the following assemblies:
+//
+
+[id="virt-accessing-vmi-web_{context}"]
+= Connecting to a virtual machine with the web console
+
+You can connect to a virtual machine by using the web console.
+
+.Procedure
+
+. Ensure you are in the correct project. If not, click the *Project*
+list and select the appropriate project.
+. Click *Workloads* -> *Virtual Machines* to display the virtual
+machines in the project.
+. Select a virtual machine.
+. In the *Overview* tab, click the `virt-launcher-` pod.
+. Click the *Terminal* tab. If the terminal is blank, click the
+terminal and press any key to initiate connection.
diff --git a/modules/completing-installation.adoc b/_unused_topics/completing-installation.adoc
similarity index 87%
rename from modules/completing-installation.adoc
rename to _unused_topics/completing-installation.adoc
index a981b6cdc3f7..911997c61e1a 100644
--- a/modules/completing-installation.adoc
+++ b/_unused_topics/completing-installation.adoc
@@ -5,7 +5,7 @@
[id="completing-installation_{context}"]
= Completing and verifying the {product-title} installation
-When the bootstrap node is done with its work and has handed off control to the new {product-title} cluster, the bootstrap node is destroyed. The installer waits for the cluster to initialize, creates a route to the {product-title} console, and presents the information and credentials you require to log into the cluster. Here’s an example:
+When the bootstrap node is done with its work and has handed off control to the new {product-title} cluster, the bootstrap node is destroyed. The installation program waits for the cluster to initialize, creates a route to the {product-title} console, and presents the information and credentials you require to log in to the cluster. Here’s an example:
----
INFO Install complete!
diff --git a/modules/con-pod-reset-policy.adoc b/_unused_topics/con-pod-reset-policy.adoc
similarity index 100%
rename from modules/con-pod-reset-policy.adoc
rename to _unused_topics/con-pod-reset-policy.adoc
diff --git a/modules/configuration-resource-configure.adoc b/_unused_topics/configuration-resource-configure.adoc
similarity index 100%
rename from modules/configuration-resource-configure.adoc
rename to _unused_topics/configuration-resource-configure.adoc
diff --git a/modules/configuring-local-provisioner.adoc b/_unused_topics/configuring-local-provisioner.adoc
similarity index 100%
rename from modules/configuring-local-provisioner.adoc
rename to _unused_topics/configuring-local-provisioner.adoc
diff --git a/authentication/configuring-user-agent.adoc b/_unused_topics/configuring-user-agent.adoc
similarity index 99%
rename from authentication/configuring-user-agent.adoc
rename to _unused_topics/configuring-user-agent.adoc
index e72c4f7abfbb..fa81389ca9bc 100644
--- a/authentication/configuring-user-agent.adoc
+++ b/_unused_topics/configuring-user-agent.adoc
@@ -2,6 +2,7 @@
= Configuring the user agent
include::modules/common-attributes.adoc[]
:context: configuring-user-agent
+
toc::[]
include::modules/user-agent-overview.adoc[leveloffset=+1]
diff --git a/modules/customize-certificates-api-add-default.adoc b/_unused_topics/customize-certificates-api-add-default.adoc
similarity index 88%
rename from modules/customize-certificates-api-add-default.adoc
rename to _unused_topics/customize-certificates-api-add-default.adoc
index f8dbef3276f4..a70aeb11709a 100644
--- a/modules/customize-certificates-api-add-default.adoc
+++ b/_unused_topics/customize-certificates-api-add-default.adoc
@@ -1,6 +1,6 @@
// Module included in the following assemblies:
//
-// * authentication/certificates/api-server.adoc
+// * security/certificates/api-server.adoc
[id="add-default-api-server_{context}"]
= Add an API server default certificate
@@ -46,11 +46,11 @@ the previous step.
referenced.
+
----
-$ oc describe apiserver cluster
+$ oc get apiserver cluster -o yaml
...
-Spec:
- Serving Certs:
- Default Serving Certificate:
- Name:
+spec:
+ servingCerts:
+ defaultServingCertificate:
+ name:
...
----
diff --git a/modules/deploying-local-provisioner.adoc b/_unused_topics/deploying-local-provisioner.adoc
similarity index 100%
rename from modules/deploying-local-provisioner.adoc
rename to _unused_topics/deploying-local-provisioner.adoc
diff --git a/modules/exploring-cvo.adoc b/_unused_topics/exploring-cvo.adoc
similarity index 97%
rename from modules/exploring-cvo.adoc
rename to _unused_topics/exploring-cvo.adoc
index 2c6a30435f6e..416394623c91 100644
--- a/modules/exploring-cvo.adoc
+++ b/_unused_topics/exploring-cvo.adoc
@@ -11,7 +11,7 @@ To see the current version that your cluster is on, type:
$ oc get clusterversion
NAME VERSION AVAILABLE PROGRESSING SINCE STATUS
-version 4.1.0-0.7 True False 10h Cluster version is 4.1.0-0.7
+version 4.5.4 True False 10h Cluster version is 4.5.4
----
Each release version is represented by a set of images. To see basic release information and a list of those images, type:
@@ -67,4 +67,4 @@ openshift-samples True False
operator-lifecycle-manager True False False 10h
----
-While most of the Cluster Operators listed provide services to the {product-title} cluster, the machine-config Operator in particular is tasked with managing the {op-system} operating systems in the nodes.
\ No newline at end of file
+While most of the Cluster Operators listed provide services to the {product-title} cluster, the machine-config Operator in particular is tasked with managing the {op-system} operating systems in the nodes.
diff --git a/modules/identity-provider-create-CR.adoc b/_unused_topics/identity-provider-create-CR.adoc
similarity index 100%
rename from modules/identity-provider-create-CR.adoc
rename to _unused_topics/identity-provider-create-CR.adoc
diff --git a/modules/identity-provider-provisioning-user-lookup-mapping.adoc b/_unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc
similarity index 100%
rename from modules/identity-provider-provisioning-user-lookup-mapping.adoc
rename to _unused_topics/identity-provider-provisioning-user-lookup-mapping.adoc
diff --git a/_unused_topics/images-s2i-java-build-deploy-applications.adoc b/_unused_topics/images-s2i-java-build-deploy-applications.adoc
new file mode 100644
index 000000000000..94e7fa2e6f9c
--- /dev/null
+++ b/_unused_topics/images-s2i-java-build-deploy-applications.adoc
@@ -0,0 +1,69 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-s2i-java-build-deploy-applications_{context}"]
+= Building and deploying Java applications
+
+The same source-to-image (S2I) builder image can be used to build a Java application from source or from binary artifacts.
+
+[id="images-s2i-java-build-deploy-applications-source_{context}"]
+== Building and deploying from source
+
+The Java S2I builder image can be used to build an application from source by running `oc new-app` against a source repository.
+
+.Procedure
+
+. To build an application from source, run `oc new-app` against a source repository. For example:
++
+[source,terminal]
+----
+$ oc new-app registry.redhat.io/redhat-openjdk-18/openjdk18-openshift~https://github.com/jboss-openshift/openshift-quickstarts --context-dir=undertow-servlet
+----
++
+. By default, tests are not run. To build an application and run tests as part of the build, override the default `MAVEN_ARGS` by entering the following command:
++
+[source,terminal]
+----
+$ oc new-app registry.redhat.io/redhat-openjdk-18/openjdk18-openshift~ --context-dir= --build-env='MAVEN_ARGS=-e -Popenshift -Dcom.redhat.xpaas.repo.redhatga package'
+----
++
+If a Java project consists of multiple Maven modules, it can be useful to explicitly specify the artifact output directory. Specifying the directory where the Maven project outputs the artifacts enables the S2I build to pick them up.
+
+. To specify the modules to build and the artifact output directory, use the following command:
++
+[source,terminal]
+----
+$ oc new-app registry.redhat.io/redhat-openjdk-18/openjdk18-openshift~ --context-dir= --build-env='ARTIFACT_DIR=relative/path/to/artifacts/dir' --build-env='MAVEN_ARGS=install -pl : -am'
+----
+
+[id="images-s2i-java-build-deploy-applications-bianary_{context}"]
+== Building and deploying from binary artifacts
+
+The Java S2I builder image can be used to build an application using binary
+artifacts that you provide.
+
+.Procedure
+
+. Create a new binary build using the `oc new-build` command:
++
+[source,terminal]
+----
+$ oc new-build --name= registry.redhat.io/redhat-openjdk-18/openjdk18-openshift --binary=true
+----
++
+. Start a build using the `oc start-build` command, specifying the path to
+the binary artifacts on your local machine:
++
+[source,terminal]
+----
+$ oc start-build --from-dir=/path/to/artifacts --follow
+----
++
+. Enter the `oc new-app` command to create an application:
++
+[source,terminal]
+----
+$ oc new-app
+----
diff --git a/_unused_topics/images-s2i-java-configuration.adoc b/_unused_topics/images-s2i-java-configuration.adoc
new file mode 100644
index 000000000000..155a7a4f1a84
--- /dev/null
+++ b/_unused_topics/images-s2i-java-configuration.adoc
@@ -0,0 +1,35 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-s2i-java-configuration_{context}"]
+= Configuring source-to-image for Java
+
+By default, the Java source-to-image (S2I) builder image uses Maven to build the project with the following goals and options:
+
+[source,bash]
+----
+mvn -Dmaven.repo.local=/tmp/artifacts/m2 -s /tmp/artifacts/configuration/settings.xml -e -Popenshift -DskipTests -Dcom.redhat.xpaas.repo.redhatga -Dfabric8.skip=true package -Djava.net.preferIPv4Stack=true
+----
+
+Based on these defaults, the builder image compiles the project and copies all the transitive dependencies into the output directory without running tests. If the project has a profile named `openshift`, then it is activated for the build.
+
+You can override these default goals and options by specifying the following environment variables:
+
+.Java Environment Variables
+[options="header"]
+|===
+
+|Variable name |Description
+
+|`ARTIFACT_DIR`
+|The relative path to the target where JAR files are created for multi-module builds.
+
+|`JAVA_MAIN_CLASS`
+|The main class to use as the argument to Java. This can also be specified in the `.s2i/environment` file as a Maven property inside the project, `docker.env.Main`.
+
+|`MAVEN_ARGS`
+|The arguments that are passed to the `mvn` command.
+
+|===
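+
+For example, the following is a minimal, hypothetical `.s2i/environment` file that overrides the defaults for a multi-module build. The module and directory names are placeholders:
+
+[source,text]
+----
+ARTIFACT_DIR=my-module/target
+MAVEN_ARGS=install -pl my-module -am -Popenshift -DskipTests
+----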
diff --git a/_unused_topics/images-s2i-java-pulling-images.adoc b/_unused_topics/images-s2i-java-pulling-images.adoc
new file mode 100644
index 000000000000..dc9604744b07
--- /dev/null
+++ b/_unused_topics/images-s2i-java-pulling-images.adoc
@@ -0,0 +1,23 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-s2i-java-pulling-images_{context}"]
+= Pulling images for Java
+
+The Red Hat Enterprise Linux (RHEL) 8 image is available through the Red Hat Registry.
+
+.Procedure
+
+. To pull the RHEL 8 image, enter the following command:
++
+[source,terminal]
+----
+$ podman pull registry.redhat.io/redhat-openjdk-18/openjdk18-openshift
+----
+
+To use this image on {product-title}, you can either access it directly from the Red Hat Registry or push it into your {product-title} container image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location.
+
+////
+Your {product-title} resources can then reference the link:https://github.com/jboss-openshift/application-templates/blob/master/jboss-image-streams.json[image stream definition].
+////
diff --git a/_unused_topics/images-s2i-nodejs-pulling-images.adoc b/_unused_topics/images-s2i-nodejs-pulling-images.adoc
new file mode 100644
index 000000000000..c39a25d474b3
--- /dev/null
+++ b/_unused_topics/images-s2i-nodejs-pulling-images.adoc
@@ -0,0 +1,50 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-s2i-nodejs-pulling-images_{context}"]
+= Pulling images for Node.js
+
+//These images come in two options:
+
+//* RHEL 8
+//* CentOS 7
+
+//*RHEL 8 Based Images*
+
+The RHEL 8 images are available through the Red Hat Registry.
+
+.Procedure
+
+* To pull the RHEL 8 image, enter the following command for the version of Node.js you want:
++
+.Node.js `12`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/nodejs-12-rhel7:latest
+----
++
+.Node.js `10`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/nodejs-10-rhel7:latest
+----
+
+////
+*CentOS 7 Based Image*
+
+This image is available on link:quay.io[Quay.io].
+
+.Procedure
+
+* To pull the CentOS 7 image, enter the following command:
++
+[source,terminal]
+----
+$ podman pull openshift/nodejs-010-centos7
+----
+////
+
+To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-title} image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the
+image stream.
diff --git a/_unused_topics/images-using-images-s2i-java.adoc b/_unused_topics/images-using-images-s2i-java.adoc
new file mode 100644
index 000000000000..7c7e0d896d48
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-java.adoc
@@ -0,0 +1,16 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-java_{context}"]
+= Java overview
+
+{product-title} provides a source-to-image (S2I) builder image for building Java applications. This builder image takes your application source or binary artifacts, builds the source using Maven if a source was provided, and assembles the artifacts with any required dependencies to create a new, ready-to-run image containing your Java application. This resulting image can be run on {product-title}.
+
+The builder image is intended for use with link:https://maven.apache.org[Maven]-based Java standalone projects that are run with a main class.
+
+[discrete]
+== Additional resources
+
+* Find additional information and examples in the link:https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html-single/red_hat_java_s2i_for_openshift/[Red Hat JBoss Middleware] documentation.
diff --git a/_unused_topics/images-using-images-s2i-nodejs-configuration.adoc b/_unused_topics/images-using-images-s2i-nodejs-configuration.adoc
new file mode 100644
index 000000000000..0a72ddd0c332
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-nodejs-configuration.adoc
@@ -0,0 +1,35 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-nodejs-configuration_{context}"]
+= Configuring source-to-image for Node.js
+
+The Node.js image supports a number of environment variables, which can be set to control the configuration and behavior of the Node.js runtime.
+
+To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition.
+
+You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations.
+
+[NOTE]
+====
+Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps.
+====
+
+.Development Mode Environment Variables
+[cols="3a,6a",options="header"]
+|===
+
+| Variable name | Description
+
+|`DEV_MODE`
+|When set to `true`, enables hot deploy and opens the debug port. Additionally, indicates to tooling that the image is in development mode. Default is `false`.
+
+|`DEBUG_PORT`
+|The debug port. Only valid if `DEV_MODE` is set to `true`. Default is 5858.
+
+|`NPM_MIRROR`
+|The custom NPM registry mirror URL. All NPM packages are downloaded from the mirror link during the build process.
+
+|===
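+
+For example, the following is a minimal, hypothetical `.s2i/environment` file that enables development mode and points the build at an internal NPM mirror. The mirror URL is a placeholder:
+
+[source,text]
+----
+DEV_MODE=true
+DEBUG_PORT=5858
+NPM_MIRROR=https://npm-mirror.example.com/repository/npm
+----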
diff --git a/_unused_topics/images-using-images-s2i-nodejs-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-nodejs-hot-deploying.adoc
new file mode 100644
index 000000000000..2e2cf4d9bf3a
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-nodejs-hot-deploying.adoc
@@ -0,0 +1,28 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-nodejs-hot-deploying_{context}"]
+= Hot deploying for Node.js
+
+Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. To pick up changes made in your application source code immediately, you must run your built image with the `DEV_MODE=true` environment variable.
+
+You can set new environment variables when creating new applications, or updating
+environment variables for existing objects.
+
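+For example, assuming an existing deployment configuration named `myapp`, which is a placeholder, you could enable development mode with the `oc set env` command:
+
+[source,terminal]
+----
+$ oc set env dc/myapp DEV_MODE=true
+----
+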
+[WARNING]
+====
+Only use the `DEV_MODE=true` environment variable while developing or debugging. Using this in your production environment is not recommended.
+====
+
+.Procedure
+
+* To change the source code of a running pod, open a remote shell into the container:
++
+[source,terminal]
+----
+$ oc rsh
+----
++
+Entering into a running container changes your current directory to `/opt/app-root/src`, where the source code is located.
diff --git a/_unused_topics/images-using-images-s2i-nodejs.adoc b/_unused_topics/images-using-images-s2i-nodejs.adoc
new file mode 100644
index 000000000000..8f443b1085af
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-nodejs.adoc
@@ -0,0 +1,9 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-nodejs_{context}"]
+= Node.js overview
+
+{product-title} provides source-to-image (S2I) enabled Node.js images for building and running Node.js applications. The Node.js S2I builder image assembles your application source with any required dependencies to create a new image containing your Node.js application. This resulting image can be run either by {product-title} or by a container runtime.
diff --git a/_unused_topics/images-using-images-s2i-perl-configuration.adoc b/_unused_topics/images-using-images-s2i-perl-configuration.adoc
new file mode 100644
index 000000000000..563ba407e4be
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-perl-configuration.adoc
@@ -0,0 +1,44 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-perl-configuration_{context}"]
+= Configuring source-to-image for Perl
+
+The Perl image supports a number of environment variables which can be set to control the configuration and behavior of the Perl runtime.
+
+To set these environment variables as part of your image, you can place them into
+a `.s2i/environment` file inside your source code repository, or define them in
+the environment section of the build configuration's `sourceStrategy` definition.
+
+You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations.
+
+[NOTE]
+====
+Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps.
+====
+
+.Perl Environment Variables
+[cols="4a,6a",options="header"]
+|===
+
+|Variable name |Description
+
+|`ENABLE_CPAN_TEST`
+|When set to `true`, this variable installs all the CPAN modules and runs their tests. By default, the testing of the modules is disabled.
+
+|`CPAN_MIRROR`
+|This variable specifies a mirror URL that `cpanminus` uses to install dependencies. By default, this URL is not specified.
+
+|`PERL_APACHE2_RELOAD`
+|Set this to `true` to enable automatic reloading of modified Perl modules. By default, automatic reloading is disabled.
+
+|`HTTPD_START_SERVERS`
+|The https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers[StartServers] directive sets the number of child server processes created on startup. Default is 8.
+
+|`HTTPD_MAX_REQUEST_WORKERS`
+|The number of simultaneous requests that Apache handles. The default is 256, but it is automatically lowered if memory is limited.
+|===
+
+//Verify `oc log` is still valid.
diff --git a/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc
new file mode 100644
index 000000000000..de276ad98264
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-perl-hot-deploying.adoc
@@ -0,0 +1,28 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-perl-hot-deploying_{context}"]
+= Hot deploying for Perl
+
+Hot deployment allows you to quickly make and deploy changes to your application
+without having to generate a new S2I build. To enable hot deployment in this
+image, you must set the `PERL_APACHE2_RELOAD` environment variable to `true`. You can use the `oc set env` command to update environment variables of existing objects.
+
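+For example, assuming an existing deployment configuration named `myapp`, which is a placeholder, you could enable reloading with the following command:
+
+[source,terminal]
+----
+$ oc set env dc/myapp PERL_APACHE2_RELOAD=true
+----
+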
+[WARNING]
+====
+You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment.
+====
+
+.Procedure
+
+. To change your source code in a running pod, use the `oc rsh` command to enter the container:
++
+[source,terminal]
+----
+$ oc rsh
+----
++
+After you enter into the running container, your current directory is set to
+`/opt/app-root/src`, where the source code is located.
diff --git a/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc b/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc
new file mode 100644
index 000000000000..328dbfa10947
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-perl-pulling-images.adoc
@@ -0,0 +1,50 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-perl-pulling-images_{context}"]
+= Pulling images for Perl
+
+//Images comes in two options:
+
+//* RHEL 8
+//* CentOS 7
+
+// *RHEL 8 Based Images*
+
+The RHEL 8 images are available through the Red Hat Registry.
+
+.Procedure
+
+* To pull the RHEL 8 image, enter the following command for the version of Perl you want:
++
+.Perl `5.26`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/perl-526-rhel7:latest
+----
++
+.Perl `5.30`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/perl-530-rhel7:latest
+----
+
+////
+*CentOS 7 Based Image*
+
+A CentOS image for Perl 5.16 is available on link:quay.io[Quay.io].
+
+.Procedure
+
+* To pull the CentOS 7 image, enter the following command:
++
+[source,terminal]
+----
+$ podman pull openshift/perl-516-centos7
+----
+////
+
+To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-title} image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the
+image stream.
diff --git a/_unused_topics/images-using-images-s2i-perl.adoc b/_unused_topics/images-using-images-s2i-perl.adoc
new file mode 100644
index 000000000000..01277ff90a72
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-perl.adoc
@@ -0,0 +1,13 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-perl_{context}"]
+= Perl overview
+
+{product-title} provides source-to-image (S2I) enabled Perl images for building and running Perl applications. The Perl S2I builder image assembles your application source with any required dependencies to create a new image containing your Perl application. This resulting image can be run either by {product-title} or by a container runtime.
+
+[id="images-using-images-s2i-perl-accessing-logs_{context}"]
+== Accessing logs
+Access logs are streamed to standard output and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container.
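+
+For example, to dump the error log from a running pod, where `<pod_name>` is the name of your application pod:
+
+[source,terminal]
+----
+$ oc rsh <pod_name> cat /tmp/error_log
+----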
diff --git a/_unused_topics/images-using-images-s2i-php-configuration.adoc b/_unused_topics/images-using-images-s2i-php-configuration.adoc
new file mode 100644
index 000000000000..7e2ec6f6d7fd
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-php-configuration.adoc
@@ -0,0 +1,116 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-php-configuration_{context}"]
+= Configuring source-to-image for PHP
+
+The PHP image supports a number of environment variables which can be set to control the configuration and behavior of the PHP runtime.
+
+To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition.
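+
+For example, a `.s2i/environment` file that sets two of the variables described below might contain:
+
+----
+DOCUMENTROOT=/public
+OPCACHE_REVALIDATE_FREQ=0
+----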
+
+You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations.
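+
+For example, the following command sets a variable when creating a new application, where `<repository_url>` is the URL of your application source:
+
+[source,terminal]
+----
+$ oc new-app php~<repository_url> -e ERROR_REPORTING="E_ALL & ~E_NOTICE"
+----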
+
+[NOTE]
+====
+Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps.
+====
+
+The following environment variables set their equivalent property value in the
+`php.ini` file:
+
+.PHP Environment Variables
+[cols="4a,6a,6a",options="header"]
+|===
+
+|Variable Name |Description |Default
+
+|`ERROR_REPORTING`
+|Informs PHP of the errors, warnings, and notices for which you would like it to
+take action.
+|`E_ALL & ~E_NOTICE`
+
+|`DISPLAY_ERRORS`
+|Controls if and where PHP outputs errors, notices, and warnings.
+|`ON`
+
+|`DISPLAY_STARTUP_ERRORS`
+|Causes any display errors that occur during PHP's startup sequence to be
+handled separately from display errors.
+|`OFF`
+
+|`TRACK_ERRORS`
+|Stores the last error/warning message in `$php_errormsg` (boolean).
+|`OFF`
+
+|`HTML_ERRORS`
+|Links errors to documentation that is related to the error.
+|`ON`
+
+|`INCLUDE_PATH`
+|Path for PHP source files.
+|`.:/opt/openshift/src:/opt/rh/php55/root/usr/share/pear`
+
+|`SESSION_PATH`
+|Location for session data files.
+|`/tmp/sessions`
+
+|`DOCUMENTROOT`
+|Path that defines the document root for your application (for example, `/public`).
+|`/`
+|===
+
+The following environment variable sets its equivalent property value in the
+`opcache.ini` file:
+
+.OPcache Environment Variables
+[cols="3a,6a,1a",options="header"]
+|===
+
+|Variable Name |Description |Default
+
+|`OPCACHE_MEMORY_CONSUMPTION`
+|The link:http://php.net/manual/en/book.opcache.php[OPcache] shared memory
+storage size.
+|`16M`
+
+|`OPCACHE_REVALIDATE_FREQ`
+|How often to check script time stamps for updates, in seconds. `0` results in
+link:http://php.net/manual/en/book.opcache.php[OPcache] checking for updates on
+every request.
+|`2`
+|===
+
+You can also override the entire directory used to load the PHP configuration by setting:
+
+.Additional PHP settings
+[cols="3a,6a",options="header"]
+|===
+
+|Variable Name |Description
+
+|`PHPRC`
+|Sets the path to the `php.ini` file.
+
+|`PHP_INI_SCAN_DIR`
+|Path to scan for additional `.ini` configuration files.
+|===
+
+You can use a custom Composer repository mirror URL to download packages instead of the default `packagist.org`:
+
+.Composer Environment Variables
+[cols="4a,6a",options="header"]
+|===
+
+|Variable Name |Description
+
+|`COMPOSER_MIRROR`
+|Set this variable to use a custom Composer repository mirror URL to download required packages during the build process.
+Note: This only affects packages listed in `composer.json`.
+|===
+
+[id="images-using-images-s2i-php-apache-configuration_{context}"]
+== Apache configuration
+
+If the `DocumentRoot` of the application is nested in the source directory `/opt/openshift/src`, you can provide your own `.htaccess` file to override the default Apache behavior and specify how application requests should be handled. The `.htaccess` file must be located at the root of the application source.
diff --git a/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc
new file mode 100644
index 000000000000..f8a852dd3447
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-php-hot-deploying.adoc
@@ -0,0 +1,27 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-hot-deploying_{context}"]
+= Hot deploying for PHP
+
+Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. In order to immediately pick up changes made in your application source code, you must run your built image with the `OPCACHE_REVALIDATE_FREQ=0` environment variable.
+
+You can use the `oc set env` command to update environment variables of existing objects.
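+
+For example, where `<app_name>` is the name of your deployment configuration:
+
+[source,terminal]
+----
+$ oc set env dc/<app_name> OPCACHE_REVALIDATE_FREQ=0
+----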
+
+[WARNING]
+====
+You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment.
+====
+
+.Procedure
+
+. To change your source code in a running pod, use the `oc rsh` command to enter the container:
++
+[source,terminal]
+----
+$ oc rsh
+----
+
+After you enter the running container, your current directory is set to `/opt/app-root/src`, where the source code is located.
diff --git a/_unused_topics/images-using-images-s2i-php-pulling-images.adoc b/_unused_topics/images-using-images-s2i-php-pulling-images.adoc
new file mode 100644
index 000000000000..fbbe32b686a8
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-php-pulling-images.adoc
@@ -0,0 +1,55 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-php-pulling-images_{context}"]
+= Pulling images for PHP
+
+//These images come in two options:
+
+//* RHEL 8
+//* CentOS 7
+
+The RHEL 8 images are available through the Red Hat Registry.
+
+.Procedure
+
+* To pull the RHEL 8 image, enter the following command for the version of PHP you want:
++
+.PHP `7.2`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/ubi8/php-72:latest
+----
++
+.PHP `7.3`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/php-73-rhel7:latest
+----
+
+////
+*CentOS 7 Based Images*
+
+CentOS images for PHP 5.5 and 5.6 are available on link:quay.io[Quay.io].
+
+.Procedure
+
+* To pull the CentOS 7 image, enter the following command for the version of PHP you want:
++
+.PHP `5.5`
+[source,terminal]
+----
+$ podman pull openshift/php-55-centos7
+----
++
+.PHP `5.6`
+[source,terminal]
+----
+$ podman pull openshift/php-56-centos7
+----
+////
+
+To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-title} image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the
+image stream.
diff --git a/_unused_topics/images-using-images-s2i-php.adoc b/_unused_topics/images-using-images-s2i-php.adoc
new file mode 100644
index 000000000000..116276a93b06
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-php.adoc
@@ -0,0 +1,14 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-php_{context}"]
+= PHP overview
+
+{product-title} provides source-to-image (S2I) enabled PHP images for building and running PHP applications. The PHP S2I builder image assembles your application source with any required dependencies to create a new image containing your PHP application. This resulting image can be run either by {product-title} or by a container runtime.
+
+[id="images-using-images-s2i-php-accessing-logs_{context}"]
+== Accessing logs
+
+Access logs are streamed to standard output and as such they can be viewed using the `oc logs` command. Error logs are stored in the `/tmp/error_log` file, which can be viewed using the `oc rsh` command to access the container.
diff --git a/_unused_topics/images-using-images-s2i-python-configuration.adoc b/_unused_topics/images-using-images-s2i-python-configuration.adoc
new file mode 100644
index 000000000000..f2dfd34cbbb9
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-python-configuration.adoc
@@ -0,0 +1,48 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-python-configuration_{context}"]
+= Configuring source-to-image for Python
+
+The Python image supports a number of environment variables which can be set to control the configuration and behavior of the Python runtime.
+
+To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition.
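+
+For example, build-time variables can be declared in the `sourceStrategy` section of a build configuration. The following fragment is a sketch, not a complete `BuildConfig` object:
+
+[source,yaml]
+----
+strategy:
+  sourceStrategy:
+    env:
+    - name: DISABLE_COLLECTSTATIC # inhibits manage.py collectstatic for Django projects
+      value: "1"
+----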
+
+You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations.
+
+[NOTE]
+====
+Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps.
+====
+
+.Python Environment Variables
+[cols="4a,6a",options="header"]
+|===
+
+|Variable name |Description
+
+|`APP_FILE`
+|This variable specifies the file name passed to the Python interpreter which is responsible for launching the application. This variable is set to `app.py` by default.
+
+|`APP_MODULE`
+|This variable specifies the WSGI callable. It follows the pattern `$(MODULE_NAME):$(VARIABLE_NAME)`, where the module name is a full dotted path and the variable name refers to a function inside the specified module. If you use `setup.py` for installing the application, then the module name can be read from that file and the variable defaults to `application`.
+
+|`APP_CONFIG`
+|This variable indicates the path to a valid Python file with a http://docs.gunicorn.org/en/latest/configure.html[gunicorn configuration].
+
+|`DISABLE_COLLECTSTATIC`
+|Set it to a nonempty value to inhibit the execution of `manage.py collectstatic` during the build. Only affects Django projects.
+
+|`DISABLE_MIGRATE`
+|Set it to a nonempty value to inhibit the execution of `manage.py migrate` when the produced image is run. Only affects Django projects.
+
+|`PIP_INDEX_URL`
+|Set this variable to use a custom index URL or mirror to download required
+packages during the build process. This only affects packages listed in the
+`requirements.txt` file.
+
+|`WEB_CONCURRENCY`
+|Set this to change the default setting for the number of http://docs.gunicorn.org/en/stable/settings.html#workers[workers]. By default, this is set to the number of available cores times 4.
+|===
diff --git a/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc
new file mode 100644
index 000000000000..03989935aebb
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-python-hot-deploying.adoc
@@ -0,0 +1,28 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-python-hot-deploying_{context}"]
+= Hot deploying for Python
+
+Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. If you are using Django, hot deployment works out of the box.
+
+To enable hot deployment while using Gunicorn, ensure you have a Gunicorn
+configuration file inside your repository with https://gunicorn-docs.readthedocs.org/en/latest/settings.html#reload[the `reload` option] set to `true`. Specify your configuration file using the `APP_CONFIG` environment variable, for example when running the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects.
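+
+For example, the following command points the image at a Gunicorn configuration file, where `<app_name>` is the name of your deployment configuration and `gunicorn_cfg.py` is an assumed file name at the root of your repository:
+
+[source,terminal]
+----
+$ oc set env dc/<app_name> APP_CONFIG=gunicorn_cfg.py
+----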
+
+[WARNING]
+====
+You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment.
+====
+
+.Procedure
+
+. To change your source code in a running pod, use the `oc rsh` command to enter the container:
++
+[source,terminal]
+----
+$ oc rsh
+----
+
+After you enter the running container, your current directory is set to `/opt/app-root/src`, where the source code is located.
diff --git a/_unused_topics/images-using-images-s2i-python-pulling-images.adoc b/_unused_topics/images-using-images-s2i-python-pulling-images.adoc
new file mode 100644
index 000000000000..7ab8e81e897c
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-python-pulling-images.adoc
@@ -0,0 +1,75 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-python-pulling-images_{context}"]
+= Pulling images for Python
+
+//These images come in two options:
+
+//* RHEL 8
+//* CentOS 7
+
+//*RHEL 8 Based Images*
+
+The RHEL 8 images are available through the Red Hat Registry.
+
+.Procedure
+
+* To pull the RHEL 8 image, enter the following command for the version of Python you want:
++
+.Python `2.7`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/python-27-rhel7:latest
+----
++
+.Python `3.6`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/ubi8/python-36:latest
+----
++
+.Python `3.8`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/python-38-rhel7:latest
+----
+
+////
+*CentOS 7 Based Images*
+
+These images are available on link:quay.io[Quay.io].
+
+.Procedure
+
+* To pull the CentOS 7 image, enter the following command for the version of Python you want:
++
+.Python `2.7`
+[source,terminal]
+----
+$ podman pull centos/python-27-centos7
+----
++
+.Python `3.3`
+[source,terminal]
+----
+$ podman pull openshift/python-33-centos7
+----
++
+.Python `3.4`
+[source,terminal]
+----
+$ podman pull centos/python-34-centos7
+----
++
+.Python `3.5`
+[source,terminal]
+----
+$ podman pull centos/python-35-centos7
+----
+////
+
+To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-title} image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the
+image stream.
diff --git a/_unused_topics/images-using-images-s2i-python.adoc b/_unused_topics/images-using-images-s2i-python.adoc
new file mode 100644
index 000000000000..92c996b56fa7
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-python.adoc
@@ -0,0 +1,9 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-python_{context}"]
+= Python overview
+
+{product-title} provides source-to-image (S2I) enabled Python images for building and running Python applications. The Python S2I builder image assembles your application source with any required dependencies to create a new image containing your Python application. This resulting image can be run either by {product-title} or by a container runtime.
diff --git a/_unused_topics/images-using-images-s2i-ruby-configuration.adoc b/_unused_topics/images-using-images-s2i-ruby-configuration.adoc
new file mode 100644
index 000000000000..07841e122384
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-ruby-configuration.adoc
@@ -0,0 +1,43 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-ruby-configuration_{context}"]
+= Configuring source-to-image for Ruby
+
+The Ruby image supports a number of environment variables which can be set to control the configuration and behavior of the Ruby runtime.
+
+To set these environment variables as part of your image, you can place them into a `.s2i/environment` file inside your source code repository, or define them in the environment section of the build configuration's `sourceStrategy` definition.
+
+You can also set environment variables to be used with an existing image when creating new applications, or by updating environment variables for existing objects such as deployment configurations.
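+
+For example, the following command disables asset compilation on an existing deployment configuration, where `<app_name>` is the name of your application:
+
+[source,terminal]
+----
+$ oc set env dc/<app_name> DISABLE_ASSET_COMPILATION=true
+----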
+
+[NOTE]
+====
+Environment variables that control build behavior must be set as part of the source-to-image (S2I) build configuration or in the `.s2i/environment` file to make them available to the build steps.
+====
+
+.Ruby Environment Variables
+[cols="4a,6a",options="header"]
+|===
+
+|Variable name |Description
+
+|`RACK_ENV`
+|This variable specifies the environment within which the Ruby application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RACK_ENV` is set to `production`. The default value is `production`.
+
+|`RAILS_ENV`
+|This variable specifies the environment within which the Ruby on Rails application is deployed, for example, `production`, `development`, or `test`. Each level has different behavior in terms of logging verbosity, error pages, and `ruby gem` installation. The application assets are only compiled if `RAILS_ENV` is set to `production`. This variable is set to `${RACK_ENV}` by default.
+
+|`DISABLE_ASSET_COMPILATION`
+|When set to `true`, this variable disables the process of asset compilation. Asset compilation only happens when the application runs in a production environment. Therefore, you can use this variable when assets have already been compiled.
+
+|`PUMA_MIN_THREADS`, `PUMA_MAX_THREADS`
+|This variable indicates the minimum and maximum number of threads that will be available in Puma's thread pool.
+
+|`PUMA_WORKERS`
+|This variable indicates the number of worker processes to be launched in Puma's clustered mode, when Puma runs more than two processes. If not explicitly set, the default behavior sets `PUMA_WORKERS` to a value that is appropriate for the memory available to the container and the number of cores on the host.
+
+|`RUBYGEM_MIRROR`
+|Set this variable to use a custom RubyGems mirror URL to download required gem packages during the build process. This environment variable is only available for Ruby 2.2+ images.
+|===
diff --git a/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc b/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc
new file mode 100644
index 000000000000..6463af2986fb
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-ruby-hot-deploying.adoc
@@ -0,0 +1,50 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-ruby-hot-deploying_{context}"]
+= Hot deploying for Ruby
+
+Hot deployment allows you to quickly make and deploy changes to your application without having to generate a new source-to-image (S2I) build. The method for enabling hot deployment in this image differs based on the application type.
+
+*Ruby on Rails applications*
+
+.Procedure
+
+For Ruby on Rails applications, run the built Rails application with the `RAILS_ENV=development` environment variable passed to the running pod.
+
+* For an existing deployment configuration, you can use the `oc set env` command:
++
+[source,terminal]
+----
+$ oc set env dc/rails-app RAILS_ENV=development
+----
+
+*Other types of Ruby applications, such as Sinatra or Padrino*
+
+For other types of Ruby applications, your application must be built with a gem that can reload the server every time a change to the source code is made inside the running container. Those gems are:
+
+* Shotgun
+* Rerun
+* Rack-livereload
+
+To be able to run your application in development mode, you must modify the S2I `run` script so that the web server is launched by the chosen gem, which checks for changes in the source code.
+
+After you build your application image with your version of the S2I `run` script, run the image with the `RACK_ENV=development` environment variable. For example, you can use the `oc new-app` command. You can use the `oc set env` command to update environment variables of existing objects.
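+
+For example, the following command switches an existing deployment configuration into development mode, where `<app_name>` is the name of your application:
+
+[source,terminal]
+----
+$ oc set env dc/<app_name> RACK_ENV=development
+----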
+
+[WARNING]
+====
+You should only use this option while developing or debugging. It is not recommended to turn this on in your production environment.
+====
+
+.Procedure
+
+. To change your source code in a running pod, use the `oc rsh` command to enter the container:
++
+[source,terminal]
+----
+$ oc rsh
+----
+
+After you enter the running container, your current directory is set to `/opt/app-root/src`, where the source code is located.
diff --git a/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc b/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc
new file mode 100644
index 000000000000..98452af2d4bd
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-ruby-pulling-images.adoc
@@ -0,0 +1,69 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-ruby-pulling-images_{context}"]
+= Pulling images for Ruby
+
+//These images come in two options:
+
+//* RHEL 8
+//* CentOS 7
+
+//*RHEL 8 Based Images*
+
+The RHEL 8 images are available through the Red Hat Registry.
+
+.Procedure
+
+* To pull the RHEL 8 image, enter the following command for the version of Ruby you want:
++
+.Ruby `2.5`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/ruby-25-rhel7:latest
+----
++
+.Ruby `2.6`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/ruby-26-rhel7:latest
+----
++
+.Ruby `2.7`
+[source,terminal]
+----
+$ podman pull registry.redhat.io/rhscl/ruby-27-rhel7:latest
+----
+
+////
+*CentOS 7 Based Images*
+
+These images are available on link:quay.io[Quay.io].
+
+.Procedure
+
+* To pull the CentOS 7 image, enter the following command for the version of Ruby you want:
++
+.Ruby `2.0`
+[source,terminal]
+----
+$ podman pull openshift/ruby-20-centos7
+----
++
+.Ruby `2.2`
+[source,terminal]
+----
+$ podman pull openshift/ruby-22-centos7
+----
++
+.Ruby `2.3`
+[source,terminal]
+----
+$ podman pull centos/ruby-23-centos7
+----
+////
+
+To use these images, you can either access them directly from registry.redhat.io, or push them into your {product-title} image registry. Additionally, you can create an image stream that points to the image, either in your container image registry or at the external location. Your {product-title} resources can then reference the
+image stream.
diff --git a/_unused_topics/images-using-images-s2i-ruby.adoc b/_unused_topics/images-using-images-s2i-ruby.adoc
new file mode 100644
index 000000000000..feed3359d273
--- /dev/null
+++ b/_unused_topics/images-using-images-s2i-ruby.adoc
@@ -0,0 +1,9 @@
+// Module included in the following assemblies:
+//
+// * openshift_images/using_images/using-images-source-to-image.adoc
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="images-using-images-s2i-ruby_{context}"]
+= Ruby overview
+
+{product-title} provides source-to-image (S2I) enabled Ruby images for building and running Ruby applications. The Ruby S2I builder image assembles your application source with any required dependencies to create a new image containing your Ruby application. This resulting image can be run either by {product-title} or by a container runtime.
diff --git a/modules/installation-about-custom.adoc b/_unused_topics/installation-about-custom.adoc
similarity index 100%
rename from modules/installation-about-custom.adoc
rename to _unused_topics/installation-about-custom.adoc
diff --git a/modules/installation-creating-worker-machineset.adoc b/_unused_topics/installation-creating-worker-machineset.adoc
similarity index 96%
rename from modules/installation-creating-worker-machineset.adoc
rename to _unused_topics/installation-creating-worker-machineset.adoc
index 54bd8f1478df..fab07717826c 100644
--- a/modules/installation-creating-worker-machineset.adoc
+++ b/_unused_topics/installation-creating-worker-machineset.adoc
@@ -15,7 +15,7 @@ cluster.
.Procedure
-. Optionally, launch worker nodes that are controlled by the machine API.
+. Optional: Launch worker nodes that are controlled by the machine API.
. View the list of MachineSets in the `openshift-machine-api` namespace:
+
----
@@ -119,7 +119,7 @@ infrastructure name that you extracted from the Ignition config metadata,
which has the format `-`.
////
-. Optionally, replace the `subnet` stanza with one that specifies the subnet
+. Optional: Replace the `subnet` stanza with one that specifies the subnet
to deploy the machines on:
+
----
diff --git a/_unused_topics/installation-osp-troubleshooting.adoc b/_unused_topics/installation-osp-troubleshooting.adoc
new file mode 100644
index 000000000000..8b5bcff20bd9
--- /dev/null
+++ b/_unused_topics/installation-osp-troubleshooting.adoc
@@ -0,0 +1,40 @@
+// Module included in the following assemblies:
+//
+// * n/a
+
+[id="installation-osp-customizing_{context}"]
+
+= Troubleshooting {product-title} on OpenStack installations
+
+// Structure as needed in the end. This is very much a WIP.
+// A few more troubleshooting and/or known issues blurbs incoming
+
+Sometimes {product-title} fails to install properly. In these cases, it is helpful to understand the likely failure modes and how to troubleshoot them.
+
+This document discusses some troubleshooting options for {rh-openstack}-based
+deployments. For general tips on troubleshooting the installation program, see the link:../troubleshooting.md[Installer Troubleshooting] guide.
+
+== View instance logs
+
+After you install the {rh-openstack} CLI tools, run the following command to view the console log of an instance, where `<server>` is the name or ID of the instance:
+
+[source,terminal]
+----
+$ openstack console log show <server>
+----
+
+== Connect to instances via SSH
+
+Get the IP address of the machine on the private network:
+
+[source,terminal]
+----
+$ openstack server list | grep master
+| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge |
+| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge |
+| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge |
+----
+
+Then connect to it, using the control plane machine that currently holds the API as a jump host:
+
+[source,terminal]
+----
+$ ssh -J core@<floating_ip_address> core@ <1>
+----
+<1> Replace `<floating_ip_address>` with the floating IP address assigned to the control plane machine.
diff --git a/modules/looking-inside-nodes.adoc b/_unused_topics/looking-inside-nodes.adoc
similarity index 79%
rename from modules/looking-inside-nodes.adoc
rename to _unused_topics/looking-inside-nodes.adoc
index 16b9df69c54c..dfc6846e31cd 100644
--- a/modules/looking-inside-nodes.adoc
+++ b/_unused_topics/looking-inside-nodes.adoc
@@ -5,31 +5,37 @@
[id="looking-inside-openshift-nodes_{context}"]
= Looking inside {product-title} nodes
-Directly accessing a node is strongly discouraged. Nodes are meant to be managed entirely from the cluster and are considered tainted if you log into a node and change anything. That said, there might be times when you want to troubleshoot a problem on a node or simply go onto a node in a test environment to see how things work.
+Directly accessing a node is strongly discouraged. Nodes are meant to be managed entirely from the cluster and are considered tainted if you log in to a node and change anything. That said, there might be times when you want to troubleshoot a problem on a node or simply go onto a node in a test environment to see how things work.
For debugging purposes, the oc debug command lets you go inside any pod and look around. For nodes, in particular, you open a tools pod on the node, then chroot to the node’s host filesystem. At that point, you are effectively working on the node. Here’s how to do that:
+----
$ oc get nodes
-NAME STATUS ROLES AGE VERSION
+NAME STATUS ROLES AGE VERSION
-ip-10-0-0-1.us-east-2.compute.internal Ready worker 3h19m v1.13.4+4dd65df23d
+ip-10-0-0-1.us-east-2.compute.internal Ready worker 3h19m v1.22.1
-ip-10-0-0-39.us-east-2.compute.internal Ready master 3h37m v1.13.4+4dd65df23d
+ip-10-0-0-39.us-east-2.compute.internal Ready master 3h37m v1.22.1
-…
+…
$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal
Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …
+----
-To use host binaries, run `chroot /host`
+----
+$ oc debug nodes/ip-10-0-138-39.us-east-2.compute.internal
-If you don’t see a command prompt, try pressing enter.
+Starting pod/ip-10-0-138-39us-east-2computeinternal-debug …
-sh-4.2#
+To use host binaries, run chroot /host
+
+If you don’t see a command prompt, try pressing enter.
-
+sh-4.3#
+----
As noted, you can change to the root of the node’s filesystem by typing chroot /host and running commands from the host on that filesystem as though you were logged in directly from the host. Here are some examples of commands you can run to see what is happening on the node:
diff --git a/modules/machine-configs-and-pools.adoc b/_unused_topics/machine-configs-and-pools.adoc
similarity index 100%
rename from modules/machine-configs-and-pools.adoc
rename to _unused_topics/machine-configs-and-pools.adoc
diff --git a/modules/managing-dedicated-readers-group.adoc b/_unused_topics/managing-dedicated-readers-group.adoc
similarity index 92%
rename from modules/managing-dedicated-readers-group.adoc
rename to _unused_topics/managing-dedicated-readers-group.adoc
index c602bff7dd1c..511dc8313ab6 100644
--- a/modules/managing-dedicated-readers-group.adoc
+++ b/_unused_topics/managing-dedicated-readers-group.adoc
@@ -2,7 +2,7 @@
//
// administering_a_cluster/dedicated-admin-role.adoc
-[id="dedicated-managing-dedicated-readers-group{context}"]
+[id="dedicated-managing-dedicated-readers-group_{context}"]
= Managing the dedicated-readers group
Users with a `dedicated-reader` role are granted edit and view access to the
diff --git a/_unused_topics/metering-resources.adoc b/_unused_topics/metering-resources.adoc
new file mode 100644
index 000000000000..7b0f67114a9a
--- /dev/null
+++ b/_unused_topics/metering-resources.adoc
@@ -0,0 +1,23 @@
+// Module included in the following assemblies:
+//
+// * metering/metering-install-metering.adoc
+
+[id="metering-resources_{context}"]
+= Metering resources
+
+Metering has many resources that you can use to manage its deployment and installation, as well as the reporting functionality it provides.
+
+Metering is managed using the following CustomResourceDefinitions (CRDs):
+
+[cols="1,7"]
+|===
+
+|*MeteringConfig* |Configures the Metering stack for deployment. Contains customizations and configuration options to control each component that makes up the Metering stack.
+
+|*Reports* |Controls what query to use, when, and how often the query should be run, and where to store the results.
+
+|*ReportQueries* |Contains the SQL queries used to perform analysis on the data contained within ReportDataSources.
+
+|*ReportDataSources* |Controls the data available to ReportQueries and Reports. Allows configuring access to different databases for use within Metering.
+
+|===
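+
+For example, a minimal `MeteringConfig` object, shown here only as a sketch of the resource shape, might look like the following. Real deployments set storage and component options under `spec`:
+
+[source,yaml]
+----
+apiVersion: metering.openshift.io/v1
+kind: MeteringConfig
+metadata:
+  name: operator-metering
+  namespace: openshift-metering
+spec: {} # storage and other customizations go here
+----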
diff --git a/modules/monitoring-configuring-etcd-monitoring.adoc b/_unused_topics/monitoring-configuring-etcd-monitoring.adoc
similarity index 96%
rename from modules/monitoring-configuring-etcd-monitoring.adoc
rename to _unused_topics/monitoring-configuring-etcd-monitoring.adoc
index 414b1e6455fd..66e1144babb9 100644
--- a/modules/monitoring-configuring-etcd-monitoring.adoc
+++ b/_unused_topics/monitoring-configuring-etcd-monitoring.adoc
@@ -39,7 +39,7 @@ $ oc -n openshift-monitoring edit configmap cluster-monitoring-config
. Under `config.yaml: |+`, add the `etcd` section.
+
-.. If you run `etcd` in static pods on your master nodes, you can specify the `etcd` nodes using the selector:
+.. If you run `etcd` in static pods on your control plane nodes (also known as master nodes), you can specify the `etcd` nodes using the selector:
+
[subs="quotes"]
----
@@ -118,7 +118,7 @@ image::etcd-no-certificate.png[]
While `etcd` is being monitored, Prometheus is not yet able to authenticate against `etcd`, and so cannot gather metrics. To configure Prometheus authentication against `etcd`:
-. Copy the `/etc/etcd/ca/ca.crt` and `/etc/etcd/ca/ca.key` credentials files from the master node to the local machine:
+. Copy the `/etc/etcd/ca/ca.crt` and `/etc/etcd/ca/ca.key` credentials files from the control plane node to the local machine:
+
[subs="quotes"]
----
diff --git a/modules/monitoring-dead-mans-switch-pagerduty.adoc b/_unused_topics/monitoring-dead-mans-switch-pagerduty.adoc
similarity index 100%
rename from modules/monitoring-dead-mans-switch-pagerduty.adoc
rename to _unused_topics/monitoring-dead-mans-switch-pagerduty.adoc
diff --git a/modules/monitoring-dead-mans-switch.adoc b/_unused_topics/monitoring-dead-mans-switch.adoc
similarity index 100%
rename from modules/monitoring-dead-mans-switch.adoc
rename to _unused_topics/monitoring-dead-mans-switch.adoc
diff --git a/modules/monitoring-enabling-dynamically-provisioned-storage.adoc b/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc
similarity index 84%
rename from modules/monitoring-enabling-dynamically-provisioned-storage.adoc
rename to _unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc
index 5b4ab65e9677..3cb84b12da5c 100644
--- a/modules/monitoring-enabling-dynamically-provisioned-storage.adoc
+++ b/_unused_topics/monitoring-enabling-dynamically-provisioned-storage.adoc
@@ -14,7 +14,7 @@ Instead of statically-provisioned storage, you can use dynamically-provisioned s
* `openshift_cluster_monitoring_operator_prometheus_storage_enabled` (Default: false)
* `openshift_cluster_monitoring_operator_alertmanager_storage_enabled` (Default: false)
+
-. (optional) After you enable dynamic storage, you can also set the `storageclass` for the persistent volume claim for each component in the following parameters in the Ansible inventory file:
+. Optional: After you enable dynamic storage, you can also set the `storageclass` for the persistent volume claim for each component in the following parameters in the Ansible inventory file:
+
* `openshift_cluster_monitoring_operator_prometheus_storage_class_name` (default: "")
* `openshift_cluster_monitoring_operator_alertmanager_storage_class_name` (default: "")
diff --git a/modules/monitoring-enabling-persistent-storage.adoc b/_unused_topics/monitoring-enabling-persistent-storage.adoc
similarity index 100%
rename from modules/monitoring-enabling-persistent-storage.adoc
rename to _unused_topics/monitoring-enabling-persistent-storage.adoc
diff --git a/modules/monitoring-full-list-of-configuration-variables.adoc b/_unused_topics/monitoring-full-list-of-configuration-variables.adoc
similarity index 100%
rename from modules/monitoring-full-list-of-configuration-variables.adoc
rename to _unused_topics/monitoring-full-list-of-configuration-variables.adoc
diff --git a/modules/monitoring-grouping-alerts.adoc b/_unused_topics/monitoring-grouping-alerts.adoc
similarity index 100%
rename from modules/monitoring-grouping-alerts.adoc
rename to _unused_topics/monitoring-grouping-alerts.adoc
diff --git a/modules/monitoring-monitoring-overview.adoc b/_unused_topics/monitoring-monitoring-overview.adoc
similarity index 92%
rename from modules/monitoring-monitoring-overview.adoc
rename to _unused_topics/monitoring-monitoring-overview.adoc
index 00b804716c31..8b1096b717b0 100644
--- a/modules/monitoring-monitoring-overview.adoc
+++ b/_unused_topics/monitoring-monitoring-overview.adoc
@@ -52,6 +52,6 @@ Other {product-title} framework components might be exposing metrics as well. Se
[NOTE]
====
-In order to be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options.
+To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options.
====
diff --git a/modules/monitoring-setting-persistent-storage-size.adoc b/_unused_topics/monitoring-setting-persistent-storage-size.adoc
similarity index 100%
rename from modules/monitoring-setting-persistent-storage-size.adoc
rename to _unused_topics/monitoring-setting-persistent-storage-size.adoc
diff --git a/modules/monitoring-update-and-compatibility-guarantees.adoc b/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc
similarity index 76%
rename from modules/monitoring-update-and-compatibility-guarantees.adoc
rename to _unused_topics/monitoring-update-and-compatibility-guarantees.adoc
index 641df76ef15a..4f42970573fc 100644
--- a/modules/monitoring-update-and-compatibility-guarantees.adoc
+++ b/_unused_topics/monitoring-update-and-compatibility-guarantees.adoc
@@ -5,13 +5,13 @@
[id="update-and-compatibility-guarantees_{context}"]
= Update and compatibility guarantees
-In order to be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. This document describes known pitfalls of which types of configuration and customization are unsupported, as well as misuse of resources provided by {product-title} Monitoring. All configuration options described in this topic are explicitly supported.
+To be able to deliver updates with guaranteed compatibility, configurability of the {product-title} Monitoring stack is limited to the explicitly available options. This document describes known pitfalls of which types of configuration and customization are unsupported, as well as misuse of resources provided by {product-title} Monitoring. All configuration options described in this topic are explicitly supported.
*Modification of {product-title} monitoring resources*
The {product-title} Monitoring stack ensures its resources are _always_ in the state it expects them to be. If they are modified, {product-title} Monitoring will ensure that this will be reset. Nonetheless it is possible to pause this behavior, by setting the `paused` field in the `AppVersion` called `openshift-monitoring`. Setting the {product-title} Monitoring stack to be paused, stops all future updates and will cause modification of the otherwise managed resources. If resources are modified in an uncontrolled manner, this will cause undefined behavior during updates.
-In order to ensure compatible and functioning updates, the `paused` field must be set to `false` on upgrades.
+To ensure compatible and functioning updates, the `paused` field must be set to `false` on upgrades.
*Usage of resources created by {product-title} monitoring*
diff --git a/modules/mounting-local-volumes.adoc b/_unused_topics/mounting-local-volumes.adoc
similarity index 100%
rename from modules/mounting-local-volumes.adoc
rename to _unused_topics/mounting-local-volumes.adoc
diff --git a/modules/nodes-cluster-disabling-features-list.adoc b/_unused_topics/nodes-cluster-disabling-features-list.adoc
similarity index 100%
rename from modules/nodes-cluster-disabling-features-list.adoc
rename to _unused_topics/nodes-cluster-disabling-features-list.adoc
diff --git a/modules/nodes-cluster-overcommit-node-memory.adoc b/_unused_topics/nodes-cluster-overcommit-node-memory.adoc
similarity index 87%
rename from modules/nodes-cluster-overcommit-node-memory.adoc
rename to _unused_topics/nodes-cluster-overcommit-node-memory.adoc
index 422cbe41f505..e1da11dcbb73 100644
--- a/modules/nodes-cluster-overcommit-node-memory.adoc
+++ b/_unused_topics/nodes-cluster-overcommit-node-memory.adoc
@@ -10,7 +10,7 @@ You can use the `qos-reserved` parameter to specify a percentage of memory to be
by a pod in a particular QoS level. This feature attempts to reserve requested resources to exclude pods
from lower OoS classes from using resources requested by pods in higher QoS classes.
-By reserving resources for higher QOS levels, pods that don't have resource limits are prevented from encroaching on the resources
+By reserving resources for higher QOS levels, pods that do not have resource limits are prevented from encroaching on the resources
requested by pods at higher QoS levels.
.Prerequisites
@@ -45,6 +45,21 @@ metadata:
----
$ oc label machineconfigpool worker custom-kubelet=small-pods
----
++
+[TIP]
+====
+You can alternatively apply the following YAML to add the label:
+
+[source,yaml]
+----
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfigPool
+metadata:
+ labels:
+ custom-kubelet: small-pods
+ name: worker
+----
+====
.Procedure
diff --git a/modules/nodes-containers-using-about.adoc b/_unused_topics/nodes-containers-using-about.adoc
similarity index 100%
rename from modules/nodes-containers-using-about.adoc
rename to _unused_topics/nodes-containers-using-about.adoc
diff --git a/modules/nodes-containers-using-ssh.adoc b/_unused_topics/nodes-containers-using-ssh.adoc
similarity index 95%
rename from modules/nodes-containers-using-ssh.adoc
rename to _unused_topics/nodes-containers-using-ssh.adoc
index 503e4620be58..868386626226 100644
--- a/modules/nodes-containers-using-ssh.adoc
+++ b/_unused_topics/nodes-containers-using-ssh.adoc
@@ -19,7 +19,7 @@ For example, in a MySQL container, you can count the number of records in the
database by invoking the `mysql` command, then using the prompt to type in the `SELECT` command. You can
also use commands like `ps(1)` and `ls(1)` for validation.
-`*BuildConfigs*` and `*DeployConfigs*` map out how you want things to look and
+`BuildConfigs` and `DeployConfigs` map out how you want things to look and
pods (with containers inside) are created and dismantled as needed. Your changes
are not persistent. If you make changes directly within the container and that
container is destroyed and rebuilt, your changes will no longer exist.
diff --git a/modules/nodes-nodes-audit-log-advanced.adoc b/_unused_topics/nodes-nodes-audit-log-advanced.adoc
similarity index 99%
rename from modules/nodes-nodes-audit-log-advanced.adoc
rename to _unused_topics/nodes-nodes-audit-log-advanced.adoc
index ed5d789df839..e790f3ec446c 100644
--- a/modules/nodes-nodes-audit-log-advanced.adoc
+++ b/_unused_topics/nodes-nodes-audit-log-advanced.adoc
@@ -19,7 +19,7 @@ openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/lib/origi
[IMPORTANT]
====
-The policy file *_/etc/origin/master/adv-audit.yaml_* must be available on each master node.
+The policy file *_/etc/origin/master/adv-audit.yaml_* must be available on each control plane node.
====
@@ -137,4 +137,3 @@ that group.
For more information on advanced audit, see the
link:https://kubernetes.io/docs/tasks/debug-application-cluster/audit[Kubernetes
documentation]
-
diff --git a/modules/nodes-nodes-resources-configuring-viewing.adoc b/_unused_topics/nodes-nodes-resources-configuring-viewing.adoc
similarity index 100%
rename from modules/nodes-nodes-resources-configuring-viewing.adoc
rename to _unused_topics/nodes-nodes-resources-configuring-viewing.adoc
diff --git a/modules/nodes-nodes-working-adding.adoc b/_unused_topics/nodes-nodes-working-adding.adoc
similarity index 100%
rename from modules/nodes-nodes-working-adding.adoc
rename to _unused_topics/nodes-nodes-working-adding.adoc
diff --git a/modules/nodes-pods-daemonsets-pods.adoc b/_unused_topics/nodes-pods-daemonsets-pods.adoc
similarity index 100%
rename from modules/nodes-pods-daemonsets-pods.adoc
rename to _unused_topics/nodes-pods-daemonsets-pods.adoc
diff --git a/modules/nodes-pods-priority-examples.adoc b/_unused_topics/nodes-pods-priority-examples.adoc
similarity index 84%
rename from modules/nodes-pods-priority-examples.adoc
rename to _unused_topics/nodes-pods-priority-examples.adoc
index a56136bbe17b..92d898eb2d3f 100644
--- a/modules/nodes-pods-priority-examples.adoc
+++ b/_unused_topics/nodes-pods-priority-examples.adoc
@@ -5,7 +5,7 @@
[id="nodes-pods-priority-examples_{context}"]
= Pod priority example scenarios
-Pod priority and preemption assigns a priority to pods for scheduling. The scheduler will preempt (evict) lower-priority pods in order to schedule higher-priority pods.
+Pod priority and preemption assigns a priority to pods for scheduling. The scheduler will preempt (evict) lower-priority pods to schedule higher-priority pods.
Typical preemption scenario::
*Pod P* is a pending pod.
@@ -43,7 +43,7 @@ Pod priority and cross-node preemption::
+
There are no other cases of anti-affinity between *Pod P* and other pods in the zone.
-. In order to schedule *Pod P* on *Node N*, the scheduler must preempt *Pod Q* to remove the pod anti-affinity violation, allowing the scheduler to schedule *Pod P* on *Node N*.
+. To schedule *Pod P* on *Node N*, the scheduler must preempt *Pod Q* to remove the pod anti-affinity violation, allowing the scheduler to schedule *Pod P* on *Node N*.
The scheduler can preempt *Pod Q*, but scheduler does not perform cross-node preemption. So, Pod P will be deemed unschedulable on Node N.
////
diff --git a/modules/nodes-scheduler-node-antiaffinity-configuring.adoc b/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc
similarity index 93%
rename from modules/nodes-scheduler-node-antiaffinity-configuring.adoc
rename to _unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc
index 662f7389bf94..083d94dfc6eb 100644
--- a/modules/nodes-scheduler-node-antiaffinity-configuring.adoc
+++ b/_unused_topics/nodes-scheduler-node-antiaffinity-configuring.adoc
@@ -9,7 +9,7 @@ You can configure two types of node affinity rules: required and preferred.
== Configuring a required node affinity rule
-Required rules *must* be met before a pod can be scheduled on a node.
+Required rules *must* be met before a pod can be scheduled on a node.
.Procedure
@@ -20,6 +20,21 @@ The following steps demonstrate a simple configuration that creates a node and a
----
$ oc label node node1 e2e-az-name=e2e-az1
----
++
+[TIP]
+====
+You can alternatively apply the following YAML to add the label:
+
+[source,yaml]
+----
+kind: Node
+apiVersion: v1
+metadata:
+ name:
+ labels:
+ e2e-az-name: e2e-az1
+----
+====
. In the pod specification, use the `nodeAffinity` stanza to configure the `requiredDuringSchedulingIgnoredDuringExecution` parameter:
+
diff --git a/modules/nodes-scheduler-taints-tolerations-examples.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc
similarity index 78%
rename from modules/nodes-scheduler-taints-tolerations-examples.adoc
rename to _unused_topics/nodes-scheduler-taints-tolerations-examples.adoc
index df2518f4f07c..43a1eb49af61 100644
--- a/modules/nodes-scheduler-taints-tolerations-examples.adoc
+++ b/_unused_topics/nodes-scheduler-taints-tolerations-examples.adoc
@@ -27,6 +27,27 @@ For example:
----
$ oc adm taint nodes node1 dedicated=groupName:NoSchedule
----
++
+[TIP]
+====
+You can alternatively apply the following YAML to add the taint:
+
+[source,yaml]
+----
+kind: Node
+apiVersion: v1
+metadata:
+ name:
+ labels:
+ ...
+spec:
+ taints:
+ - key: dedicated
+ value: groupName
+ effect: NoSchedule
+ ...
+----
+====
. Add a corresponding toleration to the pods by writing a custom admission controller.
+
@@ -48,6 +69,27 @@ For example:
----
$ oc adm taint nodes node1 dedicated=groupName:NoSchedule
----
++
+[TIP]
+====
+You can alternatively apply the following YAML to add the taint:
+
+[source,yaml]
+----
+kind: Node
+apiVersion: v1
+metadata:
+ name:
+ labels:
+ ...
+spec:
+ taints:
+ - key: dedicated
+ value: groupName
+ effect: NoSchedule
+ ...
+----
+====
. Add a corresponding toleration to the pods by writing a custom admission controller.
+
@@ -70,6 +112,27 @@ To ensure pods are blocked from the specialized hardware:
$ oc adm taint nodes disktype=ssd:NoSchedule
$ oc adm taint nodes disktype=ssd:PreferNoSchedule
----
++
+[TIP]
+====
+You can alternatively apply the following YAML to add the taint:
+
+[source,yaml]
+----
+kind: Node
+apiVersion: v1
+metadata:
+ name:
+ labels:
+ ...
+spec:
+ taints:
+ - key: disktype
+ value: ssd
+ effect: PreferNoSchedule
+ ...
+----
+====
. Adding a corresponding toleration to pods that use the special hardware using an admission controller.
diff --git a/modules/nodes-scheduler-taints-tolerations-seconds.adoc b/_unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc
similarity index 100%
rename from modules/nodes-scheduler-taints-tolerations-seconds.adoc
rename to _unused_topics/nodes-scheduler-taints-tolerations-seconds.adoc
diff --git a/modules/pod-using-a-different-service-account.adoc b/_unused_topics/pod-using-a-different-service-account.adoc
similarity index 100%
rename from modules/pod-using-a-different-service-account.adoc
rename to _unused_topics/pod-using-a-different-service-account.adoc
diff --git a/modules/rbac-updating-policy-definitions.adoc b/_unused_topics/rbac-updating-policy-definitions.adoc
similarity index 95%
rename from modules/rbac-updating-policy-definitions.adoc
rename to _unused_topics/rbac-updating-policy-definitions.adoc
index c22a325676e6..1a2e45a62e90 100644
--- a/modules/rbac-updating-policy-definitions.adoc
+++ b/_unused_topics/rbac-updating-policy-definitions.adoc
@@ -2,7 +2,7 @@
//
// * orphaned
-ifdef::openshift-enterprise,openshift-origin[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
[id="updating-policy-definitions_{context}"]
= Updating policy definitions
diff --git a/modules/running-modified-installation.adoc b/_unused_topics/running-modified-installation.adoc
similarity index 100%
rename from modules/running-modified-installation.adoc
rename to _unused_topics/running-modified-installation.adoc
diff --git a/modules/security-context-constraints-restore-defaults.adoc b/_unused_topics/security-context-constraints-restore-defaults.adoc
similarity index 100%
rename from modules/security-context-constraints-restore-defaults.adoc
rename to _unused_topics/security-context-constraints-restore-defaults.adoc
diff --git a/modules/security-overview.adoc b/_unused_topics/security-overview.adoc
similarity index 97%
rename from modules/security-overview.adoc
rename to _unused_topics/security-overview.adoc
index d59273adb56e..2f5170e0e604 100644
--- a/modules/security-overview.adoc
+++ b/_unused_topics/security-overview.adoc
@@ -26,7 +26,7 @@ checks for one or more of the roles assigned to the user, such as a cluster
administrator or administrator of the current project, before allowing it to
continue.
-ifdef::openshift-origin,openshift-online,openshift-enterprise[]
+ifdef::openshift-origin,openshift-online,openshift-enterprise,openshift-webscale[]
Since every container that runs on the cluster is associated with a service
account, it is also possible to associate secrets to those service accounts and have them
automatically delivered into the container. This secret delivery enables the infrastructure to
@@ -45,7 +45,7 @@ ifdef::openshift-origin,openshift-enterprise,openshift-dedicated[]
By default, a new internal PKI is created for each deployment of
{product-title}. The internal PKI uses 2048 bit RSA keys and SHA-256 signatures.
endif::[]
-ifdef::openshift-origin,openshift-enterprise[]
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
Custom certificates for public hosts are supported as well.
endif::[]
diff --git a/_unused_topics/serverless-creating-kubeconfig-file.adoc b/_unused_topics/serverless-creating-kubeconfig-file.adoc
new file mode 100644
index 000000000000..cecdb2fbba4f
--- /dev/null
+++ b/_unused_topics/serverless-creating-kubeconfig-file.adoc
@@ -0,0 +1,18 @@
+// Module is included in the following assemblies:
+//
+// serverless/knative-client.adoc
+
+[id="create-kubeconfig-file_{contect}"]
+= Creating a `kubeconfig` file
+
+Use `kubeconfig` files to organize information about clusters, users, namespaces, and authentication mechanisms. The CLI tool uses `kubeconfig` files to communicate with the API server of a cluster.
+
+.Procedure
+* Create a basic `kubeconfig` file from client certificates. Use the following command:
++
+[source,terminal]
+----
+$ oc adm create-kubeconfig \
+ --client-certificate=/path/to/client.crt \
+ --client-key=/path/to/client.key \
+ --certificate-authority=/path/to/ca.crt
+----
\ No newline at end of file
diff --git a/_unused_topics/serverless-rn-template-module.adoc b/_unused_topics/serverless-rn-template-module.adoc
new file mode 100644
index 000000000000..3a590658e7ef
--- /dev/null
+++ b/_unused_topics/serverless-rn-template-module.adoc
@@ -0,0 +1,13 @@
+[id="serverless-rn-_{context}"]
+= Release Notes for Red Hat {ServerlessProductName}
+// add a version, e.g. Technology Preview 1.0.0
+//update the id to match the filename
+
+[id="new-features-_{context}"]
+== New features
+
+[id="fixed-issues-_{context}"]
+== Fixed issues
+
+[id="known-issues-_{context}"]
+== Known issues
diff --git a/modules/service-accounts-adding-secrets.adoc b/_unused_topics/service-accounts-adding-secrets.adoc
similarity index 100%
rename from modules/service-accounts-adding-secrets.adoc
rename to _unused_topics/service-accounts-adding-secrets.adoc
diff --git a/modules/service-accounts-managing-secrets.adoc b/_unused_topics/service-accounts-managing-secrets.adoc
similarity index 100%
rename from modules/service-accounts-managing-secrets.adoc
rename to _unused_topics/service-accounts-managing-secrets.adoc
diff --git a/modules/understanding-installation.adoc b/_unused_topics/understanding-installation.adoc
similarity index 100%
rename from modules/understanding-installation.adoc
rename to _unused_topics/understanding-installation.adoc
diff --git a/_unused_topics/understanding-workers-masters.adoc b/_unused_topics/understanding-workers-masters.adoc
new file mode 100644
index 000000000000..2dbbfd44773f
--- /dev/null
+++ b/_unused_topics/understanding-workers-masters.adoc
@@ -0,0 +1,33 @@
+// Module included in the following assemblies:
+//
+// *
+
+[id="understanding-workers-masters_{context}"]
+= Understanding {product-title} workers and masters
+
+With installation complete, the cluster is now fully in charge of managing itself. Management of worker (compute) and master (control plane) nodes is done from within the cluster. So, before moving on to what the {product-title} cluster does to help you develop and deploy applications, you should explore how an {product-title} cluster manages itself. For that, we focus on three things: workers, masters (the control plane), and Operators.
+
+To see which workers and masters are running on your cluster, type:
+
+----
+$ oc get nodes
+
+NAME STATUS ROLES AGE VERSION
+ip-10-0-0-1.us-east-2.compute.internal Ready worker 4h20m v1.22.1
+ip-10-0-0-2.us-east-2.compute.internal Ready master 4h39m v1.22.1
+ip-10-0-0-3.us-east-2.compute.internal Ready worker 4h20m v1.22.1
+ip-10-0-0-4.us-east-2.compute.internal Ready master 4h39m v1.22.1
+ip-10-0-0-5.us-east-2.compute.internal Ready master 4h39m v1.22.1
+ip-10-0-0-6.us-east-2.compute.internal Ready worker 4h20m v1.22.1
+----
+
+To see more information about internal and external IP addresses, the type of operating system ({op-system}), kernel version, and container runtime (CRI-O), add the `-o wide` option.
+
+----
+$ oc get nodes -o wide
+
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ip-10-0-134-252.us-east-2.compute.internal Ready worker 17h v1.22.1 10.0.134.252 Red Hat CoreOS 4.0 3.10.0-957.5.1.el7.x86_64 cri-o://1.22.1-1.rhaos4.0.git2f0cb0d.el7
+
+...
+----
diff --git a/modules/upgrade-cluster-version-definition.adoc b/_unused_topics/upgrade-cluster-version-definition.adoc
similarity index 97%
rename from modules/upgrade-cluster-version-definition.adoc
rename to _unused_topics/upgrade-cluster-version-definition.adoc
index 681e0d28cf19..eb6a82098f45 100644
--- a/modules/upgrade-cluster-version-definition.adoc
+++ b/_unused_topics/upgrade-cluster-version-definition.adoc
@@ -21,7 +21,7 @@ metadata:
selfLink: /apis/config.openshift.io/v1/clusterversions/version
uid: 82f9f2c4-4cae-11e9-90b7-06dc0f62ad38
spec:
- channel: stable-4.1 <1>
+ channel: stable-4.3 <1>
overrides: "" <2>
clusterID: 0b1cf91f-c3fb-4f9e-aa02-e0d70c71f6e6
upstream: https://api.openshift.com/api/upgrades_info/v1/graph
@@ -60,7 +60,7 @@ components as `unmanaged` to prevent the CVO from creating or updating the objec
====
Set the `ClusterVersionSpec.overrides` parameter value only during cluster
debugging. Setting this value can prevent successful upgrades and is not
-suggested for production clusters.
+supported for production clusters.
====
<3> The status of available updates and any in-progress updates. These values display
the version that the cluster is reconciling to, and the conditions
@@ -76,4 +76,4 @@ information about the condition.
* `Available` means that the upgrade to the `desiredUpdate` value completed.
* `Progressing` means that an upgrade is in progress.
-* `Failing` means that an update is blocked by a temporary or permanent error.
\ No newline at end of file
+* `Failing` means that an update is blocked by a temporary or permanent error.
diff --git a/_unused_topics/using-images-source-to-image-java.adoc b/_unused_topics/using-images-source-to-image-java.adoc
new file mode 100644
index 000000000000..c8b62de41b34
--- /dev/null
+++ b/_unused_topics/using-images-source-to-image-java.adoc
@@ -0,0 +1,17 @@
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="using-images-source-to-image"]
+= Java
+include::modules/common-attributes.adoc[]
+:context: using-images-source-to-image
+toc::[]
+
+This topic includes information on the source-to-image (S2I) supported Java images available for {product-title} users.
+
+//Add link to Build -> S2I following updates
+
+include::modules/images-using-images-s2i-java.adoc[leveloffset=+1]
+include::modules/images-s2i-java-pulling-images.adoc[leveloffset=+1]
+include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
+include::modules/images-s2i-java-configuration.adoc[leveloffset=+1]
+include::modules/images-s2i-java-build-deploy-applications.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-nodejs.adoc b/_unused_topics/using-images-source-to-image-nodejs.adoc
new file mode 100644
index 000000000000..7219f8572373
--- /dev/null
+++ b/_unused_topics/using-images-source-to-image-nodejs.adoc
@@ -0,0 +1,17 @@
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="using-images-source-to-image-nodejs"]
+= Node.js
+include::modules/common-attributes.adoc[]
+:context: using-images-source-to-image-nodejs
+toc::[]
+
+This topic includes information on the source-to-image (S2I) supported Node.js images available for {product-title} users.
+
+//Add link to Build -> S2I following updates
+
+include::modules/images-using-images-s2i-nodejs.adoc[leveloffset=+1]
+include::modules/images-s2i-nodejs-pulling-images.adoc[leveloffset=+1]
+include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-nodejs-configuration.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-nodejs-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-perl.adoc b/_unused_topics/using-images-source-to-image-perl.adoc
new file mode 100644
index 000000000000..e75f0d8b1b54
--- /dev/null
+++ b/_unused_topics/using-images-source-to-image-perl.adoc
@@ -0,0 +1,17 @@
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="using-images-source-to-image-perl"]
+= Perl
+include::modules/common-attributes.adoc[]
+:context: using-images-source-to-image-perl
+toc::[]
+
+This topic includes information on the source-to-image (S2I) supported Perl images available for {product-title} users.
+
+//Add link to Build -> S2I following updates
+
+include::modules/images-using-images-s2i-perl.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-perl-pulling-images.adoc[leveloffset=+1]
+include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-perl-configuration.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-perl-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-php.adoc b/_unused_topics/using-images-source-to-image-php.adoc
new file mode 100644
index 000000000000..ea1feb1ac8fa
--- /dev/null
+++ b/_unused_topics/using-images-source-to-image-php.adoc
@@ -0,0 +1,17 @@
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="using-images-source-to-image-php"]
+= PHP
+include::modules/common-attributes.adoc[]
+:context: using-images-source-to-image-php
+toc::[]
+
+This topic includes information on the source-to-image (S2I) supported PHP images available for {product-title} users.
+
+//Add link to Build -> S2I following updates
+
+include::modules/images-using-images-s2i-php.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-php-pulling-images.adoc[leveloffset=+1]
+include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-php-configuration.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-php-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-python.adoc b/_unused_topics/using-images-source-to-image-python.adoc
new file mode 100644
index 000000000000..47d9efceba84
--- /dev/null
+++ b/_unused_topics/using-images-source-to-image-python.adoc
@@ -0,0 +1,17 @@
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="using-images-source-to-image-python"]
+= Python
+include::modules/common-attributes.adoc[]
+:context: using-images-source-to-image-python
+toc::[]
+
+This topic includes information on the source-to-image (S2I) supported Python images available for {product-title} users.
+
+//Add link to Build -> S2I following updates
+
+include::modules/images-using-images-s2i-python.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-python-pulling-images.adoc[leveloffset=+1]
+include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-python-configuration.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-python-hot-deploying.adoc[leveloffset=+1]
diff --git a/_unused_topics/using-images-source-to-image-ruby.adoc b/_unused_topics/using-images-source-to-image-ruby.adoc
new file mode 100644
index 000000000000..535cf6af11d0
--- /dev/null
+++ b/_unused_topics/using-images-source-to-image-ruby.adoc
@@ -0,0 +1,17 @@
+// * Unused. Can be removed by 4.9 if still unused. Request full peer review for the module if it’s used.
+
+[id="using-images-source-to-image-ruby"]
+= Ruby
+include::modules/common-attributes.adoc[]
+:context: using-images-source-to-image-ruby
+toc::[]
+
+This topic includes information on the source-to-image (S2I) supported Ruby images available for {product-title} users.
+
+//Add link to Build -> S2I following updates
+
+include::modules/images-using-images-s2i-ruby.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-ruby-pulling-images.adoc[leveloffset=+1]
+include::modules/images-s2i-build-process-overview.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-ruby-configuration.adoc[leveloffset=+1]
+include::modules/images-using-images-s2i-ruby-hot-deploying.adoc[leveloffset=+1]
diff --git a/administering_a_cluster/cluster-admin-role.adoc b/administering_a_cluster/cluster-admin-role.adoc
new file mode 100644
index 000000000000..9e2d0203dbd7
--- /dev/null
+++ b/administering_a_cluster/cluster-admin-role.adoc
@@ -0,0 +1,12 @@
+[id="cluster-administrator-role"]
+= The cluster-admin role
+include::modules/common-attributes.adoc[]
+:context: cluster-administrator
+
+toc::[]
+
+As an administrator of {product-title} with Customer Cloud Subscriptions (link:https://www.openshift.com/dedicated/ccs[CCS]), you can request additional permissions and access to the *cluster-admin* role within your organization's cluster. While logged in to an account with the cluster-admin role, users have increased permissions to run privileged security contexts and install additional Operators for their environment.
+
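+For illustration only, granting this role uses standard RBAC objects. The following is a minimal sketch with a hypothetical binding name and user; the modules included below describe the supported procedure for enabling and granting the role:
+
+[source,yaml]
+----
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: example-cluster-admin    # hypothetical binding name
+subjects:
+- kind: User
+  apiGroup: rbac.authorization.k8s.io
+  name: alice                    # hypothetical user
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+----
+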
+include::modules/dedicated-cluster-admin-enable.adoc[leveloffset=+1]
+
+include::modules/dedicated-cluster-admin-grant.adoc[leveloffset=+1]
diff --git a/administering_a_cluster/dedicated-admin-role.adoc b/administering_a_cluster/dedicated-admin-role.adoc
index 68560339e1c1..cea4e66b26ff 100644
--- a/administering_a_cluster/dedicated-admin-role.adoc
+++ b/administering_a_cluster/dedicated-admin-role.adoc
@@ -2,6 +2,7 @@
= The dedicated-admin role
include::modules/common-attributes.adoc[]
:context: dedicated-administrator
+
toc::[]
As an administrator of an {product-title} cluster, your account has additional
@@ -24,10 +25,28 @@ include::modules/dedicated-logging-in-and-verifying-permissions.adoc[leveloffset
include::modules/dedicated-managing-dedicated-administrators.adoc[leveloffset=+1]
-include::modules/managing-dedicated-readers-group.adoc[leveloffset=+1]
-
include::modules/dedicated-admin-granting-permissions.adoc[leveloffset=+1]
include::modules/dedicated-managing-service-accounts.adoc[leveloffset=+1]
include::modules/dedicated-managing-quotas-and-limit-ranges.adoc[leveloffset=+1]
+
+[id="osd-installing-operators-from-operatorhub_{context}"]
+== Installing Operators from the OperatorHub
+
+{product-title} administrators can install Operators from a curated list
+provided by the OperatorHub. Installing an Operator makes it available to all
+developers on your cluster, who can create custom resources and applications by using that Operator.
+
+[NOTE]
+====
+Privileged and custom Operators cannot be installed.
+====
+
+Administrators can install Operators only into the default `openshift-operators`
+namespace, except for the Red Hat OpenShift Logging Operator, which requires the
+`openshift-logging` namespace.
+
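+For orientation, an installation from the OperatorHub is represented in the cluster as a `Subscription` object in the `openshift-operators` namespace. The following is a minimal sketch with a hypothetical Operator package name and channel, not a specific supported Operator:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: example-operator         # hypothetical package name
+  namespace: openshift-operators
+spec:
+  channel: stable                # hypothetical channel
+  name: example-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+----
+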
+.Additional resources
+
+* xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[Adding Operators to a cluster]
diff --git a/cnv/images b/administering_a_cluster/images
similarity index 100%
rename from cnv/images
rename to administering_a_cluster/images
diff --git a/cnv/modules b/administering_a_cluster/modules
similarity index 100%
rename from cnv/modules
rename to administering_a_cluster/modules
diff --git a/api-config.yaml b/api-config.yaml
new file mode 100644
index 000000000000..14ba3415ad7e
--- /dev/null
+++ b/api-config.yaml
@@ -0,0 +1,1147 @@
+version: 2
+outputDir: rest_api
+apisToHide: []
+apiSupportLevels:
+- apiGroup: openshift\.io$
+ supportLevels:
+ - apiVersion: v\d+
+ level: 1
+ - apiVersion: v\d+beta\d+
+ level: 2
+ - apiVersion: v\d+alpha\d+
+ level: 4
+- apiGroup: ^\w+$
+ supportLevels:
+ - apiVersion: v\d+
+ level: 1
+ - apiVersion: v\d+beta\d+
+ level: 2
+- apiGroup: k8s\.io$
+ supportLevels:
+ - apiVersion: v\d+
+ level: 1
+ - apiVersion: v\d+beta\d+
+ level: 2
+- apiGroup: monitoring\.coreos\.com$
+ supportLevels:
+ - apiVersion: v\d+
+ level: 1
+- apiGroup: operators\.coreos\.com$
+ supportLevels:
+ - apiVersion: v\d+
+ level: 1
+ - apiVersion: v\d+alpha\d+
+ level: 3
+- apiGroup: metal3\.io$
+ supportLevels:
+ - apiVersion: v\d+alpha\d+
+ level: 4
+packageMap:
+ com.coreos.monitoring: monitoring.coreos.com
+ com.coreos.operators: operators.coreos.com
+ com.github.openshift.api.apps: apps.openshift.io
+ com.github.openshift.api.authorization: authorization.openshift.io
+ com.github.openshift.api.build: build.openshift.io
+ com.github.openshift.api.image: image.openshift.io
+ com.github.openshift.api.oauth: oauth.openshift.io
+ com.github.openshift.api.project: project.openshift.io
+ com.github.openshift.api.quota: quota.openshift.io
+ com.github.openshift.api.route: route.openshift.io
+ com.github.openshift.api.security: security.openshift.io
+ com.github.openshift.api.template: template.openshift.io
+ com.github.openshift.api.user: user.openshift.io
+ com.github.operator-framework.api.pkg.lib: packages.operators.coreos.com
+ com.github.operator-framework.api.pkg.operators: packages.operators.coreos.com
+ com.github.operator-framework.operator-lifecycle-manager.pkg.package-server.apis.operators: packages.operators.coreos.com
+ io.cncf.cni.k8s: k8s.cni.cncf.io
+ io.cncf.cni.whereabouts: whereabouts.cni.cncf.io
+ io.k8s.metrics.pkg.apis.metrics: metrics.k8s.io
+ io.k8s.api.admissionregistration: admissionregistration.k8s.io
+ io.k8s.api.apps: apps
+ io.k8s.api.authentication: authentication.k8s.io
+ io.k8s.api.authorization: authorization.k8s.io
+ io.k8s.api.autoscaling: autoscaling
+ io.k8s.api.batch: batch
+ io.k8s.api.certificates: certificates.k8s.io
+ io.k8s.api.coordination: coordination.k8s.io
+ io.k8s.api.core: core
+ io.k8s.api.discovery: discovery.k8s.io
+ io.k8s.api.events: events.k8s.io
+ io.k8s.api.extensions: extensions
+ io.k8s.api.flowcontrol: flowcontrol.apiserver.k8s.io
+ io.k8s.api.networking: networking.k8s.io
+ io.k8s.api.node: node.k8s.io
+ io.k8s.api.policy: policy
+ io.k8s.api.rbac: rbac.authorization.k8s.io
+ io.k8s.api.scheduling: scheduling.k8s.io
+ io.k8s.api.storage: storage.k8s.io
+ io.k8s.apiextensions-apiserver.pkg.apis.apiextensions: apiextensions.k8s.io
+ io.k8s.apimachinery.pkg.api: api
+ io.k8s.apimachinery.pkg.apis.meta: meta
+ io.k8s.apimachinery.pkg.util: util
+ io.k8s.apimachinery.pkg: pkg
+ io.k8s.kube-aggregator.pkg.apis.apiregistration: apiregistration.k8s.io
+ io.k8s.migration: migration.k8s.io
+ io.k8s.storage.snapshot: snapshot.storage.k8s.io
+ io.metal3: metal3.io
+ io.openshift.apiserver: apiserver.openshift.io
+ io.openshift.authorization: authorization.openshift.io
+ io.openshift.autoscaling: autoscaling.openshift.io
+ io.openshift.cloudcredential: cloudcredential.openshift.io
+ io.openshift.config: config.openshift.io
+ io.openshift.console: console.openshift.io
+ io.openshift.helm: helm.openshift.io
+ io.openshift.internal.security: security.internal.openshift.io
+ io.openshift.machine: machine.openshift.io
+ io.openshift.machineconfiguration: machineconfiguration.openshift.io
+ io.openshift.network: network.openshift.io
+ io.openshift.operator.controlplane: controlplane.operator.openshift.io
+ io.openshift.operator.imageregistry: imageregistry.operator.openshift.io
+ io.openshift.operator.ingress: ingress.operator.openshift.io
+ io.openshift.operator.network: network.operator.openshift.io
+ io.openshift.operator.samples: samples.operator.openshift.io
+ io.openshift.operator: operator.openshift.io
+ io.openshift.quota: quota.openshift.io
+ io.openshift.security: security.openshift.io
+ io.openshift.tuned: tuned.openshift.io
+apiMap:
+- name: Authorization APIs
+ resources:
+ - kind: LocalResourceAccessReview
+ group: authorization.openshift.io
+ version: v1
+ plural: localresourceaccessreviews
+ namespaced: true
+ - kind: LocalSubjectAccessReview
+ group: authorization.openshift.io
+ version: v1
+ plural: localsubjectaccessreviews
+ namespaced: true
+ - kind: ResourceAccessReview
+ group: authorization.openshift.io
+ version: v1
+ plural: resourceaccessreviews
+ namespaced: false
+ - kind: SelfSubjectRulesReview
+ group: authorization.openshift.io
+ version: v1
+ plural: selfsubjectrulesreviews
+ namespaced: true
+ - kind: SubjectAccessReview
+ group: authorization.openshift.io
+ version: v1
+ plural: subjectaccessreviews
+ namespaced: false
+ - kind: SubjectRulesReview
+ group: authorization.openshift.io
+ version: v1
+ plural: subjectrulesreviews
+ namespaced: true
+ - kind: TokenReview
+ group: authentication.k8s.io
+ version: v1
+ plural: tokenreviews
+ namespaced: false
+ - kind: LocalSubjectAccessReview
+ group: authorization.k8s.io
+ version: v1
+ plural: localsubjectaccessreviews
+ namespaced: true
+ - kind: SelfSubjectAccessReview
+ group: authorization.k8s.io
+ version: v1
+ plural: selfsubjectaccessreviews
+ namespaced: false
+ - kind: SelfSubjectRulesReview
+ group: authorization.k8s.io
+ version: v1
+ plural: selfsubjectrulesreviews
+ namespaced: false
+ - kind: SubjectAccessReview
+ group: authorization.k8s.io
+ version: v1
+ plural: subjectaccessreviews
+ namespaced: false
+- name: Autoscale APIs
+ resources:
+ - kind: ClusterAutoscaler
+ group: autoscaling.openshift.io
+ version: v1
+ plural: clusterautoscalers
+ namespaced: false
+ - kind: MachineAutoscaler
+ group: autoscaling.openshift.io
+ version: v1beta1
+ plural: machineautoscalers
+ namespaced: true
+ - kind: HorizontalPodAutoscaler
+ group: autoscaling
+ version: v1
+ plural: horizontalpodautoscalers
+ namespaced: true
+- name: Config APIs
+ resources:
+ - kind: APIServer
+ group: config.openshift.io
+ version: v1
+ plural: apiservers
+ namespaced: false
+ - kind: Authentication
+ group: config.openshift.io
+ version: v1
+ plural: authentications
+ namespaced: false
+ - kind: Build
+ group: config.openshift.io
+ version: v1
+ plural: builds
+ namespaced: false
+ - kind: ClusterOperator
+ group: config.openshift.io
+ version: v1
+ plural: clusteroperators
+ namespaced: false
+ - kind: ClusterVersion
+ group: config.openshift.io
+ version: v1
+ plural: clusterversions
+ namespaced: false
+ - kind: Console
+ group: config.openshift.io
+ version: v1
+ plural: consoles
+ namespaced: false
+ - kind: DNS
+ group: config.openshift.io
+ version: v1
+ plural: dnses
+ namespaced: false
+ - kind: FeatureGate
+ group: config.openshift.io
+ version: v1
+ plural: featuregates
+ namespaced: false
+ - kind: HelmChartRepository
+ group: helm.openshift.io
+ version: v1beta1
+ plural: helmchartrepositories
+ namespaced: false
+ - kind: Image
+ group: config.openshift.io
+ version: v1
+ plural: images
+ namespaced: false
+ - kind: Infrastructure
+ group: config.openshift.io
+ version: v1
+ plural: infrastructures
+ namespaced: false
+ - kind: Ingress
+ group: config.openshift.io
+ version: v1
+ plural: ingresses
+ namespaced: false
+ - kind: Network
+ group: config.openshift.io
+ version: v1
+ plural: networks
+ namespaced: false
+ - kind: OAuth
+ group: config.openshift.io
+ version: v1
+ plural: oauths
+ namespaced: false
+ - kind: OperatorHub
+ group: config.openshift.io
+ version: v1
+ plural: operatorhubs
+ namespaced: false
+ - kind: Project
+ group: config.openshift.io
+ version: v1
+ plural: projects
+ namespaced: false
+ - kind: Proxy
+ group: config.openshift.io
+ version: v1
+ plural: proxies
+ namespaced: false
+ - kind: Scheduler
+ group: config.openshift.io
+ version: v1
+ plural: schedulers
+ namespaced: false
+- name: Console APIs
+ resources:
+ - kind: ConsoleCLIDownload
+ group: console.openshift.io
+ version: v1
+ plural: consoleclidownloads
+ namespaced: false
+ - kind: ConsoleExternalLogLink
+ group: console.openshift.io
+ version: v1
+ plural: consoleexternalloglinks
+ namespaced: false
+ - kind: ConsoleLink
+ group: console.openshift.io
+ version: v1
+ plural: consolelinks
+ namespaced: false
+ - kind: ConsoleNotification
+ group: console.openshift.io
+ version: v1
+ plural: consolenotifications
+ namespaced: false
+ - kind: ConsolePlugin
+ group: console.openshift.io
+ version: v1alpha1
+ plural: consoleplugins
+ namespaced: false
+ - kind: ConsoleQuickStart
+ group: console.openshift.io
+ version: v1
+ plural: consolequickstarts
+ namespaced: false
+ - kind: ConsoleYAMLSample
+ group: console.openshift.io
+ version: v1
+ plural: consoleyamlsamples
+ namespaced: false
+- name: Extension APIs
+ resources:
+ - kind: APIService
+ group: apiregistration.k8s.io
+ version: v1
+ plural: apiservices
+ namespaced: false
+ - kind: CustomResourceDefinition
+ group: apiextensions.k8s.io
+ version: v1
+ plural: customresourcedefinitions
+ namespaced: false
+ - kind: MutatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ version: v1
+ plural: mutatingwebhookconfigurations
+ namespaced: false
+ - kind: ValidatingWebhookConfiguration
+ group: admissionregistration.k8s.io
+ version: v1
+ plural: validatingwebhookconfigurations
+ namespaced: false
+- name: Image APIs
+ resources:
+ - kind: Image
+ group: image.openshift.io
+ version: v1
+ plural: images
+ namespaced: false
+ - kind: ImageSignature
+ group: image.openshift.io
+ version: v1
+ plural: imagesignatures
+ namespaced: false
+ - kind: ImageStreamImage
+ group: image.openshift.io
+ version: v1
+ plural: imagestreamimages
+ namespaced: true
+ - kind: ImageStreamImport
+ group: image.openshift.io
+ version: v1
+ plural: imagestreamimports
+ namespaced: true
+ - kind: ImageStreamMapping
+ group: image.openshift.io
+ version: v1
+ plural: imagestreammappings
+ namespaced: true
+ - kind: ImageStream
+ group: image.openshift.io
+ version: v1
+ plural: imagestreams
+ namespaced: true
+ - kind: ImageStreamTag
+ group: image.openshift.io
+ version: v1
+ plural: imagestreamtags
+ namespaced: true
+ - kind: ImageTag
+ group: image.openshift.io
+ version: v1
+ plural: imagetags
+ namespaced: true
+- name: Machine APIs
+ resources:
+ - kind: ContainerRuntimeConfig
+ group: machineconfiguration.openshift.io
+ version: v1
+ plural: containerruntimeconfigs
+ namespaced: false
+ - kind: ControllerConfig
+ group: machineconfiguration.openshift.io
+ version: v1
+ plural: controllerconfigs
+ namespaced: false
+ - kind: KubeletConfig
+ group: machineconfiguration.openshift.io
+ version: v1
+ plural: kubeletconfigs
+ namespaced: false
+ - kind: MachineConfigPool
+ group: machineconfiguration.openshift.io
+ version: v1
+ plural: machineconfigpools
+ namespaced: false
+ - kind: MachineConfig
+ group: machineconfiguration.openshift.io
+ version: v1
+ plural: machineconfigs
+ namespaced: false
+ - kind: MachineHealthCheck
+ group: machine.openshift.io
+ version: v1beta1
+ plural: machinehealthchecks
+ namespaced: true
+ - kind: Machine
+ group: machine.openshift.io
+ version: v1beta1
+ plural: machines
+ namespaced: true
+ - kind: MachineSet
+ group: machine.openshift.io
+ version: v1beta1
+ plural: machinesets
+ namespaced: true
+- name: Metadata APIs
+ resources:
+ - kind: APIRequestCount
+ group: apiserver.openshift.io
+ version: v1
+ plural: apirequestcounts
+ namespaced: false
+ - kind: Binding
+ group: core
+ version: v1
+ plural: bindings
+ namespaced: true
+ - kind: ComponentStatus
+ group: core
+ version: v1
+ plural: componentstatuses
+ namespaced: false
+ - kind: ConfigMap
+ group: core
+ version: v1
+ plural: configmaps
+ namespaced: true
+ - kind: ControllerRevision
+ group: apps
+ version: v1
+ plural: controllerrevisions
+ namespaced: true
+ - kind: Event
+ group: events.k8s.io
+ version: v1
+ plural: events
+ namespaced: true
+ - kind: Event
+ group: core
+ version: v1
+ plural: events
+ namespaced: true
+ - kind: Lease
+ group: coordination.k8s.io
+ version: v1
+ plural: leases
+ namespaced: true
+ - kind: Namespace
+ group: core
+ version: v1
+ plural: namespaces
+ namespaced: false
+- name: Monitoring APIs
+ resources:
+ - kind: Alertmanager
+ group: monitoring.coreos.com
+ version: v1
+ plural: alertmanagers
+ namespaced: true
+ - kind: AlertmanagerConfig
+ group: monitoring.coreos.com
+ version: v1alpha1
+ plural: alertmanagerconfigs
+ namespaced: true
+ - kind: PodMonitor
+ group: monitoring.coreos.com
+ version: v1
+ plural: podmonitors
+ namespaced: true
+ - kind: Probe
+ group: monitoring.coreos.com
+ version: v1
+ plural: probes
+ namespaced: true
+ - kind: Prometheus
+ group: monitoring.coreos.com
+ version: v1
+ plural: prometheuses
+ namespaced: true
+ - kind: PrometheusRule
+ group: monitoring.coreos.com
+ version: v1
+ plural: prometheusrules
+ namespaced: true
+ - kind: ServiceMonitor
+ group: monitoring.coreos.com
+ version: v1
+ plural: servicemonitors
+ namespaced: true
+ - kind: ThanosRuler
+ group: monitoring.coreos.com
+ version: v1
+ plural: thanosrulers
+ namespaced: true
+- name: Network APIs
+ resources:
+ - kind: ClusterNetwork
+ group: network.openshift.io
+ version: v1
+ plural: clusternetworks
+ namespaced: false
+ - kind: Endpoints
+ group: core
+ version: v1
+ plural: endpoints
+ namespaced: true
+ - kind: EndpointSlice
+ group: discovery.k8s.io
+ version: v1
+ plural: endpointslices
+ namespaced: true
+ - kind: EgressNetworkPolicy
+ group: network.openshift.io
+ version: v1
+ plural: egressnetworkpolicies
+ namespaced: true
+ - kind: EgressRouter
+ group: network.operator.openshift.io
+ version: v1
+ plural: egressrouters
+ namespaced: true
+ - kind: HostSubnet
+ group: network.openshift.io
+ version: v1
+ plural: hostsubnets
+ namespaced: false
+ - kind: Ingress
+ group: networking.k8s.io
+ version: v1
+ plural: ingresses
+ namespaced: true
+ - kind: IngressClass
+ group: networking.k8s.io
+ version: v1
+ plural: ingressclasses
+ namespaced: false
+ - kind: IPPool
+ group: whereabouts.cni.cncf.io
+ version: v1alpha1
+ plural: ippools
+ namespaced: true
+ - kind: NetNamespace
+ group: network.openshift.io
+ version: v1
+ plural: netnamespaces
+ namespaced: false
+ - kind: NetworkAttachmentDefinition
+ group: k8s.cni.cncf.io
+ version: v1
+ plural: network-attachment-definitions
+ namespaced: true
+ - kind: NetworkPolicy
+ group: networking.k8s.io
+ version: v1
+ plural: networkpolicies
+ namespaced: true
+ - kind: PodNetworkConnectivityCheck
+ group: controlplane.operator.openshift.io
+ version: v1alpha1
+ plural: podnetworkconnectivitychecks
+ namespaced: true
+ - kind: Route
+ group: route.openshift.io
+ version: v1
+ plural: routes
+ namespaced: true
+ - kind: Service
+ group: core
+ version: v1
+ plural: services
+ namespaced: true
+- name: Node APIs
+ resources:
+ - kind: Node
+ group: core
+ version: v1
+ plural: nodes
+ namespaced: false
+ - kind: Profile
+ group: tuned.openshift.io
+ version: v1
+ plural: profiles
+ namespaced: true
+ - kind: RuntimeClass
+ group: node.k8s.io
+ version: v1
+ plural: runtimeclasses
+ namespaced: false
+ - kind: Tuned
+ group: tuned.openshift.io
+ version: v1
+ plural: tuneds
+ namespaced: true
+- name: OAuth APIs
+ resources:
+ - kind: OAuthAccessToken
+ group: oauth.openshift.io
+ version: v1
+ plural: oauthaccesstokens
+ namespaced: false
+ - kind: OAuthAuthorizeToken
+ group: oauth.openshift.io
+ version: v1
+ plural: oauthauthorizetokens
+ namespaced: false
+ - kind: OAuthClientAuthorization
+ group: oauth.openshift.io
+ version: v1
+ plural: oauthclientauthorizations
+ namespaced: false
+ - kind: OAuthClient
+ group: oauth.openshift.io
+ version: v1
+ plural: oauthclients
+ namespaced: false
+# Not in OpenAPI spec JSON
+# - kind: TokenReview
+# group: oauth.openshift.io
+# version: v1
+# plural: tokenreviews
+# namespaced: false
+ - kind: UserOAuthAccessToken
+ group: oauth.openshift.io
+ version: v1
+ plural: useroauthaccesstokens
+ namespaced: false
+- name: Operator APIs
+ resources:
+ - kind: Authentication
+ group: operator.openshift.io
+ version: v1
+ plural: authentications
+ namespaced: false
+ - kind: CloudCredential
+ group: operator.openshift.io
+ version: v1
+ plural: cloudcredentials
+ namespaced: false
+ - kind: ClusterCSIDriver
+ group: operator.openshift.io
+ version: v1
+ plural: clustercsidrivers
+ namespaced: false
+ - kind: Console
+ group: operator.openshift.io
+ version: v1
+ plural: consoles
+ namespaced: false
+ - kind: Config
+ group: operator.openshift.io
+ version: v1
+ plural: configs
+ namespaced: false
+ - kind: Config
+ group: imageregistry.operator.openshift.io
+ version: v1
+ plural: configs
+ namespaced: false
+ - kind: Config
+ group: samples.operator.openshift.io
+ version: v1
+ plural: configs
+ namespaced: false
+ - kind: CSISnapshotController
+ group: operator.openshift.io
+ version: v1
+ plural: csisnapshotcontrollers
+ namespaced: false
+ - kind: DNS
+ group: operator.openshift.io
+ version: v1
+ plural: dnses
+ namespaced: false
+ - kind: DNSRecord
+ group: ingress.operator.openshift.io
+ version: v1
+ plural: dnsrecords
+ namespaced: true
+ - kind: Etcd
+ group: operator.openshift.io
+ version: v1
+ plural: etcds
+ namespaced: false
+ - kind: ImageContentSourcePolicy
+ group: operator.openshift.io
+ version: v1alpha1
+ plural: imagecontentsourcepolicies
+ namespaced: false
+ - kind: ImagePruner
+ group: imageregistry.operator.openshift.io
+ version: v1
+ plural: imagepruners
+ namespaced: false
+ - kind: IngressController
+ group: operator.openshift.io
+ version: v1
+ plural: ingresscontrollers
+ namespaced: true
+ - kind: KubeAPIServer
+ group: operator.openshift.io
+ version: v1
+ plural: kubeapiservers
+ namespaced: false
+ - kind: KubeControllerManager
+ group: operator.openshift.io
+ version: v1
+ plural: kubecontrollermanagers
+ namespaced: false
+ - kind: KubeScheduler
+ group: operator.openshift.io
+ version: v1
+ plural: kubeschedulers
+ namespaced: false
+ - kind: KubeStorageVersionMigrator
+ group: operator.openshift.io
+ version: v1
+ plural: kubestorageversionmigrators
+ namespaced: false
+ - kind: Network
+ group: operator.openshift.io
+ version: v1
+ plural: networks
+ namespaced: false
+ - kind: OpenShiftAPIServer
+ group: operator.openshift.io
+ version: v1
+ plural: openshiftapiservers
+ namespaced: false
+ - kind: OpenShiftControllerManager
+ group: operator.openshift.io
+ version: v1
+ plural: openshiftcontrollermanagers
+ namespaced: false
+ - kind: OperatorPKI
+ group: network.operator.openshift.io
+ version: v1
+ plural: operatorpkis
+ namespaced: true
+ - kind: ServiceCA
+ group: operator.openshift.io
+ version: v1
+ plural: servicecas
+ namespaced: false
+ - kind: Storage
+ group: operator.openshift.io
+ version: v1
+ plural: storages
+ namespaced: false
+- name: OperatorHub APIs
+ resources:
+ - kind: CatalogSource
+ group: operators.coreos.com
+ version: v1alpha1
+ plural: catalogsources
+ namespaced: true
+ - kind: ClusterServiceVersion
+ group: operators.coreos.com
+ version: v1alpha1
+ plural: clusterserviceversions
+ namespaced: true
+ - kind: InstallPlan
+ group: operators.coreos.com
+ version: v1alpha1
+ plural: installplans
+ namespaced: true
+ - kind: Operator
+ group: operators.coreos.com
+ version: v1
+ plural: operators
+ namespaced: false
+ - kind: OperatorCondition
+ group: operators.coreos.com
+ version: v2
+ plural: operatorconditions
+ namespaced: true
+ - kind: OperatorGroup
+ group: operators.coreos.com
+ version: v1
+ plural: operatorgroups
+ namespaced: true
+ - kind: PackageManifest
+ group: packages.operators.coreos.com
+ version: v1
+ plural: packagemanifests
+ namespaced: true
+ - kind: Subscription
+ group: operators.coreos.com
+ version: v1alpha1
+ plural: subscriptions
+ namespaced: true
+- name: Policy APIs
+ resources:
+ - kind: PodDisruptionBudget
+ group: policy
+ version: v1
+ plural: poddisruptionbudgets
+ namespaced: true
+# https://bugzilla.redhat.com/show_bug.cgi?id=1875493
+# - kind: PodSecurityPolicy
+# group: policy
+# version: v1beta1
+# plural: podsecuritypolicies
+# namespaced: false
+- name: Project APIs
+ resources:
+ - kind: Project
+ group: project.openshift.io
+ version: v1
+ plural: projects
+ namespaced: false
+ - kind: ProjectRequest
+ group: project.openshift.io
+ version: v1
+ plural: projectrequests
+ namespaced: false
+- name: Provisioning APIs
+ resources:
+ - kind: BareMetalHost
+ group: metal3.io
+ version: v1alpha1
+ plural: baremetalhosts
+ namespaced: true
+ - kind: Provisioning
+ group: metal3.io
+ version: v1alpha1
+ plural: provisionings
+ namespaced: false
+- name: RBAC APIs
+ resources:
+ - kind: ClusterRoleBinding
+ group: rbac.authorization.k8s.io
+ version: v1
+ plural: clusterrolebindings
+ namespaced: false
+ - kind: ClusterRole
+ group: rbac.authorization.k8s.io
+ version: v1
+ plural: clusterroles
+ namespaced: false
+ - kind: RoleBinding
+ group: rbac.authorization.k8s.io
+ version: v1
+ plural: rolebindings
+ namespaced: true
+ - kind: Role
+ group: rbac.authorization.k8s.io
+ version: v1
+ plural: roles
+ namespaced: true
+- name: Role APIs
+ resources:
+ - kind: ClusterRoleBinding
+ group: authorization.openshift.io
+ version: v1
+ plural: clusterrolebindings
+ namespaced: false
+ - kind: ClusterRole
+ group: authorization.openshift.io
+ version: v1
+ plural: clusterroles
+ namespaced: false
+ - kind: RoleBindingRestriction
+ group: authorization.openshift.io
+ version: v1
+ plural: rolebindingrestrictions
+ namespaced: true
+ - kind: RoleBinding
+ group: authorization.openshift.io
+ version: v1
+ plural: rolebindings
+ namespaced: true
+ - kind: Role
+ group: authorization.openshift.io
+ version: v1
+ plural: roles
+ namespaced: true
+- name: Schedule and quota APIs
+ resources:
+ - kind: AppliedClusterResourceQuota
+ group: quota.openshift.io
+ version: v1
+ plural: appliedclusterresourcequotas
+ namespaced: true
+ - kind: ClusterResourceQuota
+ group: quota.openshift.io
+ version: v1
+ plural: clusterresourcequotas
+ namespaced: false
+ - kind: FlowSchema
+ group: flowcontrol.apiserver.k8s.io
+ version: v1beta1
+ plural: flowschemas
+ namespaced: false
+ - kind: LimitRange
+ group: core
+ version: v1
+ plural: limitranges
+ namespaced: true
+ - kind: PriorityClass
+ group: scheduling.k8s.io
+ version: v1
+ plural: priorityclasses
+ namespaced: false
+ - kind: PriorityLevelConfiguration
+ group: flowcontrol.apiserver.k8s.io
+ version: v1beta1
+ plural: prioritylevelconfigurations
+ namespaced: false
+ - kind: ResourceQuota
+ group: core
+ version: v1
+ plural: resourcequotas
+ namespaced: true
+- name: Security APIs
+ resources:
+ - kind: CertificateSigningRequest
+ group: certificates.k8s.io
+ version: v1
+ plural: certificatesigningrequests
+ namespaced: false
+ - kind: CredentialsRequest
+ group: cloudcredential.openshift.io
+ version: v1
+ plural: credentialsrequests
+ namespaced: true
+ - kind: PodSecurityPolicyReview
+ group: security.openshift.io
+ version: v1
+ plural: podsecuritypolicyreviews
+ namespaced: true
+ - kind: PodSecurityPolicySelfSubjectReview
+ group: security.openshift.io
+ version: v1
+ plural: podsecuritypolicyselfsubjectreviews
+ namespaced: true
+ - kind: PodSecurityPolicySubjectReview
+ group: security.openshift.io
+ version: v1
+ plural: podsecuritypolicysubjectreviews
+ namespaced: true
+ - kind: RangeAllocation
+ group: security.openshift.io
+ version: v1
+ plural: rangeallocations
+ namespaced: false
+# This is internal only, and must be ignored
+# - kind: RangeAllocation
+# group: security.internal.openshift.io
+# version: v1
+# plural: rangeallocations
+# namespaced: false
+ - kind: Secret
+ group: core
+ version: v1
+ plural: secrets
+ namespaced: true
+ - kind: SecurityContextConstraints
+ group: security.openshift.io
+ version: v1
+ plural: securitycontextconstraints
+ namespaced: false
+ - kind: ServiceAccount
+ group: core
+ version: v1
+ plural: serviceaccounts
+ namespaced: true
+- name: Storage APIs
+ resources:
+ - kind: CSIDriver
+ group: storage.k8s.io
+ version: v1
+ plural: csidrivers
+ namespaced: false
+ - kind: CSINode
+ group: storage.k8s.io
+ version: v1
+ plural: csinodes
+ namespaced: false
+ - kind: CSIStorageCapacity
+ group: storage.k8s.io
+ version: v1beta1
+ plural: csistoragecapacities
+ namespaced: true
+ - kind: PersistentVolumeClaim
+ group: core
+ version: v1
+ plural: persistentvolumeclaims
+ namespaced: true
+ - kind: StorageClass
+ group: storage.k8s.io
+ version: v1
+ plural: storageclasses
+ namespaced: false
+ - kind: StorageState
+ group: migration.k8s.io
+ version: v1alpha1
+ plural: storagestates
+ namespaced: false
+ - kind: StorageVersionMigration
+ group: migration.k8s.io
+ version: v1alpha1
+ plural: storageversionmigrations
+ namespaced: false
+ - kind: VolumeAttachment
+ group: storage.k8s.io
+ version: v1
+ plural: volumeattachments
+ namespaced: false
+ - kind: VolumeSnapshot
+ group: snapshot.storage.k8s.io
+ version: v1
+ plural: volumesnapshots
+ namespaced: true
+ - kind: VolumeSnapshotClass
+ group: snapshot.storage.k8s.io
+ version: v1
+ plural: volumesnapshotclasses
+ namespaced: false
+ - kind: VolumeSnapshotContent
+ group: snapshot.storage.k8s.io
+ version: v1
+ plural: volumesnapshotcontents
+ namespaced: false
+- name: Template APIs
+ resources:
+ - kind: BrokerTemplateInstance
+ group: template.openshift.io
+ version: v1
+ plural: brokertemplateinstances
+ namespaced: false
+ - kind: PodTemplate
+ group: core
+ version: v1
+ plural: podtemplates
+ namespaced: true
+ - kind: Template
+ group: template.openshift.io
+ version: v1
+ plural: processedtemplates
+ namespaced: true
+ - kind: TemplateInstance
+ group: template.openshift.io
+ version: v1
+ plural: templateinstances
+ namespaced: true
+- name: User and group APIs
+ resources:
+ - kind: Group
+ group: user.openshift.io
+ version: v1
+ plural: groups
+ namespaced: false
+ - kind: Identity
+ group: user.openshift.io
+ version: v1
+ plural: identities
+ namespaced: false
+ - kind: UserIdentityMapping
+ group: user.openshift.io
+ version: v1
+ plural: useridentitymappings
+ namespaced: false
+ - kind: User
+ group: user.openshift.io
+ version: v1
+ plural: users
+ namespaced: false
+- name: Workloads APIs
+ resources:
+ - kind: BuildConfig
+ group: build.openshift.io
+ version: v1
+ plural: buildconfigs
+ namespaced: true
+ - kind: Build
+ group: build.openshift.io
+ version: v1
+ plural: builds
+ namespaced: true
+ - kind: CronJob
+ group: batch
+ version: v1
+ plural: cronjobs
+ namespaced: true
+ - kind: DaemonSet
+ group: apps
+ version: v1
+ plural: daemonsets
+ namespaced: true
+ - kind: Deployment
+ group: apps
+ version: v1
+ plural: deployments
+ namespaced: true
+ - kind: DeploymentConfig
+ group: apps.openshift.io
+ version: v1
+ plural: deploymentconfigs
+ namespaced: true
+ - kind: Job
+ group: batch
+ version: v1
+ plural: jobs
+ namespaced: true
+ - kind: Pod
+ group: core
+ version: v1
+ plural: pods
+ namespaced: true
+ - kind: ReplicationController
+ group: core
+ version: v1
+ plural: replicationcontrollers
+ namespaced: true
+ - kind: PersistentVolume
+ group: core
+ version: v1
+ plural: persistentvolumes
+ namespaced: false
+ - kind: ReplicaSet
+ group: apps
+ version: v1
+ plural: replicasets
+ namespaced: true
+ - kind: StatefulSet
+ group: apps
+ version: v1
+ plural: statefulsets
+ namespaced: true
+
+# No properties defined in OpenAPI spec
+# - kind: OverlappingRangeIPReservation
+# group: whereabouts.cni.cncf.io
+# version: v1alpha1
+# plural: overlappingrangeipreservations
+# namespaced: true
diff --git a/applications/application-health.adoc b/applications/application-health.adoc
new file mode 100644
index 000000000000..b6ac30dc221a
--- /dev/null
+++ b/applications/application-health.adoc
@@ -0,0 +1,31 @@
+:context: application-health
+[id="application-health"]
+= Monitoring application health by using health checks
+include::modules/common-attributes.adoc[]
+
+toc::[]
+
+
+In software systems, components can become unhealthy due to transient issues such as temporary connectivity loss, configuration errors, or problems with external dependencies. {product-title} provides a number of options to detect and handle unhealthy containers.
+
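+As a brief orientation before the modules that follow, these options are expressed as probes on a container in the pod specification. The following minimal sketch uses hypothetical paths, ports, and image names:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example-health-check
+spec:
+  containers:
+  - name: app
+    image: registry.example.com/app:latest  # hypothetical image
+    livenessProbe:         # the container is restarted when this check fails
+      httpGet:
+        path: /healthz     # hypothetical endpoint
+        port: 8080
+      initialDelaySeconds: 15
+    readinessProbe:        # traffic is not routed until this check passes
+      httpGet:
+        path: /ready       # hypothetical endpoint
+        port: 8080
+----
+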
+// The following include statements pull in the module files that comprise
+// the assembly. Include any combination of concept, procedure, or reference
+// modules required to cover the user story. You can also include other
+// assemblies.
+
+
+include::modules/application-health-about.adoc[leveloffset=+1]
+
+include::modules/application-health-configuring.adoc[leveloffset=+1]
+
+include::modules/odc-monitoring-application-health-using-developer-perspective.adoc[leveloffset=+1]
+
+include::modules/odc-adding-health-checks.adoc[leveloffset=+1]
+
+include::modules/odc-editing-health-checks.adoc[leveloffset=+1]
+
+include::modules/odc-monitoring-health-checks.adoc[leveloffset=+1]
+
+.Additional resources
+* For details on switching to the *Developer* perspective in the web console, see xref:../web_console/odc-about-developer-perspective.adoc#odc-about-developer-perspective[About *Developer* perspective].
+* For details on adding health checks while creating and deploying an application, see *Advanced Options* in the xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective] section.
diff --git a/applications/config-maps.adoc b/applications/config-maps.adoc
new file mode 100644
index 000000000000..c4b2b8d17a09
--- /dev/null
+++ b/applications/config-maps.adoc
@@ -0,0 +1,26 @@
+[id="config-maps"]
+= Using config maps with applications
+include::modules/common-attributes.adoc[]
+:context: config-maps
+
+toc::[]
+
+Config maps allow you to decouple configuration artifacts from image content to keep containerized applications portable.
+
+The following sections define config maps and how to create and use them.
+
+For information on creating config maps, see xref:../nodes/pods/nodes-pods-configmaps.adoc[Creating and using config maps].
+
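+For orientation, a config map is a simple object that holds configuration as key-value pairs, which pods can then consume. The names and values in this sketch are hypothetical:
+
+[source,yaml]
+----
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: example-config     # hypothetical name
+  namespace: my-project    # hypothetical project
+data:
+  log.level: "info"
+  app.properties: |        # keys can also hold whole configuration files
+    color=blue
+    retries=3
+----
+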
+include::modules/nodes-pods-configmap-overview.adoc[leveloffset=+1]
+
+[id="nodes-pods-config-maps-consuming-configmap-in-pods"]
+== Use cases: Consuming config maps in pods
+
+The following sections describe some use cases for consuming `ConfigMap`
+objects in pods.
+
+include::modules/nodes-pods-configmaps-use-case-consuming-in-env-vars.adoc[leveloffset=+2]
+
+include::modules/nodes-pods-configmaps-use-case-setting-command-line-arguments.adoc[leveloffset=+2]
+
+include::modules/nodes-pods-configmaps-use-case-consuming-in-volumes.adoc[leveloffset=+2]
diff --git a/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
new file mode 100644
index 000000000000..621a53c8a469
--- /dev/null
+++ b/applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc
@@ -0,0 +1,20 @@
+[id="exposing-binding-data-from-a-service"]
+= Exposing binding data from a service
+include::modules/common-attributes.adoc[]
+include::modules/servicebinding-document-attributes.adoc[]
+:context: exposing-binding-data-from-a-service
+
+toc::[]
+
+Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider requires a different way to access its secrets and consume them in a workload.
+
+The {servicebinding-title} enables application developers to easily bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection. For the {servicebinding-title} to provide the binding data, as an Operator provider or a user who creates backing services, you must expose the binding data so that the {servicebinding-title} can detect it automatically. Then, the {servicebinding-title} automatically collects the binding data from the backing service and shares it with a workload to provide a consistent and predictable experience.
+
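+As a preview of the methods described below, one way to expose binding data is through annotations on the backing service resource. The following sketch assumes a hypothetical `Database` custom resource and hypothetical field names:
+
+[source,yaml]
+----
+apiVersion: example.com/v1alpha1   # hypothetical backing service API
+kind: Database
+metadata:
+  name: example-db
+  annotations:
+    # expose the "username" key of the Secret referenced in the status
+    service.binding/username: path={.status.credentials},objectType=Secret,sourceKey=username
+----
+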
+include::modules/sbo-methods-of-exposing-binding-data.adoc[leveloffset=+1]
+
+include::modules/sbo-categories-of-exposable-binding-data.adoc[leveloffset=+1]
+
+== Additional resources
+* link:https://github.com/openshift/console/blob/master/frontend/packages/operator-lifecycle-manager/src/components/descriptors/reference/reference.md[OLM Descriptor Reference].
+* xref:../../operators/operator_sdk/osdk-generating-csvs.adoc#osdk-generating-csvs[Defining cluster service versions (CSVs)].
+* xref:../../applications/connecting_applications_to_services/projecting-binding-data.adoc#projecting-binding-data[Projecting binding data].
diff --git a/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc
new file mode 100644
index 000000000000..71b73d37356d
--- /dev/null
+++ b/applications/connecting_applications_to_services/getting-started-with-service-binding.adoc
@@ -0,0 +1,33 @@
+[id="getting-started-with-service-binding"]
+= Getting started with service binding
+include::modules/common-attributes.adoc[]
+include::modules/servicebinding-document-attributes.adoc[]
+:context: getting-started-with-service-binding
+
+toc::[]
+
+{servicebinding-title} manages the data plane for workloads and backing services. This guide provides instructions with examples to help you create a database instance, deploy an application, and use {servicebinding-title} to create a binding connection between the application and the database service.
+
+// Prerequisites for getting started with Service Binding Operator
+[discrete]
+== Prerequisites
+
+* You have access to an {product-title} cluster using an account with `cluster-admin` permissions.
+* You have installed the `oc` CLI.
+* You have installed the PostgreSQL `psql` CLI.
+* You have installed {servicebinding-title} from OperatorHub.
+* You have installed the Crunchy Postgres for Kubernetes Operator from OperatorHub using the *v5* Update channel. The installed Operator is available in an appropriate namespace, such as the `my-postgresql` namespace.
+
+//Creating a PostgreSQL database instance
+include::modules/sbo-creating-a-postgresql-database-instance.adoc[leveloffset=+1]
+
+//Deploying the Spring PetClinic sample application
+include::modules/sbo-deploying-the-spring-petclinic-sample-application.adoc[leveloffset=+1]
+
+//Connecting the Spring PetClinic sample application to the PostgreSQL database service
+include::modules/sbo-connecting-spring-petclinic-sample-app-to-postgresql-database-service.adoc[leveloffset=+1]
+
+== Additional resources
+* xref:../../applications/connecting_applications_to_services/installing-sbo.adoc#installing-sbo[Installing Service Binding Operator].
+* xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective].
+* xref:../../operators/understanding/crds/crd-managing-resources-from-crds.adoc[Managing resources from custom resource definitions].
diff --git a/cnv/cnv_install/images b/applications/connecting_applications_to_services/images
similarity index 100%
rename from cnv/cnv_install/images
rename to applications/connecting_applications_to_services/images
diff --git a/applications/connecting_applications_to_services/installing-sbo.adoc b/applications/connecting_applications_to_services/installing-sbo.adoc
new file mode 100644
index 000000000000..bd6e07632fad
--- /dev/null
+++ b/applications/connecting_applications_to_services/installing-sbo.adoc
@@ -0,0 +1,26 @@
+[id="installing-sbo"]
+= Installing Service Binding Operator
+include::modules/common-attributes.adoc[]
+include::modules/servicebinding-document-attributes.adoc[]
+:context: installing-sbo
+
+toc::[]
+
+This guide walks cluster administrators through the process of installing the {servicebinding-title} on an {product-title} cluster.
+
+You can install {servicebinding-title} on {product-title} 4.7 and later.
+
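+For reference, an installation through Operator Lifecycle Manager is represented by a `Subscription` object similar to the following sketch. The package name and channel here are assumptions; the web console procedure below is the documented path:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: rh-service-binding-operator   # assumed package name
+  namespace: openshift-operators
+spec:
+  channel: stable                     # assumed channel
+  name: rh-service-binding-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+----
+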
+// Prerequisites for installing Service Binding Operator
+//[discrete]
+//== Prerequisites
+
+//You have access to an {product-title} cluster using an account with `cluster-admin` permissions.
+
+
+//Installing Service Binding Operator using web console
+
+include::modules/op-installing-sbo-operator-using-the-web-console.adoc[leveloffset=+1]
+
+
+== Additional resources
+* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding].
diff --git a/applications/connecting_applications_to_services/modules b/applications/connecting_applications_to_services/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/applications/connecting_applications_to_services/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
new file mode 100644
index 000000000000..9867346ee0c4
--- /dev/null
+++ b/applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc
@@ -0,0 +1,90 @@
+[id="odc-connecting-an-application-to-a-service-using-the-developer-perspective"]
+= Connecting an application to a service using the Developer perspective
+include::modules/common-attributes.adoc[]
+include::modules/servicebinding-document-attributes.adoc[]
+:context: odc-connecting-an-application-to-a-service-using-the-developer-perspective
+
+toc::[]
+
+In addition to grouping multiple components within an application, you can also use the *Topology* view to connect components with each other. You can use either a binding connector or a visual one to connect components.
+
+A binding connection between the components can be established only if the target node is an Operator-backed service. This is indicated by the *Create a binding connector* tooltip that appears when you drag an arrow to such a target node. When an application is connected to a service by using a binding connector, a `ServiceBinding` resource is created. Then, the Service Binding Operator controller projects the necessary binding data into the application deployment. After the request is successful, the application is redeployed, establishing an interaction between the connected components.
+
+A visual connector establishes only a visual connection between the components, depicting an intent to connect. No interaction between the components is established. If the target node is not an Operator-backed service, the *Create a visual connector* tooltip is displayed when you drag an arrow to a target node.
+
+== Creating a visual connection between components
+You can depict an intent to connect application components using the visual connector.
+
+This procedure walks you through an example of creating a visual connection between a PostgreSQL Database service and a Spring PetClinic sample application.
+
+.Prerequisites
+
+* Ensure that you have created and deployed a Spring PetClinic sample application using the *Developer* perspective.
+* Ensure that you have created and deployed a Crunchy PostgreSQL database instance using the *Developer* perspective. This instance has the following three components: `hippo-backup`, `hippo-instance`, and `hippo-pgbouncer`.
+
+.Procedure
+
+. Hover over the Spring PetClinic sample application to see a dangling arrow on the node.
++
+.Visual connector
+image::odc_connector.png[]
+. Click and drag the arrow towards the `hippo-pgbouncer` deployment to connect the Spring PetClinic sample application with it.
+. Click the `spring-petclinic-rest` deployment to see the *Overview* panel. Under the *Details* tab, click the edit icon in the *Annotations* section to see the *Key = `app.openshift.io/connects-to`* and *Value = `[{"apiVersion":"apps/v1","kind":"Deployment","name":"hippo-pgbouncer"}]`* annotation added to the deployment.
+
+Similarly, you can create other applications and components and establish visual connections between them.
+
+.Connecting multiple applications
+image::odc_connecting_multiple_applications.png[]
+
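+In YAML form, the visual connection shown above is stored as the annotation from step 3 on the source deployment. This sketch reuses the names from this procedure:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: spring-petclinic-rest
+  annotations:
+    app.openshift.io/connects-to: >-
+      [{"apiVersion":"apps/v1","kind":"Deployment","name":"hippo-pgbouncer"}]
+----
+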
+== Creating a binding connection between components
+You can establish a binding connection with Operator-backed components.
+
+This procedure walks through an example of creating a binding connection between a PostgreSQL Database service and a Spring PetClinic sample application. To create a binding connection with a service that is backed by the PostgreSQL Database Operator, you must first add the Red Hat-provided PostgreSQL Database Operator to the *OperatorHub*, and then install the Operator.
+The PostgreSQL Database Operator then creates and manages the `Database` resource, which exposes the binding information in secrets, config maps, status, and spec attributes.
+
+.Prerequisites
+* Ensure that you have created and deployed a Spring PetClinic sample application using the *Developer* perspective.
+* Ensure that you have installed the {servicebinding-title} from the OperatorHub.
+* Ensure that you have installed the *Crunchy Postgres for Kubernetes* Operator from the OperatorHub using the *v5* Update channel.
+* Ensure that you have created and deployed a Crunchy PostgreSQL database instance using the *Developer* perspective. This instance has the following three components: `hippo-backup`, `hippo-instance`, and `hippo-pgbouncer`.
+
+.Procedure
+. Switch to the *Developer* perspective and ensure that you are in the appropriate project, for example, `my-postgresql`. In the *Topology* view, hover over the Spring PetClinic sample application to see a dangling arrow on the node.
+. Click and drag the arrow towards the *hippo* database Postgres Cluster to make a binding connection with the Spring PetClinic sample application.
++
+Alternatively, in the *+Add* view, click the *YAML* option to see the *Import YAML* screen. Use the YAML editor and add the `ServiceBinding` resource:
++
+[source,YAML]
+----
+apiVersion: binding.operators.coreos.com/v1alpha1
+kind: ServiceBinding
+metadata:
+ name: spring-petclinic-rest
+ namespace: my-postgresql
+spec:
+ services:
+ - group: postgres-operator.crunchydata.com
+ version: v1beta1
+ kind: PostgresCluster
+ name: hippo
+ application:
+ name: spring-petclinic-rest
+ group: apps
+ version: v1
+ resource: deployments
+----
+
+A service binding request is created and the {servicebinding-title} controller projects the database service connection information into the application deployment as files using a volume mount. After the request is successful, the application is redeployed and the connection is established.
+
+.Binding connector
+image::odc-binding-connector.png[]
+
+[NOTE]
+====
+You can also drag the dangling arrow and use the context menu to add and create a binding connection to an Operator-backed service.
+
+.Context menu to create binding connection
+image::odc_context_operator.png[]
+====
+
+== Additional resources
+* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding].
diff --git a/applications/connecting_applications_to_services/projecting-binding-data.adoc b/applications/connecting_applications_to_services/projecting-binding-data.adoc
new file mode 100644
index 000000000000..1bfc91364417
--- /dev/null
+++ b/applications/connecting_applications_to_services/projecting-binding-data.adoc
@@ -0,0 +1,21 @@
+[id="projecting-binding-data"]
+= Projecting binding data
+include::modules/common-attributes.adoc[]
+include::modules/servicebinding-document-attributes.adoc[]
+:context: projecting-binding-data
+
+toc::[]
+
+This section provides information on how you can consume the binding data.
+
+== Consumption of binding data
+After the backing service exposes the binding data, a workload must access and consume this data, so you must project it into the workload from the backing service. {servicebinding-title} automatically projects this set of data into the workload using the following methods:
+
+* By default, as files.
+* As environment variables, after you set the `.spec.bindAsFiles` parameter to `false` in the `ServiceBinding` resource, as shown in the following example.
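+
+A minimal sketch, reusing the service and application names from the PostgreSQL example earlier in this document, of a `ServiceBinding` resource that projects the binding data as environment variables instead of files:
+
+[source,yaml]
+----
+apiVersion: binding.operators.coreos.com/v1alpha1
+kind: ServiceBinding
+metadata:
+  name: spring-petclinic-rest
+  namespace: my-postgresql
+spec:
+  bindAsFiles: false # when false, project binding data as environment variables
+  services:
+  - group: postgres-operator.crunchydata.com
+    version: v1beta1
+    kind: PostgresCluster
+    name: hippo
+  application:
+    name: spring-petclinic-rest
+    group: apps
+    version: v1
+    resource: deployments
+----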
+
+include::modules/sbo-configuration-of-directory-path-to-project-binding-data.adoc[leveloffset=+1]
+include::modules/sbo-projecting-the-binding-data.adoc[leveloffset=+1]
+
+== Additional resources
+* xref:../../applications/connecting_applications_to_services/exposing-binding-data-from-a-service.adoc#exposing-binding-data-from-a-service[Exposing binding data from a service].
diff --git a/applications/connecting_applications_to_services/sbo-release-notes.adoc b/applications/connecting_applications_to_services/sbo-release-notes.adoc
new file mode 100644
index 000000000000..d5a32fb942c6
--- /dev/null
+++ b/applications/connecting_applications_to_services/sbo-release-notes.adoc
@@ -0,0 +1,29 @@
+//OpenShift Service Binding Release Notes
+include::modules/servicebinding-document-attributes.adoc[]
+[id="servicebinding-release-notes"]
+= {servicebinding-title} release notes
+:context: servicebinding-release-notes
+include::modules/common-attributes.adoc[]
+
+toc::[]
+
+The {servicebinding-title} consists of a controller and an accompanying custom resource definition (CRD) for service binding. It manages the data plane for workloads and backing services. The Service Binding Controller reads the data made available by the control plane of backing services. Then, it projects this data to workloads according to the rules specified through the `ServiceBinding` resource.
+
+With {servicebinding-title}, you can:
+
+* Bind your workloads together with Operator-managed backing services.
+* Automate configuration of binding data.
+* Provide service operators with a low-touch administrative experience to provision and manage access to services.
+* Enrich the development lifecycle with a consistent and declarative service binding method that eliminates discrepancies in cluster environments.
+
+
+[id="servicebinding-inclusive-language"]
+== Making open source more inclusive
+
+Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see link:https://www.redhat.com/en/blog/making-open-source-more-inclusive-eradicating-problematic-language[Red Hat CTO Chris Wright's message].
+
+// Modules included, most to least recent
+include::modules/sbo-release-notes-1-0.adoc[leveloffset=+1]
+
+== Additional resources
+* xref:../../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Understanding Service Binding Operator].
diff --git a/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
new file mode 100644
index 000000000000..1da15d4f83c6
--- /dev/null
+++ b/applications/connecting_applications_to_services/understanding-service-binding-operator.adoc
@@ -0,0 +1,81 @@
+[id="understanding-service-binding-operator"]
+= Understanding Service Binding Operator
+include::modules/common-attributes.adoc[]
+include::modules/servicebinding-document-attributes.adoc[]
+:context: understanding-service-binding-operator
+
+toc::[]
+
+
+Application developers need access to backing services to build and connect workloads. Connecting workloads to backing services is always a challenge because each service provider suggests a different way to access its secrets and consume them in a workload. In addition, manually configuring and maintaining this binding between workloads and backing services makes the process tedious, inefficient, and error-prone.
+
+The {servicebinding-title} enables application developers to easily bind workloads together with Operator-managed backing services, without any manual procedures to configure the binding connection.
+
+[id="service-binding-terminology"]
+== Service Binding terminology
+This section summarizes the basic terms used in Service Binding.
+
+[horizontal]
+Service binding:: The representation of the action of providing information about a service to a workload. An example is establishing the exchange of credentials between a Java application and the database that it requires.
+Backing service:: Any service or software that the application consumes over the network as part of its normal operation. Examples include a database, a message broker, an application with REST endpoints, an event stream, an Application Performance Monitor (APM), or a Hardware Security Module (HSM).
+Workload (application):: Any process running within a container. Examples include a Spring Boot application, a NodeJS Express application, or a Ruby on Rails application.
+Binding data:: Information about a service that you use to configure the behavior of other resources within the cluster. Examples include credentials, connection details, volume mounts, or secrets.
+Binding connection:: Any connection that establishes an interaction between the connected components, such as a bindable backing service and an application requiring that backing service.
+
+
+[id="about-service-binding-operator"]
+== About {servicebinding-title}
+The {servicebinding-title} consists of a controller and an accompanying custom resource definition (CRD) for service binding. It manages the data plane for workloads and backing services. The Service Binding Controller reads the data made available by the control plane of backing services. Then, it projects this data to workloads according to the rules specified through the `ServiceBinding` resource.
+
+As a result, the {servicebinding-title} enables workloads to use backing services or external services by automatically collecting and sharing binding data with the workloads. The process involves making the backing service bindable and binding the workload and the service together.
+
+
+[id="making-an-operator-managed-backing-service-bindable"]
+=== Making an Operator-managed backing service bindable
+To make a service bindable, as an Operator provider you need to expose the binding data required by workloads to bind with the services provided by the Operator. You can provide the binding data either as annotations or as descriptors in the CRD of the Operator that manages the backing service.
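+
+For example, a minimal sketch of the annotation approach; the CRD name and field path here are illustrative:
+
+[source,yaml]
+----
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: databases.example.com
+  annotations:
+    # Expose the value of the .status.host field of each custom resource
+    # as a binding item named "host".
+    service.binding/host: path={.status.host}
+----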
+
+
+[id="binding-a-workload-together-with-a-backing-service"]
+=== Binding a workload together with a backing service
+By using the {servicebinding-title}, as an application developer, you need to declare the intent of establishing a binding connection. You must create a `ServiceBinding` CR that references the backing service. This action triggers the {servicebinding-title} to project the exposed binding data into the workload. The {servicebinding-title} receives the declared intent and binds the workload together with the backing service.
+
+The CRD of the {servicebinding-title} supports the following APIs:
+
+* *Service Binding* with the `binding.operators.coreos.com` API group.
+* *Service Binding (Spec API Tech Preview)* with the `servicebinding.io` API group.
++
+[IMPORTANT]
+====
+*Service Binding (Spec API Tech Preview)* with the `servicebinding.io` API group is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process.
+For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview/.
+====
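+
+For illustration, a minimal sketch of a binding expressed with the `servicebinding.io` API group; the API version and names shown are assumptions and should be verified against the installed CRD:
+
+[source,yaml]
+----
+apiVersion: servicebinding.io/v1alpha3
+kind: ServiceBinding
+metadata:
+  name: spring-petclinic-rest
+spec:
+  service:
+    apiVersion: postgres-operator.crunchydata.com/v1beta1
+    kind: PostgresCluster
+    name: hippo
+  workload:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: spring-petclinic-rest
+----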
+
+With {servicebinding-title}, you can:
+
+* Bind your workloads to Operator-managed backing services.
+* Automate configuration of binding data.
+* Provide service operators with a low-touch administrative experience to provision and manage access to services.
+* Enrich the development lifecycle with a consistent and declarative service binding method that eliminates discrepancies in cluster environments.
+
+
+[id="sbo-key-features"]
+== Key features
+
+* Exposure of binding data from services
+** Based on annotations present in CRDs, custom resources (CRs), or resources.
+** Based on Operator Lifecycle Manager (OLM) descriptors.
+* Workload projection
+** Projection of binding data as files, with volume mounts.
+** Projection of binding data as environment variables.
+* Service binding options
+** Bind backing services in a namespace that is different from the workload namespace.
+** Project binding data into specific containers of the workload.
+** Auto-detection of the binding data from resources owned by the backing service CR.
+** Compose custom binding data from the exposed binding data.
+** Support for non-`PodSpec` compliant workload resources.
+* Security
+** Support for role-based access control (RBAC).
+
+
+== Additional resources
+* xref:../../applications/connecting_applications_to_services/getting-started-with-service-binding.adoc#getting-started-with-service-binding[Getting started with service binding].
diff --git a/applications/crds/crd-extending-api-with-crds.adoc b/applications/crds/crd-extending-api-with-crds.adoc
deleted file mode 100644
index 13a7ab7e2e42..000000000000
--- a/applications/crds/crd-extending-api-with-crds.adoc
+++ /dev/null
@@ -1,15 +0,0 @@
-[id="crd-extending-api-with-crds"]
-= Extending the Kubernetes API with Custom Resource Definitions
-include::modules/common-attributes.adoc[]
-:context: crd-extending-api-with-crds
-
-toc::[]
-
-This guide describes how cluster administrators can extend their {product-title}
-cluster by creating and managing Custom Resource Definitions (CRDs).
-
-include::modules/crd-custom-resource-definitions.adoc[leveloffset=+1]
-include::modules/crd-creating-crds.adoc[leveloffset=+1]
-include::modules/crd-creating-aggregated-cluster-roles.adoc[leveloffset=+1]
-include::modules/crd-creating-custom-resources-from-file.adoc[leveloffset=+1]
-include::modules/crd-inspecting-custom-resources.adoc[leveloffset=+1]
diff --git a/applications/crds/crd-managing-resources-from-crds.adoc b/applications/crds/crd-managing-resources-from-crds.adoc
deleted file mode 100644
index 43df2fd7ee5c..000000000000
--- a/applications/crds/crd-managing-resources-from-crds.adoc
+++ /dev/null
@@ -1,13 +0,0 @@
-[id="crd-managing-resources-from-crds"]
-= Managing resources from Custom Resource Definitions
-include::modules/common-attributes.adoc[]
-:context: crd-managing-resources-from-crds
-
-toc::[]
-
-This guide describes how developers can manage Custom Resources (CRs) that come
-from Custom Resource Definitions (CRDs).
-
-include::modules/crd-custom-resource-definitions.adoc[leveloffset=+1]
-include::modules/crd-creating-custom-resources-from-file.adoc[leveloffset=+1]
-include::modules/crd-inspecting-custom-resources.adoc[leveloffset=+1]
diff --git a/applications/creating_applications/creating-applications-using-cli.adoc b/applications/creating_applications/creating-applications-using-cli.adoc
new file mode 100644
index 000000000000..659af8fae33d
--- /dev/null
+++ b/applications/creating_applications/creating-applications-using-cli.adoc
@@ -0,0 +1,21 @@
+[id="creating-applications-using-cli"]
+= Creating applications using the CLI
+include::modules/common-attributes.adoc[]
+:context: creating-applications-using-cli
+
+toc::[]
+
+You can create an {product-title} application from components that include
+source or binary code, images, and templates by using the {product-title}
+CLI.
+
+The set of objects created by `new-app` depends on the artifacts passed as
+input: source repositories, images, or templates.
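+
+For example, each input type maps to a different `new-app` invocation; the repository URL, image, and template names here are illustrative:
+
+[source,terminal]
+----
+$ oc new-app https://github.com/sclorg/cakephp-ex  # from a source repository
+$ oc new-app mysql                                 # from an existing image
+$ oc new-app -f mytemplate.json                    # from a template file
+----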
+
+include::modules/applications-create-using-cli-source-code.adoc[leveloffset=+1]
+
+include::modules/applications-create-using-cli-image.adoc[leveloffset=+1]
+
+include::modules/applications-create-using-cli-template.adoc[leveloffset=+1]
+
+include::modules/applications-create-using-cli-modify.adoc[leveloffset=+1]
diff --git a/applications/creating_applications/creating-apps-from-installed-operators.adoc b/applications/creating_applications/creating-apps-from-installed-operators.adoc
new file mode 100644
index 000000000000..eb225a957e9b
--- /dev/null
+++ b/applications/creating_applications/creating-apps-from-installed-operators.adoc
@@ -0,0 +1,22 @@
+[id="creating-apps-from-installed-operators"]
+= Creating applications from installed Operators
+include::modules/common-attributes.adoc[]
+:context: creating-apps-from-installed-operators
+
+toc::[]
+
+_Operators_ are a method of packaging, deploying, and managing a Kubernetes
+application. You can create applications on {product-title} using Operators that
+have been installed by a cluster administrator.
+
+This guide walks developers through an example of creating applications from an
+installed Operator using the {product-title} web console.
+
+.Additional resources
+
+* See the
+xref:../../operators/understanding/olm-what-operators-are.adoc#olm-what-operators-are[Operators]
+guide for more on how Operators work and how the Operator Lifecycle Manager is
+integrated in {product-title}.
+
+include::modules/olm-creating-etcd-cluster-from-operator.adoc[leveloffset=+1]
diff --git a/cnv/cnv_release_notes/images b/applications/creating_applications/images
similarity index 100%
rename from cnv/cnv_release_notes/images
rename to applications/creating_applications/images
diff --git a/applications/creating_applications/modules b/applications/creating_applications/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/applications/creating_applications/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
new file mode 100644
index 000000000000..42e64ca49649
--- /dev/null
+++ b/applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc
@@ -0,0 +1,71 @@
+[id="odc-creating-applications-using-developer-perspective"]
+= Creating applications using the Developer perspective
+include::modules/common-attributes.adoc[]
+include::modules/serverless-document-attributes.adoc[]
+:context: odc-creating-applications-using-developer-perspective
+
+toc::[]
+
+The *Developer* perspective in the web console provides you with the following options from the *+Add* view to create applications and associated services and deploy them on {product-title}:
+
+* *Getting started resources*: Use these resources to help you get started with the Developer Console. You can choose to hide the header using the *Options* menu {kebab}.
+** *Creating applications using samples*: Use existing code samples to get started with creating applications on {product-title}.
+** *Build with guided documentation*: Follow the guided documentation to build applications and familiarize yourself with key concepts and terminologies.
+** *Explore new developer features*: Explore the new features and resources within the *Developer* perspective.
+
+* *Developer catalog*: Explore the Developer Catalog to select the required applications, services, or source-to-image builders, and then add them to your project.
+** *All Services*: Browse the catalog to discover services across {product-title}.
+** *Database*: Select the required database service and add it to your application.
+** *Operator Backed*: Select and deploy the required Operator-managed service.
+** *Helm chart*: Select the required Helm chart to simplify deployment of applications and services.
+** *Event Source*: Select an event source to register interest in a class of events from a particular system.
++
+[NOTE]
+====
+The Managed services option is also available if the RHOAS Operator is installed.
+====
+
+* *Git repository*: Import an existing codebase, Devfile, or Dockerfile from your Git repository using the *From Git*, *From Devfile*, or *From Dockerfile* options, respectively, to build and deploy an application on {product-title}.
+
+* *Container images*: Use existing images from an image stream or registry to deploy an application on {product-title}.
+
+* *Pipelines*: Use Tekton pipelines to create CI/CD pipelines for your software delivery process on {product-title}.
+
+* *Serverless*: Explore the *Serverless* options to create, build, and deploy stateless and serverless applications on {product-title}.
+** *Channel*: Create a Knative channel to create an event forwarding and persistence layer with in-memory and reliable implementations.
+
+* *Samples*: Explore the available sample applications to create, build, and deploy an application quickly.
+
+* *From Local Machine*: Explore the *From Local Machine* tile to import or upload files from your local machine to build and deploy applications easily.
+** *Import YAML*: Upload a YAML file to create and define resources for building and deploying applications.
+** *Upload JAR file*: Upload a JAR file to build and deploy Java applications.
+
+ifdef::openshift-enterprise,openshift-webscale[]
+Note that certain options, such as *Pipelines*, *Event Source*, and *Import Virtual Machines*, are displayed only when the xref:../../cicd/pipelines/installing-pipelines.adoc#op-installing-pipelines-operator-in-web-console_installing-pipelines[OpenShift Pipelines Operator], xref:../../serverless/admin_guide/install-serverless-operator.adoc#serverless-install-web-console_install-serverless-operator[{ServerlessOperatorName}], and xref:../../virt/install/installing-virt-web.adoc#virt-subscribing-to-the-catalog_installing-virt-web[OpenShift Virtualization Operator] are installed, respectively.
+endif::[]
+
+[id="prerequisites_odc-creating-applications-using-developer-perspective"]
+== Prerequisites
+
+To create applications using the *Developer* perspective, ensure that:
+
+* You have xref:../../web_console/web-console.adoc#web-console[logged in to the web console].
+* You are in the xref:../../web_console/odc-about-developer-perspective.adoc#odc-about-developer-perspective[*Developer* perspective].
+* You have created a project or have access to a project with the appropriate xref:../../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] to create applications and other workloads in {product-title}.
+
+ifdef::openshift-enterprise,openshift-webscale[]
+
+To create serverless applications, in addition to the preceding prerequisites, ensure that:
+
+* You have xref:../../serverless/admin_guide/install-serverless-operator.adoc#install-serverless-operator[installed the {ServerlessOperatorName}].
+* You have xref:../../serverless/admin_guide/installing-knative-serving.adoc#installing-knative-serving[created a `KnativeServing` resource in the `knative-serving` namespace].
+
+endif::[]
+
+include::modules/odc-creating-sample-applications.adoc[leveloffset=+1]
+
+include::modules/odc-importing-codebase-from-git-to-create-application.adoc[leveloffset=+1]
+
+include::modules/odc-deploying-java-applications.adoc[leveloffset=+1]
+
+include::modules/odc-using-the-developer-catalog-to-add-services-or-components.adoc[leveloffset=+1]
diff --git a/applications/deployments/deployment-strategies.adoc b/applications/deployments/deployment-strategies.adoc
index e651fdd5a35a..fb8c34528386 100644
--- a/applications/deployments/deployment-strategies.adoc
+++ b/applications/deployments/deployment-strategies.adoc
@@ -1,23 +1,15 @@
[id="deployment-strategies"]
-= Using DeploymentConfig strategies
+= Using deployment strategies
include::modules/common-attributes.adoc[]
:context: deployment-strategies
toc::[]
-A _deployment strategy_ is a way to change or upgrade an application. The aim
-is to make the change without downtime in a way that the user barely notices the
-improvements.
+A _deployment strategy_ is a way to change or upgrade an application. The aim is to make the change without downtime in a way that the user barely notices the improvements.
-Because the end user usually accesses the application through a route handled by
-a router, the deployment strategy can focus on DeploymentConfig features or
-routing features. Strategies that focus on the DeploymentConfig impact all
-routes that use the application. Strategies that use router features target
-individual routes.
+Because the end user usually accesses the application through a route handled by a router, the deployment strategy can focus on `DeploymentConfig` object features or routing features. Strategies that focus on the deployment impact all routes that use the application. Strategies that use router features target individual routes.
-Many deployment strategies are supported through the DeploymentConfig, and some
-additional strategies are supported through router features. DeploymentConfig
-strategies are discussed in this section.
+Many deployment strategies are supported through the `DeploymentConfig` object, and some additional strategies are supported through router features. Deployment strategies are discussed in this section.
////
@@ -35,23 +27,26 @@ xref:../../applications/deployments/route-based-deployment-strategies.adoc#route
Consider the following when choosing a deployment strategy:
- Long-running connections must be handled gracefully.
-- Database conversions can be complex and must be done and rolled back along with
-the application.
-- If the application is a hybrid of microservices and traditional components,
-downtime might be required to complete the transition.
+- Database conversions can be complex and must be done and rolled back along with the application.
+- If the application is a hybrid of microservices and traditional components, downtime might be required to complete the transition.
- You must have the infrastructure to do this.
-- If you have a non-isolated test environment, you can break both new and old
-versions.
+- If you have a non-isolated test environment, you can break both new and old versions.
-A deployment strategy uses readiness checks to determine if a new Pod is ready
-for use. If a readiness check fails, the DeploymentConfig retries to run the
-Pod until it times out. The default timeout is `10m`, a value set in
-`TimeoutSeconds` in `dc.spec.strategy.*params`.
+A deployment strategy uses readiness checks to determine if a new pod is ready for use. If a readiness check fails, the `DeploymentConfig` object retries to run the pod until it times out. The default timeout is `10m`, a value set in `TimeoutSeconds` in `dc.spec.strategy.*params`.
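+
+As a sketch, this timeout lives under the strategy parameters of the `DeploymentConfig` object; the value shown is the default:
+
+[source,yaml]
+----
+spec:
+  strategy:
+    type: Rolling
+    rollingParams:
+      timeoutSeconds: 600 # 10 minutes to wait for a pod to become ready
+----
+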
include::modules/deployments-rolling-strategy.adoc[leveloffset=+1]
include::modules/deployments-canary-deployments.adoc[leveloffset=+2]
include::modules/deployments-creating-rolling-deployment.adoc[leveloffset=+2]
+include::modules/odc-starting-rolling-deployment.adoc[leveloffset=+2]
+.Additional resources
+- xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective]
+- xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view]
include::modules/deployments-recreate-strategy.adoc[leveloffset=+1]
+include::modules/odc-starting-recreate-deployment.adoc[leveloffset=+1]
+.Additional resources
+- xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[Creating and deploying applications on {product-title} using the *Developer* perspective]
+- xref:../../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-composition-using-topology-view[Viewing the applications in your project, verifying their deployment status, and interacting with them in the *Topology* view]
+
include::modules/deployments-custom-strategy.adoc[leveloffset=+1]
include::modules/deployments-lifecycle-hooks.adoc[leveloffset=+1]
diff --git a/applications/deployments/managing-deployment-processes.adoc b/applications/deployments/managing-deployment-processes.adoc
index 1fe591eb371b..d8512cfbf3c0 100644
--- a/applications/deployments/managing-deployment-processes.adoc
+++ b/applications/deployments/managing-deployment-processes.adoc
@@ -6,11 +6,9 @@ include::modules/common-attributes.adoc[]
toc::[]
[id="deploymentconfig-operations"]
-== Managing DeploymentConfigs
+== Managing DeploymentConfig objects
-DeploymentConfigs can be managed from the {product-title} web console's
-*Workloads* page or using the `oc` CLI. The following procedures show CLI usage
-unless otherwise stated.
+`DeploymentConfig` objects can be managed from the {product-title} web console's *Workloads* page or using the `oc` CLI. The following procedures show CLI usage unless otherwise stated.
include::modules/deployments-starting-deployment.adoc[leveloffset=+2]
include::modules/deployments-viewing-deployment.adoc[leveloffset=+2]
@@ -24,7 +22,7 @@ include::modules/deployments-setting-resources.adoc[leveloffset=+2]
include::modules/deployments-scaling-manually.adoc[leveloffset=+2]
include::modules/deployments-accessing-private-repos.adoc[leveloffset=+2]
-ifdef::openshift-enterprise,openshift-origin[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/deployments-assigning-pods-to-nodes.adoc[leveloffset=+2]
endif::[]
diff --git a/applications/deployments/route-based-deployment-strategies.adoc b/applications/deployments/route-based-deployment-strategies.adoc
index 47fd44b0deac..a328483086df 100644
--- a/applications/deployments/route-based-deployment-strategies.adoc
+++ b/applications/deployments/route-based-deployment-strategies.adoc
@@ -5,11 +5,7 @@ include::modules/common-attributes.adoc[]
toc::[]
-Deployment strategies provide a way for the application to evolve. Some
-strategies use DeploymentConfigs to make changes that are seen by users of all
-routes that resolve to the application. Other advanced strategies, such as the
-ones described in this section, use router features in conjunction with
-DeploymentConfigs to impact specific routes.
+Deployment strategies provide a way for the application to evolve. Some strategies use `Deployment` objects to make changes that are seen by users of all routes that resolve to the application. Other advanced strategies, such as the ones described in this section, use router features in conjunction with `Deployment` objects to impact specific routes.
////
This link keeps breaking Travis for some reason.
@@ -17,31 +13,18 @@ This link keeps breaking Travis for some reason.
[NOTE]
====
See
-xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using DeploymentConfig strategies]
+xref:../../applications/deployments/deployment-strategies.adoc#deployment-strategies[Using deployment strategies]
for more on the basic strategy types.
====
////
-The most common route-based strategy is to use a _blue-green deployment_. The
-new version (the blue version) is brought up for testing and evaluation, while
-the users still use the stable version (the green version). When ready, the
-users are switched to the blue version. If a problem arises, you can switch back
-to the green version.
-
-A common alternative strategy is to use _A/B versions_ that are both active at
-the same time and some users use one version, and some users use the other
-version. This can be used for experimenting with user interface changes and
-other features to get user feedback. It can also be used to verify proper
-operation in a production context where problems impact a limited number of
-users.
-
-A canary deployment tests the new version but when a problem is detected it
-quickly falls back to the previous version. This can be done with both of the
-above strategies.
-
-The route-based deployment strategies do not scale the number of Pods in the
-services. To maintain desired performance characteristics the deployment
-configurations might have to be scaled.
+The most common route-based strategy is to use a _blue-green deployment_. The new version (the green version) is brought up for testing and evaluation, while the users still use the stable version (the blue version). When ready, the users are switched to the green version. If a problem arises, you can switch back to the blue version.
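+
+Switching users between versions amounts to repointing the route at the other version's service, for example (the route and service names are illustrative):
+
+[source,terminal]
+----
+$ oc patch route/example -p '{"spec":{"to":{"name":"example-green"}}}'
+----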
+
+A common alternative strategy is to use _A/B versions_ that are both active at the same time and some users use one version, and some users use the other version. This can be used for experimenting with user interface changes and other features to get user feedback. It can also be used to verify proper operation in a production context where problems impact a limited number of users.
+
+A canary deployment tests the new version but when a problem is detected it quickly falls back to the previous version. This can be done with both of the above strategies.
+
+The route-based deployment strategies do not scale the number of pods in the services. To maintain desired performance characteristics the deployment configurations might have to be scaled.
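+
+For example, to scale a deployment config behind a route manually (the name is illustrative):
+
+[source,terminal]
+----
+$ oc scale dc/example-green --replicas=3
+----
+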
include::modules/deployments-proxy-shards.adoc[leveloffset=+1]
include::modules/deployments-n1-compatibility.adoc[leveloffset=+1]
diff --git a/applications/deployments/what-deployments-are.adoc b/applications/deployments/what-deployments-are.adoc
index b46f23d04ff1..ec6f9bdbd2cd 100644
--- a/applications/deployments/what-deployments-are.adoc
+++ b/applications/deployments/what-deployments-are.adoc
@@ -1,23 +1,15 @@
[id="what-deployments-are"]
-= Understanding Deployments and DeploymentConfigs
+= Understanding Deployment and DeploymentConfig objects
include::modules/common-attributes.adoc[]
:context: what-deployments-are
toc::[]
-_Deployments_ and _DeploymentConfigs_ in {product-title} are API objects that
-provide two similar but different methods for fine-grained management over
-common user applications. They are comprised of the following separate API
-objects:
+The `Deployment` and `DeploymentConfig` API objects in {product-title} provide two similar but different methods for fine-grained management over common user applications. They are composed of the following separate API objects:
-- A DeploymentConfig or a Deployment, either of which describes the desired state
-of a particular component of the application as a Pod template.
-- DeploymentConfigs involve one or more _ReplicationControllers_, which contain a
-point-in-time record of the state of a DeploymentConfig as a Pod template.
-Similarly, Deployments involve one or more _ReplicaSets_, a successor of
-ReplicationControllers.
-- One or more Pods, which represent an instance of a particular version of an
-application.
+* A `DeploymentConfig` or `Deployment` object, either of which describes the desired state of a particular component of the application as a pod template.
+* `DeploymentConfig` objects involve one or more _replication controllers_, which contain a point-in-time record of the state of a deployment as a pod template. Similarly, `Deployment` objects involve one or more _replica sets_, a successor of replication controllers.
+* One or more pods, which represent an instance of a particular version of an application.
////
Update when converted:
@@ -36,19 +28,13 @@ xref:../../dev_guide/pod_autoscaling.adoc#dev-guide-pod-autoscaling[autoscaling]
[id="what-deployments-are-build-blocks"]
== Building blocks of a deployment
-Deployments and DeploymentConfigs are enabled by the use of native Kubernetes
-API objects ReplicationControllers and ReplicaSets, respectively, as their
-building blocks.
+Deployments and deployment configs are enabled by the use of native Kubernetes API objects `ReplicaSet` and `ReplicationController`, respectively, as their building blocks.
-Users do not have to manipulate ReplicationControllers, ReplicaSets, or Pods
-owned by DeploymentConfigs or Deployments. The deployment systems ensures
-changes are propagated appropriately.
+Users do not have to manipulate replication controllers, replica sets, or pods owned by `DeploymentConfig` objects or deployments. The deployment systems ensure changes are propagated appropriately.
[TIP]
====
-If the existing deployment strategies are not suited for your use case and you
-must run manual steps during the lifecycle of your deployment, then
-you should consider creating a Custom deployment strategy.
+If the existing deployment strategies are not suited for your use case and you must run manual steps during the lifecycle of your deployment, then you should consider creating a custom deployment strategy.
====
The following sections provide further details on these objects.
diff --git a/applications/idling-applications.adoc b/applications/idling-applications.adoc
index a76e677dba97..223a827f713e 100644
--- a/applications/idling-applications.adoc
+++ b/applications/idling-applications.adoc
@@ -5,18 +5,11 @@ include::modules/common-attributes.adoc[]
toc::[]
-Cluster administrators can idle applications to reduce resource consumption.
-This is useful when the cluster is deployed on a public cloud where cost is
-related to resource consumption.
+Cluster administrators can idle applications to reduce resource consumption. This is useful when the cluster is deployed on a public cloud where cost is related to resource consumption.
-If any scalable resources are not in use, {product-title} discovers and idles
-them by scaling their replicas to `0`. The next time network traffic is directed
-to the resources, the resources are unidled by scaling up the replicas, and
-normal operation continues.
+If any scalable resources are not in use, {product-title} discovers and idles them by scaling their replicas to `0`. The next time network traffic is directed to the resources, the resources are unidled by scaling up the replicas, and normal operation continues.
-Applications are made of services, as well as other scalable resources, such as
-DeploymentConfigs. The action of idling an application involves idling
-all associated resources.
+Applications are made of services, as well as other scalable resources, such as deployment configs. The action of idling an application involves idling all associated resources.
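+
+For example, idling is typically driven per service, and unidling happens automatically when traffic returns (the service name is illustrative):
+
+[source,terminal]
+----
+$ oc idle example-service
+----
+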
include::modules/idle-idling-applications.adoc[leveloffset=+1]
include::modules/idle-unidling-applications.adoc[leveloffset=+1]
diff --git a/applications/odc-deleting-applications.adoc b/applications/odc-deleting-applications.adoc
new file mode 100644
index 000000000000..b0932bc8b94d
--- /dev/null
+++ b/applications/odc-deleting-applications.adoc
@@ -0,0 +1,10 @@
+[id="odc-deleting-applications"]
+= Deleting applications
+include::modules/common-attributes.adoc[]
+:context: odc-deleting-applications
+
+toc::[]
+
+You can delete applications created in your project.
+
+include::modules/odc-deleting-applications-using-developer-perspective.adoc[leveloffset=+1]
diff --git a/applications/odc-editing-applications.adoc b/applications/odc-editing-applications.adoc
new file mode 100644
index 000000000000..b3a6a20783b0
--- /dev/null
+++ b/applications/odc-editing-applications.adoc
@@ -0,0 +1,18 @@
+[id="odc-editing-applications"]
+= Editing applications
+include::modules/common-attributes.adoc[]
+:context: odc-editing-applications
+
+toc::[]
+
+Using the *Topology* view, you can edit the configuration and the source code of the application you created.
+
+== Prerequisites
+
+* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console] and have switched to the xref:../web_console/odc-about-developer-perspective.adoc#odc-about-developer-perspective[*Developer* perspective].
+* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create and modify applications in {product-title}.
+* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective].
+
+include::modules/odc-editing-source-code-using-developer-perspective.adoc[leveloffset=+1]
+
+include::modules/odc-editing-application-configuration-using-developer-perspective.adoc[leveloffset=+1]
diff --git a/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
new file mode 100644
index 000000000000..c5fb05bccfad
--- /dev/null
+++ b/applications/odc-monitoring-project-and-application-metrics-using-developer-perspective.adoc
@@ -0,0 +1,21 @@
+[id="odc-monitoring-project-and-application-metrics-using-developer-perspective"]
+= Monitoring project and application metrics using the Developer perspective
+include::modules/common-attributes.adoc[]
+:context: monitoring-project-and-application-metrics-using-developer-perspective
+
+toc::[]
+
+
+The *Monitoring* view in the *Developer* perspective provides options to monitor your project or application metrics, such as CPU, memory, and bandwidth usage, and network-related information.
+
+== Prerequisites
+
+* You have xref:../web_console/web-console.adoc#web-console-overview[logged in to the web console] and have switched to the xref:../web_console/odc-about-developer-perspective.adoc#odc-about-developer-perspective[*Developer* perspective].
+* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed applications on {product-title}].
+
+include::modules/odc-monitoring-your-project-metrics.adoc[leveloffset=+1]
+
+include::modules/odc-monitoring-your-application-metrics.adoc[leveloffset=+1]
+
+== Additional resources
+* xref:../monitoring/understanding-the-monitoring-stack.adoc#understanding-the-monitoring-stack[Understanding the monitoring stack]
diff --git a/applications/odc-viewing-application-composition-using-topology-view.adoc b/applications/odc-viewing-application-composition-using-topology-view.adoc
new file mode 100644
index 000000000000..4fffa646af36
--- /dev/null
+++ b/applications/odc-viewing-application-composition-using-topology-view.adoc
@@ -0,0 +1,38 @@
+[id="odc-viewing-application-composition-using-topology-view"]
+= Viewing application composition using the Topology view
+include::modules/common-attributes.adoc[]
+:context: viewing-application-composition-using-topology-view
+
+toc::[]
+
+The *Topology* view in the *Developer* perspective of the web console provides a visual representation of all the applications within a project, their build status, and the components and services associated with them.
+
+== Prerequisites
+To view your applications in the *Topology* view and interact with them, ensure that:
+
+* You have xref:../web_console/web-console.adoc#web-console[logged in to the web console].
+* You are in the xref:../web_console/odc-about-developer-perspective.adoc#odc-about-developer-perspective[*Developer* perspective].
+* You have the appropriate xref:../authentication/using-rbac.adoc#default-roles_using-rbac[roles and permissions] in a project to create applications and other workloads in {product-title}.
+* You have xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-creating-applications-using-developer-perspective[created and deployed an application on {product-title} using the *Developer* perspective].
+
+
+include::modules/odc-viewing-application-topology.adoc[leveloffset=+1]
+
+include::modules/odc-interacting-with-applications-and-components.adoc[leveloffset=+1]
+
+include::modules/odc-scaling-application-pods-and-checking-builds-and-routes.adoc[leveloffset=+1]
+
+include::modules/odc-adding-components-to-an-existing-project.adoc[leveloffset=+1]
+
+include::modules/odc-grouping-multiple-components.adoc[leveloffset=+1]
+
+include::modules/odc-adding-services-to-application.adoc[leveloffset=+1]
+
+include::modules/odc-removing-services-from-application.adoc[leveloffset=+1]
+
+include::modules/odc-labels-and-annotations-used-for-topology-view.adoc[leveloffset=+1]
+
+== Additional resources
+
+* See xref:../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Importing a codebase from Git to create an application] for more information on creating an application from Git.
+* See xref:../applications/connecting_applications_to_services/odc-connecting-an-application-to-a-service-using-the-developer-perspective.adoc#odc-connecting-an-application-to-a-service-using-the-developer-perspective[Connecting an application to a service using the Developer perspective].
diff --git a/applications/operator_sdk/osdk-ansible.adoc b/applications/operator_sdk/osdk-ansible.adoc
deleted file mode 100644
index 921eb44945fe..000000000000
--- a/applications/operator_sdk/osdk-ansible.adoc
+++ /dev/null
@@ -1,52 +0,0 @@
-[id="osdk-ansible"]
-= Creating Ansible-based Operators
-include::modules/common-attributes.adoc[]
-:context: osdk-ansible
-
-toc::[]
-
-This guide outlines Ansible support in the Operator SDK and walks Operator
-authors through examples building and running Ansible-based Operators with the
-`operator-sdk` CLI tool that use Ansible playbooks and modules.
-
-include::modules/osdk-ansible-support.adoc[leveloffset=+1]
-include::modules/osdk-ansible-custom-resource-files.adoc[leveloffset=+2]
-include::modules/osdk-ansible-watches-file.adoc[leveloffset=+2]
-include::modules/osdk-ansible-extra-variables.adoc[leveloffset=+2]
-include::modules/osdk-ansible-runner-directory.adoc[leveloffset=+2]
-
-include::modules/osdk-installing-cli.adoc[leveloffset=+1]
-include::modules/osdk-building-ansible-operator.adoc[leveloffset=+1]
-
-[id="osdk-ansible-k8s-module"]
-== Managing application lifecycle using the k8s Ansible module
-
-To manage the lifecycle of your application on Kubernetes using Ansible, you
-can use the link:https://docs.ansible.com/ansible/2.7/modules/k8s_module.html[`k8s` Ansible module].
-This Ansible module allows a developer to either leverage their existing
-Kubernetes resource files (written in YAML) or express the lifecycle management
-in native Ansible.
-
-One of the biggest benefits of using Ansible in conjunction with existing
-Kubernetes resource files is the ability to use Jinja templating so that you can
-customize resources with the simplicity of a few variables in Ansible.
-
-This section goes into detail on usage of the `k8s` Ansible module. To get
-started, install the module on your local workstation and test it using a
-playbook before moving on to using it within an Operator.
-
-include::modules/osdk-ansible-k8s-module-installing.adoc[leveloffset=+2]
-include::modules/osdk-ansible-k8s-module-testing-locally.adoc[leveloffset=+2]
-include::modules/osdk-ansible-k8s-module-inside-operator.adoc[leveloffset=+2]
-
-include::modules/osdk-ansible-managing-cr-status.adoc[leveloffset=+1]
-
-[id="osdk-ansible-addtl-resources"]
-== Additional resources
-
-- See
-xref:../../applications/operator_sdk/osdk-appendices.adoc#osdk-project-scaffolding-layout_operator-appendices[Appendices]
-to learn about the project directory structures created by the Operator SDK.
-- link:https://blog.openshift.com/reaching-for-the-stars-with-ansible-operator/[Reaching for the Stars with Ansible Operator] - Red Hat OpenShift Blog
-
-- link:https://operators.gitbook.io/operator-developer-guide-for-red-hat-partners/[Operator Development Guide for Red Hat Partners]
diff --git a/applications/operator_sdk/osdk-appendices.adoc b/applications/operator_sdk/osdk-appendices.adoc
deleted file mode 100644
index 86bde1b7c83d..000000000000
--- a/applications/operator_sdk/osdk-appendices.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-[id="operator-appendices"]
-= Appendices
-include::modules/common-attributes.adoc[]
-:context: operator-appendices
-
-toc::[]
-
-include::modules/osdk-project-staffolding-layout.adoc[leveloffset=+1]
diff --git a/applications/operator_sdk/osdk-cli-reference.adoc b/applications/operator_sdk/osdk-cli-reference.adoc
deleted file mode 100644
index 9ecb07373017..000000000000
--- a/applications/operator_sdk/osdk-cli-reference.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-[id="osdk-cli-reference"]
-= Operator SDK CLI reference
-include::modules/common-attributes.adoc[]
-:context: osdk-cli-reference
-
-toc::[]
-
-This guide documents the Operator SDK CLI commands and their syntax:
-
-----
-$ operator-sdk <command> [<subcommand>] [<argument>] [<flags>]
-----
-
-include::modules/osdk-cli-reference-build.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-completion.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-print-deps.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-generate.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-olm-catalog.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-new.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-add.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-test.adoc[leveloffset=+1]
-include::modules/osdk-cli-reference-up.adoc[leveloffset=+1]
diff --git a/applications/operator_sdk/osdk-generating-csvs.adoc b/applications/operator_sdk/osdk-generating-csvs.adoc
deleted file mode 100644
index 002a9f9730a3..000000000000
--- a/applications/operator_sdk/osdk-generating-csvs.adoc
+++ /dev/null
@@ -1,49 +0,0 @@
-[id="osdk-generating-csvs"]
-= Generating a ClusterServiceVersion (CSV)
-include::modules/common-attributes.adoc[]
-:context: osdk-generating-csvs
-
-toc::[]
-
-A _ClusterServiceVersion_ (CSV) is a YAML manifest created from Operator
-metadata that assists the Operator Lifecycle Manager (OLM) in running the
-Operator in a cluster. It is the metadata that accompanies an Operator container
-image, used to populate user interfaces with information like its logo,
-description, and version. It is also a source of technical information that is required to
-run the Operator, like the RBAC rules it requires and which Custom Resources
-(CRs) it manages or depends on.
-
-The Operator SDK includes the `olm-catalog gen-csv` subcommand to generate a
-_ClusterServiceVersion_ (CSV) for the current Operator project customized using
-information contained in manually-defined YAML manifests and Operator source
-files.
-
-A CSV-generating command removes the responsibility of Operator authors having
-in-depth Operator Lifecycle Manager (OLM) knowledge in order for their Operator
-to interact with OLM or publish metadata to the Catalog Registry. Further,
-because the CSV spec will likely change over time as new Kubernetes and OLM
-features are implemented, the Operator SDK is equipped to easily extend its
-update system to handle new CSV features going forward.
-
-The CSV version is the same as the Operator's, and a new CSV is generated when
-upgrading Operator versions. Operator authors can use the `--csv-version` flag
-to have their Operators' state encapsulated in a CSV with the supplied version:
-
-----
-$ operator-sdk olm-catalog gen-csv --csv-version <version>
-----
-
-This action is idempotent and only updates the CSV file when a new version is
-supplied, or a YAML manifest or source file is changed. Operator authors should
-not have to directly modify most fields in a CSV manifest. Those that require
-modification are defined in this guide. For example, the CSV version must be
-included in `metadata.name`.
-
-include::modules/osdk-how-csv-gen-works.adoc[leveloffset=+1]
-include::modules/osdk-csv-composition-configuration.adoc[leveloffset=+1]
-include::modules/osdk-manually-defined-csv-fields.adoc[leveloffset=+1]
-include::modules/osdk-generating-a-csv.adoc[leveloffset=+1]
-
-////
-TODO: discuss whether multiple CSV files can be present, each with a unique file name (ex. `app-operator.csv.0.1.1.yaml`), or a single `app-operator.csv.yaml` file that relies on VCS (git) to version the file.
-////
diff --git a/applications/operator_sdk/osdk-getting-started.adoc b/applications/operator_sdk/osdk-getting-started.adoc
deleted file mode 100644
index 62d9e71d9da9..000000000000
--- a/applications/operator_sdk/osdk-getting-started.adoc
+++ /dev/null
@@ -1,52 +0,0 @@
-[id="osdk-getting-started"]
-= Getting started with the Operator SDK
-include::modules/common-attributes.adoc[]
-:context: osdk-getting-started
-
-toc::[]
-
-This guide outlines the basics of the Operator SDK and walks Operator authors
-with cluster administrator access to a Kubernetes-based cluster (such as
-{product-title}) through an example of building a simple Go-based Memcached
-Operator and managing its lifecycle from installation to upgrade.
-
-This is accomplished using two centerpieces of the Operator Framework: the
-Operator SDK (the `operator-sdk` CLI tool and `controller-runtime` library API)
-and the Operator Lifecycle Manager (OLM).
-
-[NOTE]
-====
-{product-title} 4 supports Operator SDK v0.7.0 or later.
-====
-
-include::modules/osdk-architecture.adoc[leveloffset=+1]
-include::modules/osdk-monitoring-prometheus-operator-support.adoc[leveloffset=+2]
-
-include::modules/osdk-installing-cli.adoc[leveloffset=+1]
-include::modules/building-memcached-operator-using-osdk.adoc[leveloffset=+1]
-include::modules/managing-memcached-operator-using-olm.adoc[leveloffset=+1]
-
-[id="osdk-getting-started-addtl-resources"]
-== Additional resources
-
-- See
-xref:../../applications/operator_sdk/osdk-appendices.adoc#osdk-project-scaffolding-layout_operator-appendices[Appendices]
-to learn about the project directory structures created by the Operator SDK.
-
-- link:https://operators.gitbook.io/operator-developer-guide-for-red-hat-partners/[Operator Development Guide for Red Hat Partners]
-
-ifdef::openshift-origin[]
-[id="osdk-getting-started-getting-involved"]
-== Getting involved
-
-This guide provides an effective demonstration of the value of the Operator
-Framework for building and managing Operators, but this is much more left out in
-the interest of brevity. The Operator Framework and its components are open
-source, so visit each project individually and learn what else you can do:
-
-link:https://github.com/operator-framework[*github.com/operator-framework*]
-
-If you want to discuss your experience, have questions, or want to get involved,
-join the
-link:https://groups.google.com/forum/#!forum/operator-framework[Operator Framework mailing list].
-endif::[]
diff --git a/applications/operator_sdk/osdk-helm.adoc b/applications/operator_sdk/osdk-helm.adoc
deleted file mode 100644
index f0f3010af740..000000000000
--- a/applications/operator_sdk/osdk-helm.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-[id="osdk-helm"]
-= Creating Helm-based Operators
-include::modules/common-attributes.adoc[]
-:context: osdk-helm
-
-toc::[]
-
-This guide outlines Helm chart support in the Operator SDK and walks Operator
-authors through an example of building and running an Nginx Operator with the
-`operator-sdk` CLI tool that uses an existing Helm chart.
-
-include::modules/osdk-helm-chart-support.adoc[leveloffset=+1]
-include::modules/osdk-installing-cli.adoc[leveloffset=+1]
-include::modules/osdk-building-helm-operator.adoc[leveloffset=+1]
-
-[id="osdk-helm-addtl-resources"]
-== Additional resources
-
-- See
-xref:../../applications/operator_sdk/osdk-appendices.adoc#osdk-project-scaffolding-layout_operator-appendices[Appendices]
-to learn about the project directory structures created by the Operator SDK.
-
-- link:https://operators.gitbook.io/operator-developer-guide-for-red-hat-partners/[Operator Development Guide for Red Hat Partners]
diff --git a/applications/operator_sdk/osdk-leader-election.adoc b/applications/operator_sdk/osdk-leader-election.adoc
deleted file mode 100644
index fae0b548b265..000000000000
--- a/applications/operator_sdk/osdk-leader-election.adoc
+++ /dev/null
@@ -1,41 +0,0 @@
-[id="osdk-leader-election"]
-= Configuring leader election
-include::modules/common-attributes.adoc[]
-:context: osdk-leader-election
-
-toc::[]
-
-During the lifecycle of an Operator, it is possible that there may be more than
-one instance running at any given time, for example when rolling out an upgrade
-for the Operator. In such a scenario, it is necessary to avoid contention
-between multiple Operator instances using leader election. This ensures only one
-leader instance handles the reconciliation while the other instances are
-inactive but ready to take over when the leader steps down.
-
-There are two different leader election implementations to choose from, each
-with its own trade-off:
-
-* _Leader-for-life_: The leader Pod only gives up leadership (using garbage
-collection) when it is deleted. This implementation precludes the possibility of
-two instances mistakenly running as leaders (split brain). However, this method
-can be subject to a delay in electing a new leader. For example, when the leader
-Pod is on an unresponsive or partitioned node, the
-link:https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/#options[`pod-eviction-timeout`]
-dictates how it takes for the leader Pod to be deleted from the node and step
-down (default `5m`). See the link:https://godoc.org/github.com/operator-framework/operator-sdk/pkg/leader[Leader-for-life] Go documentation for more.
-
-* _Leader-with-lease_: The leader Pod periodically renews the leader lease and
-gives up leadership when it cannot renew the lease. This implementation allows
-for a faster transition to a new leader when the existing leader is isolated,
-but there is a possibility of split brain in
-link:https://github.com/kubernetes/client-go/blob/30b06a83d67458700a5378239df6b96948cb9160/tools/leaderelection/leaderelection.go#L21-L24[certain situations]. See the
-link:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/leaderelection[Leader-with-lease]
-Go documentation for more.
-
-By default, the Operator SDK enables the Leader-for-life implementation. Consult
-the related Go documentation for both approaches to consider the trade-offs that make
-sense for your use case,
-
-The following examples illustrate how to use the two options.
-
-include::modules/osdk-leader-election-types.adoc[leveloffset=+1]
diff --git a/applications/operator_sdk/osdk-migrating-to-v0-1-0.adoc b/applications/operator_sdk/osdk-migrating-to-v0-1-0.adoc
deleted file mode 100644
index 8814d7f4c4bd..000000000000
--- a/applications/operator_sdk/osdk-migrating-to-v0-1-0.adoc
+++ /dev/null
@@ -1,28 +0,0 @@
-[id="osdk-migrating-to-v0-1-0"]
-= Migrating to Operator SDK v0.1.0
-include::modules/common-attributes.adoc[]
-:context: osdk-migrating-to-v0-1-0
-
-toc::[]
-
-This guide describes how to migrate an Operator project built using Operator SDK
-v0.0.x to the project structure required by
-link:https://github.com/operator-framework/operator-sdk/releases[Operator SDK v0.1.0].
-
-The recommended method for migrating your project is to:
-
-. Initialize a new v0.1.0 project.
-. Copy your code into the new project.
-. Modify the new project as described for v0.1.0.
-
-This guide uses the `memcached-operator`, the example project
-from xref:osdk-getting-started.adoc#osdk-getting-started[Getting started with the Operator SDK],
-to illustrate the migration steps. See the
-link:https://github.com/operator-framework/operator-sdk-samples/tree/aa15bd278eec0959595e0a0a7282a26055d7f9d6/memcached-operator[v0.0.7 memcached-operator]
-and
-link:https://github.com/operator-framework/operator-sdk-samples/tree/4c6934448684a6953ece4d3d9f3f77494b1c125e/memcached-operator[v0.1.0 memcached-operator]
-project structures for pre- and post-migration examples, respectively.
-
-include::modules/creating-new-osdk-v0-1-0-project.adoc[leveloffset=+1]
-include::modules/migrating-custom-types-pkg-apis.adoc[leveloffset=+1]
-include::modules/migrating-reconcile-code.adoc[leveloffset=+1]
diff --git a/applications/operators/olm-adding-operators-to-cluster.adoc b/applications/operators/olm-adding-operators-to-cluster.adoc
deleted file mode 100644
index 2688a34023c9..000000000000
--- a/applications/operators/olm-adding-operators-to-cluster.adoc
+++ /dev/null
@@ -1,41 +0,0 @@
-[id="olm-adding-operators-to-a-cluster"]
-= Adding Operators to a cluster
-include::modules/common-attributes.adoc[]
-:context: olm-adding-operators-to-a-cluster
-
-toc::[]
-
-This guide walks cluster administrators through installing Operators to an
-{product-title} cluster.
-
-[id="olm-installing-operators-from-operatorhub_{context}"]
-== Installing Operators from the OperatorHub
-
-As a cluster administrator, you can install an Operator from the OperatorHub
-using the {product-title} web console or the CLI. You can then subscribe the
-Operator to one or more namespaces to make it available for developers on your
-cluster.
-
-During installation, you must determine the following initial settings for the
-Operator:
-
-Installation Mode:: Choose *All namespaces on the cluster (default)* to have the
-Operator installed on all namespaces or choose individual namespaces, if
-available, to only install the Operator on selected namespaces. This example
-chooses *All namespaces...* to make the Operator available to all users and
-projects.
-
-Update Channel:: If an Operator is available through multiple channels, you can
-choose which channel you want to subscribe to. For example, to deploy from the
-*stable* channel, if available, select it from the list.
-
-Approval Strategy:: You can choose Automatic or Manual updates. If you choose
-Automatic updates for an installed Operator, when a new version of that Operator
-is available, the OLM automatically upgrades the running instance of your
-Operator without human intervention. If you select Manual updates, when a newer
-version of an Operator is available, the OLM creates an update request. As a
-cluster administrator, you must then manually approve that update request to
-have the Operator updated to the new version.
-
-include::modules/olm-installing-from-operatorhub-using-web-console.adoc[leveloffset=+2]
-include::modules/olm-installing-from-operatorhub-using-cli.adoc[leveloffset=+2]
diff --git a/applications/operators/olm-deleting-operators-from-cluster.adoc b/applications/operators/olm-deleting-operators-from-cluster.adoc
deleted file mode 100644
index d42846fdd323..000000000000
--- a/applications/operators/olm-deleting-operators-from-cluster.adoc
+++ /dev/null
@@ -1,16 +0,0 @@
-[id='olm-deleting-operators-from-a-cluster']
-= Deleting Operators from a cluster
-include::modules/common-attributes.adoc[]
-:context: olm-deleting-operators-from-a-cluster
-
-toc::[]
-
-To delete (uninstall) an Operator from your cluster, you can simply
-delete the subscription to remove it from the subscribed namespace.
-If you want a clean slate, you can also remove the operator CSV and
-deployment, then delete Operator's entry in the CatalogSourceConfig.
-The following text describes how to delete Operators from a cluster
-using either the web console or the command line.
-
-include::modules/olm-deleting-operators-from-a-cluster-using-web-console.adoc[leveloffset=+1]
-include::modules/olm-deleting-operators-from-a-cluster-using-cli.adoc[leveloffset=+1]
diff --git a/applications/operators/olm-understanding-olm.adoc b/applications/operators/olm-understanding-olm.adoc
deleted file mode 100644
index cfd0bec398d8..000000000000
--- a/applications/operators/olm-understanding-olm.adoc
+++ /dev/null
@@ -1,15 +0,0 @@
-[id="olm-understanding-olm"]
-= Understanding the Operator Lifecycle Manager
-include::modules/common-attributes.adoc[]
-:context: olm-understanding-olm
-
-toc::[]
-
-This guide outlines the workflow and architecture of the Operator Lifecycle
-Manager (OLM) in {product-title}.
-
-include::modules/olm-overview.adoc[leveloffset=+1]
-include::modules/olm-csv.adoc[leveloffset=+1]
-include::modules/olm-architecture.adoc[leveloffset=+1]
-include::modules/olm-operatorgroups.adoc[leveloffset=+1]
-include::modules/olm-metrics.adoc[leveloffset=+1]
diff --git a/applications/operators/olm-understanding-operatorhub.adoc b/applications/operators/olm-understanding-operatorhub.adoc
deleted file mode 100644
index 10b129acee9b..000000000000
--- a/applications/operators/olm-understanding-operatorhub.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-[id="olm-understanding-operatorhub"]
-= Understanding the OperatorHub
-include::modules/common-attributes.adoc[]
-:context: olm-understanding-operatorhub
-
-toc::[]
-
-This guide outlines the architecture of the OperatorHub.
-
-include::modules/olm-operatorhub-overview.adoc[leveloffset=+1]
-include::modules/olm-operatorhub-architecture.adoc[leveloffset=+1]
diff --git a/applications/operators/olm-what-operators-are.adoc b/applications/operators/olm-what-operators-are.adoc
deleted file mode 100644
index 5d1bb4897c28..000000000000
--- a/applications/operators/olm-what-operators-are.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-[id="olm-what-operators-are"]
-= Understanding Operators
-include::modules/common-attributes.adoc[]
-:context: olm-what-operators-are
-
-toc::[]
-
-Conceptually, _Operators_ take human operational knowledge and encode it into
-software that is more easily shared with consumers.
-
-Operators are pieces of software that ease the operational complexity of running
-another piece of software. They act like an extension of the software vendor's
-engineering team, watching over a Kubernetes environment (such as
-{product-title}) and using its current state to make decisions in real time.
-Advanced Operators are designed to handle upgrades seamlessly, react to failures
-automatically, and not take shortcuts, like skipping a software backup process
-to save time.
-
-More technically, _Operators_ are a method of packaging, deploying, and managing a
-Kubernetes application.
-
-A Kubernetes application is an app that is both deployed on Kubernetes and
-managed using the Kubernetes APIs and `kubectl` or `oc` tooling. To be able to
-make the most of Kubernetes, you require a set of cohesive APIs to extend in order
-to service and manage your apps that run on Kubernetes. Think of
-Operators as the runtime that manages this type of app on Kubernetes.
-
-include::modules/olm-why-use-operators.adoc[leveloffset=+1]
-include::modules/olm-operator-framework.adoc[leveloffset=+1]
-include::modules/olm-operator-maturity-model.adoc[leveloffset=+1]
diff --git a/applications/projects/configuring-project-creation.adoc b/applications/projects/configuring-project-creation.adoc
new file mode 100644
index 000000000000..768bcfe7ce04
--- /dev/null
+++ b/applications/projects/configuring-project-creation.adoc
@@ -0,0 +1,27 @@
+[id="configuring-project-creation"]
+= Configuring project creation
+include::modules/common-attributes.adoc[]
+:context: configuring-project-creation
+
+toc::[]
+
+In {product-title}, _projects_ are used to group and isolate related objects.
+When a request is made to create a new project using the web console or the
+`oc new-project` command, an endpoint in {product-title} is used to provision
+the project according to a template, which can be customized.
+
+As a cluster administrator, you can allow and configure how developers and
+service accounts can create, or _self-provision_, their own projects.
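+
+As a minimal sketch, assuming a customized template named `project-request` already exists in the `openshift-config` namespace, the template is referenced from the cluster-scoped `Project` configuration resource:
+
+[source,yaml]
+----
+apiVersion: config.openshift.io/v1
+kind: Project
+metadata:
+  name: cluster
+spec:
+  projectRequestTemplate:
+    name: project-request <1>
+----
+<1> The name of a project template object in the `openshift-config` namespace.
+
+The modules that follow describe the full procedure for creating and modifying the template.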
+
+////
+ifdef::openshift-dedicated[]
+A dedicated administrator is by default an administrator for all projects on the
+cluster that are not managed by Red Hat Operations.
+endif::[]
+////
+
+include::modules/about-project-creation.adoc[leveloffset=+1]
+include::modules/modifying-template-for-new-projects.adoc[leveloffset=+1]
+include::modules/disabling-project-self-provisioning.adoc[leveloffset=+1]
+include::modules/customizing-project-request-message.adoc[leveloffset=+1]
diff --git a/authentication/creating-project-other-user.adoc b/applications/projects/creating-project-other-user.adoc
similarity index 98%
rename from authentication/creating-project-other-user.adoc
rename to applications/projects/creating-project-other-user.adoc
index 107404e7e0fe..182274cdf0c8 100644
--- a/authentication/creating-project-other-user.adoc
+++ b/applications/projects/creating-project-other-user.adoc
@@ -2,10 +2,11 @@
= Creating a project as another user
include::modules/common-attributes.adoc[]
:context: creating-project-other-user
+
toc::[]
Impersonation allows you to create a project as a different user.
include::modules/authentication-api-impersonation.adoc[leveloffset=+1]
-include::modules/impersonation-project-creation.adoc[leveloffset=+1]
\ No newline at end of file
+include::modules/impersonation-project-creation.adoc[leveloffset=+1]
diff --git a/applications/crds/images b/applications/projects/images
similarity index 100%
rename from applications/crds/images
rename to applications/projects/images
diff --git a/applications/crds/modules b/applications/projects/modules
similarity index 100%
rename from applications/crds/modules
rename to applications/projects/modules
diff --git a/applications/projects/working-with-projects.adoc b/applications/projects/working-with-projects.adoc
new file mode 100644
index 000000000000..3d1e89f544ab
--- /dev/null
+++ b/applications/projects/working-with-projects.adoc
@@ -0,0 +1,41 @@
+[id="working-with-projects"]
+= Working with projects
+include::modules/common-attributes.adoc[]
+:context: projects
+
+toc::[]
+
+A _project_ allows a community of users to organize and manage their content in
+isolation from other communities.
+
+[NOTE]
+====
+Projects starting with `openshift-` and `kube-` are xref:../../authentication/using-rbac.adoc#rbac-default-projects_using-rbac[default projects]. These projects host cluster components that run as pods and other infrastructure components. As such, {product-title} does not allow you to create projects starting with `openshift-` or `kube-` using the `oc new-project` command. Cluster administrators can create these projects using the `oc adm new-project` command.
+====
+
+[NOTE]
+====
+You cannot assign an SCC to pods created in one of the default namespaces: `default`, `kube-system`, `kube-public`, `openshift-node`, `openshift-infra`, and `openshift`. You cannot use these namespaces for running pods or services.
+====
+
+include::modules/creating-a-project-using-the-web-console.adoc[leveloffset=+1]
+
+include::modules/odc-creating-projects-using-developer-perspective.adoc[leveloffset=+1]
+
+include::modules/creating-a-project-using-the-CLI.adoc[leveloffset=+1]
+
+include::modules/viewing-a-project-using-the-web-console.adoc[leveloffset=+1]
+
+include::modules/viewing-a-project-using-the-CLI.adoc[leveloffset=+1]
+
+include::modules/odc-providing-project-permissions-using-developer-perspective.adoc[leveloffset=+1]
+
+include::modules/adding-to-a-project.adoc[leveloffset=+1]
+
+include::modules/checking-project-status-using-the-web-console.adoc[leveloffset=+1]
+
+include::modules/checking-project-status-using-the-CLI.adoc[leveloffset=+1]
+
+include::modules/deleting-a-project-using-the-web-console.adoc[leveloffset=+1]
+
+include::modules/deleting-a-project-using-the-CLI.adoc[leveloffset=+1]
diff --git a/applications/pruning-objects.adoc b/applications/pruning-objects.adoc
index 617f2728c101..9dcff7f00133 100644
--- a/applications/pruning-objects.adoc
+++ b/applications/pruning-objects.adoc
@@ -19,12 +19,17 @@ include::modules/pruning-groups.adoc[leveloffset=+1]
include::modules/pruning-deployments.adoc[leveloffset=+1]
include::modules/pruning-builds.adoc[leveloffset=+1]
.Additional resources
-- xref:../builds/advanced-build-operations.adoc#builds-build-pruning-advanced-build-operations[Performing advanced builds -> Pruning builds]
+- xref:../cicd/builds/advanced-build-operations.adoc#builds-build-pruning-advanced-build-operations[Performing advanced builds -> Pruning builds]
include::modules/pruning-images.adoc[leveloffset=+1]
+include::modules/pruning-images-manual.adoc[leveloffset=+1]
.Additional resources
- xref:../registry/accessing-the-registry.adoc#accessing-the-registry[Accessing the registry]
- xref:../registry/securing-exposing-registry.adoc#securing-exposing-registry[Exposing the registry]
+- See
+xref:../registry/configuring-registry-operator.adoc#configuring-registry-operator[Image
+Registry Operator in {product-title}] for information on how to create a
+registry route.
include::modules/pruning-hard-pruning-registry.adoc[leveloffset=+1]
include::modules/pruning-cronjobs.adoc[leveloffset=+1]
diff --git a/applications/quotas/quotas-setting-across-multiple-projects.adoc b/applications/quotas/quotas-setting-across-multiple-projects.adoc
index f190c5db9454..216aee8c5747 100644
--- a/applications/quotas/quotas-setting-across-multiple-projects.adoc
+++ b/applications/quotas/quotas-setting-across-multiple-projects.adoc
@@ -5,13 +5,9 @@ include::modules/common-attributes.adoc[]
toc::[]
-A multi-project quota, defined by a ClusterResourceQuota object, allows quotas
-to be shared across multiple projects. Resources used in each selected project
-are aggregated and that aggregate is used to limit resources across all the
-selected projects.
+A multi-project quota, defined by a `ClusterResourceQuota` object, allows quotas to be shared across multiple projects. Resources used in each selected project are aggregated and that aggregate is used to limit resources across all the selected projects.
-This guide describes how cluster administrators can set and manage resource
-quotas across multiple projects.
+This guide describes how cluster administrators can set and manage resource quotas across multiple projects.
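+
+For example, a minimal sketch of a `ClusterResourceQuota` object; the object name and the `openshift.io/requester` annotation value are illustrative:
+
+[source,yaml]
+----
+apiVersion: quota.openshift.io/v1
+kind: ClusterResourceQuota
+metadata:
+  name: for-user
+spec:
+  selector:
+    annotations:
+      openshift.io/requester: <user_name> <1>
+  quota:
+    hard:
+      pods: "10"
+      secrets: "20" <2>
+----
+<1> Selects all projects requested by the specified user. Label selectors are also supported.
+<2> The aggregate limits apply across all selected projects.
+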
include::modules/quotas-selecting-projects.adoc[leveloffset=+1]
include::modules/quotas-viewing-clusterresourcequotas.adoc[leveloffset=+1]
diff --git a/applications/quotas/quotas-setting-per-project.adoc b/applications/quotas/quotas-setting-per-project.adoc
index 95d9001dd2ca..12df415ac621 100644
--- a/applications/quotas/quotas-setting-per-project.adoc
+++ b/applications/quotas/quotas-setting-per-project.adoc
@@ -5,14 +5,9 @@ include::modules/common-attributes.adoc[]
toc::[]
-A _resource quota_, defined by a ResourceQuota object, provides constraints that
-limit aggregate resource consumption per project. It can limit the quantity of
-objects that can be created in a project by type, as well as the total amount of
-compute resources and storage that may be consumed by resources in that project.
+A _resource quota_, defined by a `ResourceQuota` object, provides constraints that limit aggregate resource consumption per project. It can limit the quantity of objects that can be created in a project by type, as well as the total amount of compute resources and storage that might be consumed by resources in that project.
-This guide describes how resource quotas work, how cluster administrators can
-set and manage resource quotas on a per project basis, and how developers and
-cluster administrators can view them.
+This guide describes how resource quotas work, how cluster administrators can set and manage resource quotas on a per project basis, and how developers and cluster administrators can view them.
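+
+For example, a minimal sketch of a `ResourceQuota` object; the object name and the limits shown are illustrative:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-resources
+spec:
+  hard:
+    pods: "4" <1>
+    requests.cpu: "1"
+    requests.memory: 1Gi
+    limits.cpu: "2"
+    limits.memory: 2Gi <2>
+----
+<1> The total number of pods that can exist in the project.
+<2> Aggregate compute requests and limits across all pods in the project.
+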
include::modules/quotas-resources-managed.adoc[leveloffset=+1]
include::modules/quotas-scopes.adoc[leveloffset=+1]
@@ -23,5 +18,4 @@ include::modules/quotas-creating-a-quota.adoc[leveloffset=+1]
include::modules/quotas-creating-object-count-quotas.adoc[leveloffset=+2]
include::modules/setting-resource-quota-for-extended-resources.adoc[leveloffset=+2]
include::modules/quotas-viewing-quotas.adoc[leveloffset=+1]
-include::modules/quotas-configuring-quota-sync-period.adoc[leveloffset=+1]
include::modules/quotas-requiring-explicit-quota.adoc[leveloffset=+1]
diff --git a/applications/red-hat-marketplace.adoc b/applications/red-hat-marketplace.adoc
new file mode 100644
index 000000000000..4f03f96f49e1
--- /dev/null
+++ b/applications/red-hat-marketplace.adoc
@@ -0,0 +1,10 @@
+[id="red-hat-marketplace"]
+= Using the Red Hat Marketplace
+include::modules/common-attributes.adoc[]
+:context: red-hat-marketplace
+
+toc::[]
+
+The link:https://marketplace.redhat.com[Red Hat Marketplace] is an open cloud marketplace that makes it easy to discover and access certified software for container-based environments that run on public clouds and on-premises.
+
+include::modules/red-hat-marketplace-features.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/configuring-ansible-service-broker.adoc b/applications/service_brokers/configuring-ansible-service-broker.adoc
deleted file mode 100644
index c4aeea13b8a5..000000000000
--- a/applications/service_brokers/configuring-ansible-service-broker.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-[id="sb-configuring-asb"]
-= Configuring the {asb-name}
-include::modules/common-attributes.adoc[]
-:context: sb-configuring-asb
-
-toc::[]
-
-[IMPORTANT]
-====
-The {asb-name} is deprecated in {product-title} 4. Equivalent and better
-functionality is present in the Operator Framework and Operator Lifecycle
-Manager (OLM).
-====
-
-// Configuring the {asb-name} Operator
-include::modules/sb-configuring-ansible-service-broker.adoc[leveloffset=+1]
-
-// {asb-name} configuration options
-include::modules/sb-configuring-ansible-service-broker-options.adoc[leveloffset=+2]
-
-// Configuring monitoring for the {asb-name}
-include::modules/sb-configuring-asb-monitoring.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/installing-ansible-service-broker.adoc b/applications/service_brokers/installing-ansible-service-broker.adoc
deleted file mode 100644
index d8f7aebc6e67..000000000000
--- a/applications/service_brokers/installing-ansible-service-broker.adoc
+++ /dev/null
@@ -1,32 +0,0 @@
-[id="sb-installing-asb"]
-= Installing the {asb-name}
-include::modules/common-attributes.adoc[]
-:context: sb-installing-asb
-
-toc::[]
-
-You can install the {asb-name} to gain access to the service bundles that it
-provides.
-
-[IMPORTANT]
-====
-The {asb-name} is deprecated in {product-title} 4. Equivalent and better
-functionality is present in the Operator Framework and Operator Lifecycle
-Manager (OLM).
-====
-
-.Prerequisites
-
-* xref:../../applications/service_brokers/installing-service-catalog.adoc#sb-install-service-catalog_sb-installing-service-catalog[Install the service catalog]
-
-// About the {asb-name}
-include::modules/modules/sb-about-ansible-service-broker.adoc[leveloffset=+1]
-
-// Installing the {asb-name} Operator
-include::modules/sb-install-asb-operator.adoc[leveloffset=+1]
-
-// Starting the {asb-name}
-include::modules/sb-start-asb.adoc[leveloffset=+1]
-
-// {asb-name} configuration options
-include::modules/sb-configuring-ansible-service-broker-options.adoc[leveloffset=+2]
diff --git a/applications/service_brokers/installing-service-catalog.adoc b/applications/service_brokers/installing-service-catalog.adoc
deleted file mode 100644
index d9ebfe05bd32..000000000000
--- a/applications/service_brokers/installing-service-catalog.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-[id="sb-installing-service-catalog"]
-= Installing the service catalog
-include::modules/common-attributes.adoc[]
-:context: sb-installing-service-catalog
-
-toc::[]
-
-[IMPORTANT]
-====
-The service catalog is deprecated in {product-title} 4. Equivalent and better
-functionality is present in the Operator Framework and Operator Lifecycle
-Manager (OLM).
-====
-
-// About the service catalog
-include::modules/modules/sb-about-service-catalog.adoc[leveloffset=+1]
-
-// Installing service catalog
-include::modules/modules/sb-install-service-catalog.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/installing-template-service-broker.adoc b/applications/service_brokers/installing-template-service-broker.adoc
deleted file mode 100644
index 0073662955bb..000000000000
--- a/applications/service_brokers/installing-template-service-broker.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-[id="sb-installing-tsb"]
-= Installing the {tsb-name}
-include::modules/common-attributes.adoc[]
-:context: sb-installing-tsb
-
-toc::[]
-
-You can install the {tsb-name} to gain access to the template
-applications that it provides.
-
-[IMPORTANT]
-====
-The {tsb-name} is deprecated in {product-title} 4. Equivalent and better
-functionality is present in the Operator Framework and Operator Lifecycle
-Manager (OLM).
-====
-
-.Prerequisites
-
-* xref:../../applications/service_brokers/installing-service-catalog.adoc#sb-install-service-catalog_sb-installing-service-catalog[Install the service catalog]
-
-// About the service {tsb-name}
-include::modules/modules/sb-about-template-service-broker.adoc[leveloffset=+1]
-
-// Installing the {tsb-name} Operator
-include::modules/sb-install-tsb-operator.adoc[leveloffset=+1]
-
-// Starting the {tsb-name}
-include::modules/sb-start-tsb.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/provisioning-service-bundle.adoc b/applications/service_brokers/provisioning-service-bundle.adoc
deleted file mode 100644
index dbaf82f029e4..000000000000
--- a/applications/service_brokers/provisioning-service-bundle.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-[id="sb-provisioning-service-bundles"]
-= Provisioning service bundles
-include::modules/common-attributes.adoc[]
-:context: sb-provisioning-service-bundles
-
-toc::[]
-
-// Provisioning service bundles
-include::modules/sb-provision-service-bundle.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/provisioning-template-application.adoc b/applications/service_brokers/provisioning-template-application.adoc
deleted file mode 100644
index 36c6e1f0ebed..000000000000
--- a/applications/service_brokers/provisioning-template-application.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-[id="sb-provisioning-template-application"]
-= Provisioning template applications
-include::modules/common-attributes.adoc[]
-:context: sb-provisioning-template-application
-
-toc::[]
-
-// Provisioning template applications
-include::modules/sb-provision-template-application.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/uninstalling-ansible-service-broker.adoc b/applications/service_brokers/uninstalling-ansible-service-broker.adoc
deleted file mode 100644
index b9d4d2b67c93..000000000000
--- a/applications/service_brokers/uninstalling-ansible-service-broker.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-[id="sb-uninstalling-asb"]
-= Uninstalling the {asb-name}
-include::modules/common-attributes.adoc[]
-:context: sb-uninstalling-asb
-
-toc::[]
-
-You can uninstall the {asb-name} if you no longer require access to the service
-bundles that it provides.
-
-[IMPORTANT]
-====
-The {asb-name} is deprecated in {product-title} 4. Equivalent and better
-functionality is present in the Operator Framework and Operator Lifecycle
-Manager (OLM).
-====
-
-// Uninstalling the {asb-name}
-include::modules/modules/sb-uninstall-asb.adoc[leveloffset=+1]
diff --git a/applications/service_brokers/uninstalling-template-service-broker.adoc b/applications/service_brokers/uninstalling-template-service-broker.adoc
deleted file mode 100644
index 279b62bdfcb0..000000000000
--- a/applications/service_brokers/uninstalling-template-service-broker.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-[id="sb-uninstalling-tsb"]
-= Uninstalling the {tsb-name}
-include::modules/common-attributes.adoc[]
-:context: sb-uninstalling-tsb
-
-toc::[]
-
-You can uninstall the {tsb-name} if you no longer require access to the template
-applications that it provides.
-
-[IMPORTANT]
-====
-The {tsb-name} is deprecated in {product-title} 4. Equivalent and better
-functionality is present in the Operator Framework and Operator Lifecycle
-Manager (OLM).
-====
-
-// Uninstalling the {tsb-name}
-include::modules/modules/sb-uninstall-tsb.adoc[leveloffset=+1]
diff --git a/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc
new file mode 100644
index 000000000000..2d4e03dbd957
--- /dev/null
+++ b/applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc
@@ -0,0 +1,35 @@
+[id="configuring-custom-helm-chart-repositories"]
+
+= Configuring custom Helm chart repositories
+include::modules/common-attributes.adoc[]
+:context: configuring-custom-helm-chart-repositories
+
+toc::[]
+
+You can install Helm charts on an {product-title} cluster using the following methods:
+
+* The CLI.
+* The *Developer* perspective of the web console.
+
+The *Developer Catalog*, in the *Developer* perspective of the web console, displays the Helm charts available in the cluster. By default, it lists the Helm charts from the Red Hat OpenShift Helm chart repository. For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat Helm index file].
+
+As a cluster administrator, you can add multiple Helm chart repositories, apart from the default one, and display the Helm charts from these repositories in the *Developer Catalog*.
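+
+For example, a minimal sketch of a `HelmChartRepository` custom resource; the repository name and URL are placeholders:
+
+[source,yaml]
+----
+apiVersion: helm.openshift.io/v1beta1
+kind: HelmChartRepository
+metadata:
+  name: my-charts
+spec:
+  name: My Charts <1>
+  connectionConfig:
+    url: https://my-charts.example.com <2>
+----
+<1> Optional display name for the repository in the *Developer Catalog*.
+<2> The URL of the chart repository.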
+
+
+include::modules/helm-installing-a-helm-chart-on-an-openshift-cluster.adoc[leveloffset=+1]
+
+include::modules/odc-installing-helm-charts-using-developer-perspective.adoc[leveloffset=+1]
+
+== Using Helm in the web terminal
+
+You can use Helm by initializing the web terminal in the *Developer* perspective of the web console. For more information, see xref:../../web_console/odc-about-web-terminal.adoc#odc-using-web-terminal_odc-about-web-terminal[Using the web terminal].
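+
+For example, after the web terminal initializes, you can run standard Helm commands in it, such as listing the releases in the current project:
+
+[source,terminal]
+----
+$ helm list
+----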
+
+include::modules/helm-creating-a-custom-helm-chart-on-openshift.adoc[leveloffset=+1]
+
+include::modules/helm-adding-helm-chart-repositories.adoc[leveloffset=+1]
+
+include::modules/helm-creating-credentials-and-certificates-to-add-helm-repositories.adoc[leveloffset=+1]
+
+include::modules/helm-filtering-helm-charts-by-certification-level.adoc[leveloffset=+1]
+
+include::modules/helm-disabling-helm-chart-repositories.adoc[leveloffset=+1]
diff --git a/cnv/cnv_users_guide/images b/applications/working_with_helm_charts/images
similarity index 100%
rename from cnv/cnv_users_guide/images
rename to applications/working_with_helm_charts/images
diff --git a/applications/working_with_helm_charts/installing-helm.adoc b/applications/working_with_helm_charts/installing-helm.adoc
new file mode 100644
index 000000000000..c72a38f455f7
--- /dev/null
+++ b/applications/working_with_helm_charts/installing-helm.adoc
@@ -0,0 +1,105 @@
+[id="installing-helm"]
+= Installing Helm
+include::modules/common-attributes.adoc[]
+:context: installing-helm
+
+toc::[]
+
+The following section describes how to install Helm on different platforms using the CLI.
+
+You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*.
+
+.Prerequisites
+* You have installed Go, version 1.13 or higher.
+
+== On Linux
+
+. Download the Helm binary and add it to your path:
+
+* Linux (x86_64, amd64)
++
+[source,terminal]
+----
+# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-amd64 -o /usr/local/bin/helm
+----
+
+* Linux on IBM Z and LinuxONE (s390x)
++
+[source,terminal]
+----
+# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-s390x -o /usr/local/bin/helm
+----
+
+* Linux on IBM Power Systems (ppc64le)
++
+[source,terminal]
+----
+# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-linux-ppc64le -o /usr/local/bin/helm
+----
+
+. Make the binary file executable:
++
+[source,terminal]
+----
+# chmod +x /usr/local/bin/helm
+----
+
+. Check the installed version:
++
+[source,terminal]
+----
+$ helm version
+----
++
+.Example output
+[source,terminal]
+----
+version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"}
+----
+
+== On Windows 7/8
+
+. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put it in a directory of your preference.
+. Right-click *Start* and click *Control Panel*.
+. Select *System and Security* and then click *System*.
+. From the menu on the left, select *Advanced systems settings* and click *Environment Variables* at the bottom.
+. Select *Path* from the *Variable* section and click *Edit*.
+. Click *New* and type the path to the folder with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*.
+
+== On Windows 10
+
+. Download the latest link:https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-windows-amd64.exe[`.exe` file] and put it in a directory of your preference.
+. Click *Search* and type `env` or `environment`.
+. Select *Edit environment variables for your account*.
+. Select *Path* from the *Variable* section and click *Edit*.
+. Click *New* and type the path to the directory with the `.exe` file into the field or click *Browse* and select the directory, and click *OK*.
+
+
+== On macOS
+. Download the Helm binary and add it to your path:
++
+[source,terminal]
+----
+# curl -L https://mirror.openshift.com/pub/openshift-v4/clients/helm/latest/helm-darwin-amd64 -o /usr/local/bin/helm
+----
+
+
+. Make the binary file executable:
++
+[source,terminal]
+----
+# chmod +x /usr/local/bin/helm
+----
+
+. Check the installed version:
++
+[source,terminal]
+----
+$ helm version
+----
++
+.Example output
+[source,terminal]
+----
+version.BuildInfo{Version:"v3.0", GitCommit:"b31719aab7963acf4887a1c1e6d5e53378e34d93", GitTreeState:"clean", GoVersion:"go1.13.4"}
+----
diff --git a/applications/working_with_helm_charts/modules b/applications/working_with_helm_charts/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/applications/working_with_helm_charts/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
new file mode 100644
index 000000000000..9057460c3f2d
--- /dev/null
+++ b/applications/working_with_helm_charts/odc-working-with-helm-releases.adoc
@@ -0,0 +1,20 @@
+[id="odc-working-with-helm-releases"]
+= Working with Helm releases
+include::modules/common-attributes.adoc[]
+:context: working-with-helm-releases
+
+toc::[]
+
+
+You can use the *Developer* perspective in the web console to upgrade, roll back, or uninstall a Helm release.
+
+
+== Prerequisites
+
+* You have logged in to the web console and have switched to the xref:../../web_console/odc-about-developer-perspective.adoc#odc-about-developer-perspective[*Developer* perspective].
+
+include::modules/odc-upgrading-helm-release.adoc[leveloffset=+1]
+
+include::modules/odc-rolling-back-helm-release.adoc[leveloffset=+1]
+
+include::modules/odc-uninstalling-helm-release.adoc[leveloffset=+1]
diff --git a/applications/working_with_helm_charts/understanding-helm.adoc b/applications/working_with_helm_charts/understanding-helm.adoc
new file mode 100644
index 000000000000..5a82e0c03f80
--- /dev/null
+++ b/applications/working_with_helm_charts/understanding-helm.adoc
@@ -0,0 +1,36 @@
+[id="understanding-helm"]
+= Understanding Helm
+include::modules/common-attributes.adoc[]
+:context: understanding-helm
+
+toc::[]
+
+Helm is a software package manager that simplifies deployment of applications and services to {product-title} clusters.
+
+Helm uses a packaging format called _charts_.
+A Helm chart is a collection of files that describes the {product-title} resources.
+
+A running instance of the chart in a cluster is called a _release_. A new release is created every time a chart is installed on the cluster.
+
+Each time a chart is installed, or a release is upgraded or rolled back, an incremental revision is created.
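+
+For example, a sketch of this lifecycle using the Helm CLI; the release and chart names are illustrative:
+
+[source,terminal]
+----
+$ helm install my-release example-repo/example-chart <1>
+$ helm upgrade my-release example-repo/example-chart <2>
+$ helm history my-release <3>
+----
+<1> Installs the chart and creates revision 1 of the release.
+<2> Upgrades the release, creating revision 2.
+<3> Lists the revisions recorded for the release.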
+
+
+== Key features
+
+Helm provides the ability to:
+
+* Search through a large collection of charts stored in the chart repository.
+* Modify existing charts.
+* Create your own charts with {product-title} or Kubernetes resources.
+* Package and share your applications as charts.
+
+== Red Hat Certification of Helm charts for OpenShift
+
+You can choose to have Red Hat verify and certify your Helm charts for all of the components that you deploy on {product-title}. Charts go through an automated Red Hat OpenShift certification workflow that guarantees security compliance as well as best integration and experience with the platform. Certification assures the integrity of the chart and ensures that the Helm chart works seamlessly on Red Hat OpenShift clusters.
+
+== Additional resources
+
+* For more information on how to certify your Helm charts as a Red Hat partner, see link:https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/helm-chart-certification/overview[Red Hat Certification of Helm charts for OpenShift].
+* For more information on OpenShift and Container certification guides for Red Hat partners, see link:https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/[Partner Guide for OpenShift and Container Certification].
+* For a list of the charts, see link:https://charts.openshift.io/index.yaml[the Red Hat Helm index file].
+* You can view the available charts at the link:https://marketplace.redhat.com/en-us/documentation/access-red-hat-marketplace[Red Hat Marketplace]. For more information, see xref:../../applications/red-hat-marketplace.adoc#red-hat-marketplace[Using the Red Hat Marketplace].
diff --git a/architecture/admission-plug-ins.adoc b/architecture/admission-plug-ins.adoc
new file mode 100644
index 000000000000..67bf668d81b9
--- /dev/null
+++ b/architecture/admission-plug-ins.adoc
@@ -0,0 +1,29 @@
+[id="admission-plug-ins"]
+= Admission plug-ins
+include::modules/common-attributes.adoc[]
+:context: admission-plug-ins
+
+toc::[]
+
+// Concept modules
+include::modules/admission-plug-ins-about.adoc[leveloffset=+1]
+
+include::modules/admission-plug-ins-default.adoc[leveloffset=+1]
+
+include::modules/admission-webhooks-about.adoc[leveloffset=+1]
+
+include::modules/admission-webhook-types.adoc[leveloffset=+1]
+
+// Procedure module
+include::modules/configuring-dynamic-admission.adoc[leveloffset=+1]
+
+[id="admission-plug-ins-additional-resources"]
+== Additional resources
+
+ifdef::openshift-enterprise,openshift-webscale[]
+* xref:../networking/hardware_networks/configuring-sriov-operator.adoc#configuring-sriov-operator[Limiting custom network resources managed by the SR-IOV network device plug-in]
+endif::[]
+
+* xref:../nodes/scheduling/nodes-scheduler-taints-tolerations.adoc#nodes-scheduler-taints-tolerations_dedicating_nodes-scheduler-taints-tolerations[Defining tolerations that enable taints to qualify which pods should be scheduled on a node]
+
+* xref:../nodes/pods/nodes-pods-priority.adoc#admin-guide-priority-preemption-names_nodes-pods-priority[Pod priority class validation]
diff --git a/architecture/architecture-installation.adoc b/architecture/architecture-installation.adoc
index 64e6c8868d46..601f81e3d22c 100644
--- a/architecture/architecture-installation.adoc
+++ b/architecture/architecture-installation.adoc
@@ -2,8 +2,29 @@
= Installation and update
include::modules/common-attributes.adoc[]
:context: architecture-installation
+
toc::[]
include::modules/installation-overview.adoc[leveloffset=+1]
-include::modules/update-service-overview.adoc[leveloffset=+1]
\ No newline at end of file
+include::modules/supported-platforms-for-openshift-clusters.adoc[leveloffset=+2]
+
+include::modules/installation-process.adoc[leveloffset=+2]
+
+[discrete]
+=== Installation scope
+
+The scope of the {product-title} installation program is intentionally narrow. It is designed for simplicity and to ensure success. You can complete many more configuration tasks after installation completes.
+
+.Additional resources
+
+* See xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations] for details about {product-title} configuration resources.
+
+include::modules/update-service-overview.adoc[leveloffset=+1]
+
+include::modules/unmanaged-operators.adoc[leveloffset=+1]
+
+[id="architecture-installation-next-steps"]
+== Next steps
+
+* xref:../installing/installing-preparing.adoc#installing-preparing[Selecting a cluster installation method and preparing it for users]
diff --git a/architecture/architecture-rhcos.adoc b/architecture/architecture-rhcos.adoc
index f573456ec75e..90c77e103f65 100644
--- a/architecture/architecture-rhcos.adoc
+++ b/architecture/architecture-rhcos.adoc
@@ -2,6 +2,7 @@
= {op-system-first}
include::modules/common-attributes.adoc[]
:context: architecture-rhcos
+
toc::[]
include::modules/rhcos-about.adoc[leveloffset=+1]
diff --git a/architecture/architecture.adoc b/architecture/architecture.adoc
index a7264a400e2a..92ac6d933a2f 100644
--- a/architecture/architecture.adoc
+++ b/architecture/architecture.adoc
@@ -2,6 +2,7 @@
= {product-title} architecture
include::modules/common-attributes.adoc[]
:context: architecture
+
toc::[]
include::modules/architecture-platform-introduction.adoc[leveloffset=+1]
@@ -15,7 +16,7 @@ include::modules/architecture-platform-benefits.adoc[leveloffset=+2]
== User facing components
* Workloads (Deployments, Jobs, ReplicaSets, etc)
* Operator Lifecycle Manager
-* xref:../builds/understanding-image-builds.adoc[Builds] - The build component
+* xref:../cicd/builds/understanding-image-builds.adoc[Builds] - The build component
provides an API and infrastructure for producing new container images using a
variety of techniques including industry standard Dockerfiles and publishing
them to either the cluster image registry, or an external registry. It also
@@ -24,7 +25,7 @@ workflows.
* xref:../registry/architecture-component-imageregistry.adoc[Image Registry] -
The image registry provides a scalable repository for storing and retrieving
container images that are produced by and run on the cluster. Image access is
-integrated with the cluster’s role-based access controls and user authentication
+integrated with the cluster's role-based access controls and user authentication
system.
* xref:../openshift_images/images-understand.adoc[Image
streams] - The imagestream API provides an abstraction over container images
diff --git a/architecture/argocd.adoc b/architecture/argocd.adoc
new file mode 100644
index 000000000000..8d45b22ec45e
--- /dev/null
+++ b/architecture/argocd.adoc
@@ -0,0 +1,24 @@
+[id="argocd"]
+= Using ArgoCD with {product-title}
+include::modules/common-attributes.adoc[]
+
+:context: argocd
+
+toc::[]
+
+[id="argocd-what"]
+== What does ArgoCD do?
+
+ArgoCD is a declarative continuous delivery tool that leverages GitOps to maintain cluster resources. ArgoCD is implemented as a controller that continuously monitors application definitions and configurations defined in a Git repository and compares the specified state of those configurations with their live state on the cluster. Configurations that deviate from their specified state in the Git repository are classified as OutOfSync. ArgoCD reports these differences and allows administrators to automatically or manually resync configurations to the defined state.
+
+ArgoCD enables you to deliver global custom resources, like the resources that are used to configure {product-title} clusters.
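+
+For example, a minimal sketch of an ArgoCD `Application` resource; the repository URL, path, and resource name are placeholders:
+
+[source,yaml]
+----
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: cluster-config
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example/cluster-config.git
+    path: config
+    targetRevision: main
+  destination:
+    server: https://kubernetes.default.svc <1>
+  syncPolicy:
+    automated: {} <2>
+----
+<1> Targets the cluster that ArgoCD itself runs on.
+<2> Automatically resyncs resources that drift from the state defined in Git.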
+
+[id="argocd-support"]
+== Statement of support
+
+Red Hat does not provide support for this tool. To obtain support for ArgoCD, see link:https://argoproj.github.io/argo-cd/SUPPORT/[Support] in the ArgoCD documentation.
+
+[id="argocd-documentation"]
+== ArgoCD documentation
+
+For more information about using ArgoCD, see the link:https://argoproj.github.io/argo-cd/[ArgoCD documentation].
diff --git a/architecture/cicd_gitops.adoc b/architecture/cicd_gitops.adoc
new file mode 100644
index 000000000000..5ac7cd4f4b21
--- /dev/null
+++ b/architecture/cicd_gitops.adoc
@@ -0,0 +1,59 @@
+[id="cicd_gitops"]
+= The CI/CD methodology and practice
+include::modules/common-attributes.adoc[]
+:context: cicd_gitops
+
+toc::[]
+
+Using a _continuous integration/continuous delivery_ (CI/CD) methodology enables you to regularly deliver applications to customers by introducing automation into the stages of application development, from integration and testing phases to delivery and deployment. The CI/CD process is often referred to as a "CI/CD pipeline." The main concepts attributed to CI/CD are continuous integration, continuous delivery, and continuous deployment.
+
+[id="cicd_admin"]
+== CI/CD for cluster administration and application configuration management
+
+_Continuous integration_ is an automation process for developers. Code changes to an application are regularly built, tested, and merged to a shared repository.
+
+_Continuous delivery_ and _continuous deployment_ are closely related concepts that are sometimes used interchangeably and refer to automation of the pipeline.
+Continuous delivery uses automation to ensure that a developer's changes to an application are tested and sent to a repository, where an operations team can deploy them to a production environment. Continuous deployment enables the release of changes, starting from the repository and ending in production. Continuous deployment speeds up application delivery and prevents the operations team from getting overloaded.
+
+[id="cicd_gitops_methodology"]
+== The GitOps methodology and practice
+
+_GitOps_ is a set of practices that use Git pull requests to manage infrastructure and application configurations. The Git repository in GitOps is the only source of truth for system and application configuration. The repository contains the entire state of the system so that the trail of changes to the system state are visible and auditable. GitOps enables you to implement a DevOps methodology.
+
+You can use GitOps tooling to create repeatable and predictable processes for managing and recreating {product-title} clusters and applications. By using GitOps, you can address the issues of infrastructure and application configuration sprawl. It simplifies the propagation of infrastructure and application configuration changes across multiple clusters by defining your infrastructure and application definitions as "code." Implementing GitOps for your cluster configuration files can make automated installation easier and allow you to configure automated cluster customizations. You can apply the core principles of developing and maintaining software in a Git repository to the creation and management of your cluster and application configuration files.
+
+By using {product-title} to automate both your cluster configuration and container development process, you can pick and choose where and when to adopt GitOps practices. Using a CI pipeline that pairs with your GitOps strategy and execution plan is ideal. {product-title} provides the flexibility to choose when and how you integrate this methodology into your business practices and pipelines.
+
+With GitOps integration, you can declaratively configure and store your {product-title} cluster configuration. GitOps works well with {product-title} because you can both declaratively configure clusters and store the state of the cluster configuration in Git. For more information, see xref:../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Available cluster customizations].
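+
+As an illustration, a cluster configuration manifest such as the following, with an illustrative identity provider name and secret, can be stored in Git and applied by your GitOps tooling:
+
+[source,yaml]
+----
+apiVersion: config.openshift.io/v1
+kind: OAuth
+metadata:
+  name: cluster
+spec:
+  identityProviders:
+  - name: local-users
+    mappingMethod: claim
+    type: HTPasswd
+    htpasswd:
+      fileData:
+        name: htpass-secret <1>
+----
+<1> References a secret in the `openshift-config` namespace that holds the htpasswd file.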
+
+[id="cicd_gitops_cluster_administration"]
+=== GitOps for single-cluster and multi-cluster administration
+
+Whether you need one or more independent or cooperative {product-title} clusters, you can use a GitOps strategy to manage the following tasks:
+
+* Ensure that the clusters have similar states for configuration, monitoring, or storage.
+* Recover or recreate clusters from a known state.
+* Create clusters with a known state.
+* Apply or revert configuration changes to multiple {product-title} clusters.
+* Associate templated configuration with different environments.
+
+[id="cicd_gitops_application_configuration"]
+=== GitOps for application configuration management
+
+You can also use GitOps practices to manage application configuration. This practice ensures consistency in applications when you deploy them to different clusters in different environments, like development, stage, and production. Managing application configuration with GitOps is also beneficial when you must deploy applications across multiple clusters, whether in the cloud or on-premises, for availability and scalability purposes.
+
+You can use a GitOps strategy to:
+
+* Promote applications across clusters, from stage to production.
+* Apply or revert application changes to multiple {product-title} clusters.
+
+[id="cicd_gitops_integrators"]
+=== GitOps technology providers and integrators
+
+There are several community offerings and third-party vendors that provide a high level of integration with {product-title}.
+
+You can integrate GitOps into {product-title} with the following community partners and third-party integrators:
+
+* xref:../architecture/argocd.adoc#argocd[ArgoCD]
diff --git a/architecture/control-plane.adoc b/architecture/control-plane.adoc
index 4abc49935aad..8f574964dfe6 100644
--- a/architecture/control-plane.adoc
+++ b/architecture/control-plane.adoc
@@ -2,14 +2,21 @@
= The {product-title} control plane
include::modules/common-attributes.adoc[]
:context: control-plane
+
toc::[]
include::modules/understanding-control-plane.adoc[leveloffset=+1]
+include::modules/architecture-machine-config-pools.adoc[leveloffset=+2]
+
include::modules/architecture-machine-roles.adoc[leveloffset=+2]
include::modules/operators-overview.adoc[leveloffset=+2]
include::modules/update-service-overview.adoc[leveloffset=+3]
-include::modules/understanding-machine-config-operator.adoc[leveloffset=+3]
\ No newline at end of file
+include::modules/understanding-machine-config-operator.adoc[leveloffset=+3]
+
+.Additional resources
+
+For information on preventing the control plane machines from rebooting after the Machine Config Operator makes changes to the machine config, see xref:../support/troubleshooting/troubleshooting-operator-issues.adoc#troubleshooting-disabling-autoreboot-mco_troubleshooting-operator-issues[Disabling Machine Config Operator from automatically rebooting].
diff --git a/architecture/understanding-development.adoc b/architecture/understanding-development.adoc
index 244faaaba7a7..afe39cececa4 100644
--- a/architecture/understanding-development.adoc
+++ b/architecture/understanding-development.adoc
@@ -2,6 +2,7 @@
= Understanding {product-title} development
include::modules/common-attributes.adoc[]
:context: understanding-development
+
toc::[]
To fully leverage the capability of containers when developing and running
@@ -72,11 +73,11 @@ The following diagram displays the process of building and pushing an image:
.Create a simple containerized application and push it to a registry
image::create-push-app.png[Creating and pushing a containerized application]
-If you use a computer that runs Red Hat Enterprise Linux (RHEL) as the operating
+If you use a computer that runs {op-system-base-full} as the operating
system, the process of creating a containerized application requires the
following steps:
-. Install container build tools: RHEL contains a set of tools that includes
+. Install container build tools: {op-system-base} contains a set of tools that includes
podman, buildah, and skopeo that you use to build and manage containers.
. Create a Dockerfile to combine base image and software: Information about
building your container goes into a file that is named `Dockerfile`. In that
@@ -84,10 +85,10 @@ file, you identify the base image you build from, the software packages you
install, and the software you copy into the container. You also identify
parameter values like network ports that you expose outside the container and
volumes that you mount inside the container. Put your Dockerfile and the
-software you want to containerized in a directory on your RHEL system.
+software you want to containerize in a directory on your {op-system-base} system.
. Run buildah or docker build: Run the `buildah build-using-dockerfile` or
-the `docker build` command to pull you chosen base image to the local system and
-creates a container image that is stored locally. You can also build container
+the `docker build` command to pull your chosen base image to the local system and
+create a container image that is stored locally. You can also build container images
without a Dockerfile by using buildah.
. Tag and push to a registry: Add a tag to your new container image that
identifies the location of the registry in which you want to store and share
@@ -100,28 +101,25 @@ command. Here `` is the name of your new container image, which
resembles `quay.io/myrepo/myapp:latest`. The registry might require credentials
to push and pull images.
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
For more details on the process of building container images, pushing them to
registries, and running them, see
-xref:../builds/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah].
+xref:../cicd/builds/custom-builds-buildah.adoc#custom-builds-buildah[Custom image builds with Buildah].
+endif::openshift-origin,openshift-enterprise,openshift-webscale[]
[id="container-build-tool-options"]
=== Container build tool options
-While the Docker Container Engine and `docker` command are popular tools
-to work with containers, with RHEL and many other Linux systems, you can
-instead choose a different set of container tools that includes podman, skopeo,
-and buildah. You can still use Docker Container Engine tools to create
-containers that will run in {product-title} and any other container platform.
+Building and managing containers with buildah, podman, and skopeo results in industry standard container images that include features specifically tuned for deploying containers in {product-title} or other Kubernetes environments. These tools are daemonless and can run without root privileges, requiring less overhead to run them.
-Building and managing containers with buildah, podman, and skopeo results in
-industry standard container images that include features tuned specifically
-for ultimately deploying those containers in {product-title} or other Kubernetes
-environments. These tools are daemonless and can be run without root privileges,
-so there is less overhead in running them.
+[IMPORTANT]
+====
+Support for Docker Container Engine as a container runtime is deprecated in Kubernetes 1.20 and will be removed in a future release. However, Docker-produced images will continue to work in your cluster with all runtimes, including CRI-O. For more information, see the link:https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/[Kubernetes blog announcement].
+====
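+
+For example, a minimal sketch of this daemonless workflow with podman, reusing the `quay.io/myrepo/myapp:latest` image name from the earlier example:
+
+[source,terminal]
+----
+$ podman build -t quay.io/myrepo/myapp:latest . <1>
+$ podman login quay.io <2>
+$ podman push quay.io/myrepo/myapp:latest <3>
+----
+<1> Builds an image from the Dockerfile in the current directory and tags it.
+<2> Supplies credentials for the target registry.
+<3> Pushes the tagged image to the registry.
+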
When you ultimately run your containers in {product-title}, you use the
link:https://cri-o.io/[CRI-O] container engine. CRI-O runs on every worker and
-master machine in an {product-title} cluster, but CRI-O is not yet supported as
+control plane machine in an {product-title} cluster, but CRI-O is not yet supported as
a standalone runtime outside of {product-title}.
[id="base-image-options"]
@@ -146,7 +144,7 @@ These UBI images have standard, init, and minimal versions. You can also use the
link:https://access.redhat.com/documentation/en-us/red_hat_software_collections/3/html-single/using_red_hat_software_collections_container_images/index[Red Hat Software Collections]
images as a foundation for applications that rely on specific runtime
environments such as Node.js, Perl, or Python. Special versions of some of
-these runtime base images referred to as Source-to-image (S2I) images. With
+these runtime base images are referred to as Source-to-Image (S2I) images. With
S2I images, you can insert your code into a base image environment that is ready
to run that code.
@@ -156,10 +154,10 @@ by selecting *Catalog* -> *Developer Catalog*, as shown in the following figure:
.Choose S2I base images for apps that need specific runtimes
image::developer-catalog.png[{product-title} Developer Catalog]
-[id="registry-options"]
+[id="understanding-development-registry-options"]
=== Registry options
-Container Registries are where you store container images so you can share them
+Container registries are where you store container images so you can share them
with others and make them available to the platform where they ultimately run.
You can select large, public container registries that offer free accounts or a
premium version that offer more storage and special features. You can also
@@ -171,7 +169,7 @@ Red Hat Registry. The Red Hat Registry is represented by two locations:
`registry.access.redhat.com`, which is unauthenticated and deprecated, and
`registry.redhat.io`, which requires authentication. You can learn about the Red
Hat and partner images in the Red Hat Registry from the
-link:https://registry.redhat.io/[Red Hat Container Catalog].
+link:https://catalog.redhat.com/software/containers/explore[Container images section of the Red Hat Ecosystem Catalog].
Besides listing Red Hat container images, it also shows extensive information
about the contents and quality of those images, including health scores that are
based on applied security updates.
@@ -181,7 +179,7 @@ link:https://quay.io/[Quay.io]. The Quay.io registry is owned and managed by Red
Hat. Many of the components used in {product-title} are stored in Quay.io,
including container images and the Operators that are used to deploy
{product-title} itself. Quay.io also offers the means of storing other types of
-content, including Helm Charts.
+content, including Helm charts.
If you want your own, private container registry, {product-title} itself
includes a private container registry that is installed with {product-title}
@@ -195,7 +193,7 @@ from those registries. Some of those credentials are presented on a cluster-wide
basis from {product-title}, while other credentials can be assigned to individuals.
[id="creating-kubernetes-manifest-openshift"]
-== Creating a Kubernetes Manifest for {product-title}
+== Creating a Kubernetes manifest for {product-title}
While the container image is the basic building block for a containerized
application, more information is required to manage and deploy that application
@@ -211,34 +209,34 @@ to the next environment, roll it back to earlier versions, if necessary, and
share it with others
[id="understanding-kubernetes-pods"]
-=== About Kubernetes Pods and services
+=== About Kubernetes pods and services
While the container image is the basic unit with docker, the basic units that
Kubernetes works with are called
-link:https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[Pods].
-Pods represent the next step in building out an application. A Pod can contain
-one or more than one container. The key is that the Pod is the single unit
+link:https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/[pods].
+Pods represent the next step in building out an application. A pod can contain
+one or more than one container. The key is that the pod is the single unit
that you deploy, scale, and manage.
Scalability and namespaces are probably the main items to consider when determining
-what goes in a Pod. For ease of deployment, you might want to deploy a container
-in a Pod and include its own logging and monitoring container in the Pod. Later,
-when you run the Pod and need to scale up an additional instance, those other
-containers are scaled up with it. For namespaces, containers in a Pod share the
+what goes in a pod. For ease of deployment, you might want to deploy a container
+in a pod and include its own logging and monitoring container in the pod. Later,
+when you run the pod and need to scale up an additional instance, those other
+containers are scaled up with it. For namespaces, containers in a pod share the
same network interfaces, shared storage volumes, and resource limitations,
-such as memory and CPU, which makes it easier to manage the contents of the Pod
-as a single unit. Containers in a Pod can also communicate with each other by
+such as memory and CPU, which makes it easier to manage the contents of the pod
+as a single unit. Containers in a pod can also communicate with each other by
using standard inter-process communications, such as System V semaphores or
POSIX shared memory.
-While individual Pods represent a scalable unit in Kubernetes, a
+While individual pods represent a scalable unit in Kubernetes, a
link:https://kubernetes.io/docs/concepts/services-networking/service/[service]
-provides a means of grouping together a set of Pods to create a complete, stable
+provides a means of grouping together a set of pods to create a complete, stable
application that can complete tasks such as load balancing. A service is also
-more permanent than a Pod because the service remains available from the same
+more permanent than a pod because the service remains available from the same
IP address until you delete it. When the service is in use, it is requested by
name and the {product-title} cluster resolves that name into the IP addresses
-and ports where you can reach the Pods that compose the service.
+and ports where you can reach the pods that compose the service.
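+
+For example, a minimal service that groups the pods labeled `app: hello` might
+look like the following; the name, label, and ports are illustrative:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Service
+metadata:
+  name: hello-service    # clients request the service by this stable name
+spec:
+  selector:
+    app: hello           # groups every pod that carries this label
+  ports:
+  - port: 8443           # port the service exposes
+    targetPort: 8080     # port the containers listen on
+----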
By their nature, containerized applications are separated from the operating
systems where they run and, by extension, their users. Part of your Kubernetes
@@ -248,20 +246,20 @@ link:https://kubernetes.io/docs/concepts/services-networking/network-policies/[n
that allow fine-grained control over communication with your containerized
applications. To connect incoming requests for HTTP, HTTPS, and other services
from outside your cluster to services inside your cluster, you can use an
-link:https://kubernetes.io/docs/concepts/services-networking/ingress/[Ingress]
+link:https://kubernetes.io/docs/concepts/services-networking/ingress/[`Ingress`]
resource.
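+
+A minimal `Ingress` resource along these lines routes external HTTP traffic for
+a hypothetical hostname to the service sketched above; all values are
+illustrative:
+
+[source,yaml]
+----
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: hello-ingress
+spec:
+  rules:
+  - host: hello.example.com       # hypothetical external hostname
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: hello-service   # the service that receives the traffic
+            port:
+              number: 8443
+----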
If your container requires on-disk storage instead of database storage, which
might be provided through a service, you can add
link:https://kubernetes.io/docs/concepts/storage/volumes/[volumes]
-to your manifests to make that storage available to your Pods. You can configure
-the manifests to create physical volumes (PVs) or dynamically create volumes that
-are added to your Pod definitions.
+to your manifests to make that storage available to your pods. You can configure
+the manifests to create persistent volumes (PVs) or dynamically create volumes that
+are added to your `Pod` definitions.
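+
+For instance, a persistent volume claim such as the following requests storage
+that can then be mounted into a pod; the name and size are illustrative:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: hello-storage
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi    # dynamically provisioned if a default storage class exists
+# the claim is then referenced from a pod spec:
+#   volumes:
+#   - name: data
+#     persistentVolumeClaim:
+#       claimName: hello-storage
+----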
-After you define a group of Pods that compose your application, you can define
-those Pods in
-link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[deployments]
-and xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[deploymentconfigs].
+After you define a group of pods that compose your application, you can define
+those pods in
+link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[`Deployment`]
+and xref:../applications/deployments/what-deployments-are.adoc#what-deployments-are[`DeploymentConfig`] objects.
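+
+As an illustration, a minimal `Deployment` object that keeps two replicas of
+the example pod running might look like this; names and images are placeholders:
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hello-deployment
+spec:
+  replicas: 2                # two pod instances for availability
+  selector:
+    matchLabels:
+      app: hello
+  template:
+    metadata:
+      labels:
+        app: hello
+    spec:
+      containers:
+      - name: app
+        image: quay.io/example/hello:latest   # hypothetical image
+----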
[id="application-types"]
=== Application types
@@ -276,26 +274,25 @@ application, consider if the application is:
starts up to produce a report and exits when the report is complete. The
application might not run again then for a month. Suitable {product-title}
objects for these types of applications include
-link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[Jobs]
-and https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[CronJob] objects.
+link:https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/[`Job`]
+and link:https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/[`CronJob`] objects. See the
+example sketch after this list.
* Expected to run continuously. For long-running applications, you can write a
-xref:../applications/deployments/what-deployments-are.adoc#deployments-kube-deployments[Deployment]
-or a xref:../applications/deployments/what-deployments-are.adoc#deployments-and-deploymentconfigs[DeploymentConfig].
+xref:../applications/deployments/what-deployments-are.adoc#deployments-kube-deployments[deployment].
* Required to be highly available. If your application requires high
availability, then you want to size your deployment to have more than one
-instance. A Deployment or DeploymentConfig can incorporate a
-link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[ReplicaSet]
-for that type of application. With ReplicaSets, Pods run across multiple nodes
+instance. A `Deployment` or `DeploymentConfig` object can incorporate a
+link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[replica set]
+for that type of application. With replica sets, pods run across multiple nodes
to make sure the application is always available, even if a worker goes down.
* Needed to run on every node. Some types of Kubernetes applications are intended
to run in the cluster itself on every master or worker node. DNS and monitoring
applications are examples of applications that need to run continuously on every
node. You can run this type of application as a
-link:https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[DaemonSet].
-You can also run a DaemonSet on a subset of nodes, based on node labels.
+link:https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[daemon set].
+You can also run a daemon set on a subset of nodes, based on node labels.
* In need of life-cycle management. When you want to hand off your application so
that others can use it, consider creating an
-link:https://coreos.com/operators/[Operator]. Operators let you build in
+link:https://www.openshift.com/learn/topics/operators[Operator]. Operators let you build in
intelligence, so that your application can handle tasks like backups and upgrades automatically.
Using the Operator Lifecycle Manager (OLM), cluster managers can expose
Operators to selected namespaces so that users in the cluster can run them.
@@ -303,8 +300,8 @@ Operators to selected namespaces so that users in the cluster can run them.
requirements or numbering requirements. For example, you might be
required to run exactly three instances of the application and to name the
instances `0`, `1`, and `2`. A
-https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[StatefulSet]
-is suitable for this application. StatefulSets are most useful for applications
+link:https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/[stateful set]
+is suitable for this application. Stateful sets are most useful for applications
that require independent storage, such as databases and ZooKeeper clusters.
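+
+As an example sketch for the run-once case in this list, the following
+`CronJob` object runs a hypothetical report generator on the first day of
+every month; the name and image are illustrative:
+
+[source,yaml]
+----
+apiVersion: batch/v1          # use batch/v1beta1 on older clusters
+kind: CronJob
+metadata:
+  name: monthly-report
+spec:
+  schedule: "0 0 1 * *"       # first day of every month at midnight
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          restartPolicy: OnFailure
+          containers:
+          - name: report
+            image: quay.io/example/report:latest   # hypothetical image
+----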
[id="supporting-components"]
@@ -321,25 +318,14 @@ certified Red Hat partners, and community members to the cluster operator. The
cluster operator can make those Operators available in all or selected
namespaces in the cluster, so developers can launch them and configure them
with their applications.
-* Service Catalog, which offers alternatives to Operators. While deploying
-Operators is the preferred method of getting packaged
-applications in {product-title}, there are some reasons why you might
-want to use the Service Catalog to get supporting applications for your own
-application. You might want to use the Service Catalog if you are an existing
-{product-title} 3 customer and are
-invested in Service Catalog applications or if you already have a Cloud Foundry
-environment from which you are interested in consuming brokers from other ecosystems.
* Templates, which are useful for a one-off type of application, where the
lifecycle of a component is not important after it is installed. A template provides an easy
way to get started developing a Kubernetes application with minimal overhead.
-A template can be a list of resource definitions, which could be deployments,
-services, routes, or other objects. If you want to change names or resources,
+A template can be a list of resource definitions, which could be `Deployment`,
+`Service`, `Route`, or other objects. If you want to change names or resources,
you can set these values as parameters in the template.
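+
+For example, a minimal template that parameterizes the name of a single
+service might look like the following; the resource list and parameter are
+illustrative:
+
+[source,yaml]
+----
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  name: hello-template
+parameters:
+- name: NAME                  # substituted wherever ${NAME} appears
+  required: true
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: "${NAME}"
+  spec:
+    ports:
+    - port: 8080
+    selector:
+      name: "${NAME}"
+----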
-The Template Service Broker Operator is a service broker that you can use to
-instantiate your own templates. You can also install templates directly from the
-command line.
-You can configure the supporting Operators, Service Catalog applications, and
+You can configure the supporting Operators and
templates to the specific needs of your development team and then make them
available in the namespaces in which your developers work. Many people add
shared templates to the `openshift` namespace because it is accessible from all
@@ -387,9 +373,9 @@ like updating the Operator, can happen automatically and invisibly to the
Operator's users.
An example of a useful Operator is one that is set up to automatically back up
-data at particular times. Having an Operator manage an application’s backup at
+data at particular times. Having an Operator manage an application's backup at
set times can save a system administrator from remembering to do it.
Any application maintenance that has traditionally been completed manually,
like backing up data or rotating certificates, can be completed automatically
-with an Operator.
\ No newline at end of file
+with an Operator.
diff --git a/asciibinder-template.yml b/asciibinder-template.yml
index bde96db99ff3..4e471db1cfa6 100644
--- a/asciibinder-template.yml
+++ b/asciibinder-template.yml
@@ -5,7 +5,7 @@ metadata:
name: "public-asciibinder-website"
annotations:
openshift.io/display-name: "Public AsciiBinder Static Website"
- description: "Takes files created for static site generator AsciiBinder (http://asciibinder.org/) from a public source repo, builds them in Ruby, and deploys them in a PHP container."
+ description: "Takes files created for static site generator AsciiBinder (https://github.com/redhataccess/ascii_binder/) from a public source repo, builds them in Ruby, and deploys them in a PHP container."
tags: "ruby,asciibinder"
iconClass: "icon-shadowman"
template.openshift.io/long-description: "This template defines resources needed to develop an AsciiBinder application, including a build configuration and application DeploymentConfig."
@@ -17,9 +17,9 @@ objects:
- kind: "Service"
apiVersion: "v1"
metadata:
- name: "commercial-${NAME}"
+ name: "${NAME}"
annotations:
- description: "Exposes and load balances the frontend application pods for the commercial deployment"
+ description: "Exposes and load balances the frontend application pods for the deployment"
labels:
app: "${NAME}"
spec:
@@ -28,59 +28,24 @@ objects:
port: 8443
targetPort: 8080
selector:
- name: "commercial-${NAME}"
-
- - kind: "Service"
- apiVersion: "v1"
- metadata:
- name: "community-${NAME}"
- annotations:
- description: "Exposes and load balances the frontend application pods for the community deployment"
- labels:
- app: "${NAME}"
- spec:
- ports:
- - name: "https"
- port: 8443
- targetPort: 8080
- selector:
- name: "community-${NAME}"
+ name: "${NAME}"
- kind: "Route"
apiVersion: "v1"
metadata:
- name: "commercial-${NAME}"
- labels:
- app: "${NAME}"
- spec:
- host: "${COMMERCIAL_APPLICATION_DOMAIN}"
- to:
- kind: "Service"
- name: "commercial-${NAME}"
- weight: 1
- port:
- targetPort: "https"
- tls:
- termination: "edge"
- insecureEdgeTerminationPolicy: "Redirect"
-
- - kind: "Route"
- apiVersion: "v1"
- metadata:
- name: "community-${NAME}"
+ annotations:
+ kubernetes.io/tls-acme: "true"
+ name: "${NAME}"
labels:
app: "${NAME}"
spec:
- host: "${COMMUNITY_APPLICATION_DOMAIN}"
+ host: "${APPLICATION_DOMAIN}"
to:
kind: "Service"
- name: "community-${NAME}"
+ name: "${NAME}"
weight: 1
port:
targetPort: "https"
- tls:
- termination: "edge"
- insecureEdgeTerminationPolicy: "Redirect"
- kind: "ImageStream"
apiVersion: "v1"
@@ -94,18 +59,9 @@ objects:
- kind: "ImageStream"
apiVersion: "v1"
metadata:
- name: "final-commercial-${NAME}"
- annotations:
- description: "Keeps track of changes in the final-commercial-${NAME} application image"
- labels:
- app: "${NAME}"
-
- - kind: "ImageStream"
- apiVersion: "v1"
- metadata:
- name: "final-community-${NAME}"
+ name: "final-${NAME}"
annotations:
- description: "Keeps track of changes in the final-community-${NAME} application image"
+ description: "Keeps track of changes in the final-${NAME} application image"
labels:
app: "${NAME}"
@@ -169,46 +125,9 @@ objects:
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
- name: "final-commercial-${NAME}"
- annotations:
- description: "Defines how to perform final commercial build for ${NAME} before deployment"
- labels:
- app: "${NAME}"
- spec:
- nodeSelector:
- source:
- type: "Images"
- images:
- - from:
- kind: "ImageStreamTag"
- name: "stg1-${NAME}:latest"
- paths:
- - sourcePath: "/opt/app-root/src/commercial_package/commercial/."
- destinationDir: "."
- strategy:
- type: "Source"
- sourceStrategy:
- from:
- kind: "ImageStreamTag"
- name: "httpd-24-rhel7:latest"
- output:
- to:
- kind: "ImageStreamTag"
- name: "final-commercial-${NAME}:latest"
- triggers:
- - imageChange:
- from:
- kind: ImageStreamTag
- name: 'stg1-${NAME}:latest'
- type: "ImageChange"
- - type: "ConfigChange"
-
- - kind: "BuildConfig"
- apiVersion: "v1"
- metadata:
- name: "final-community-${NAME}"
+ name: "final-${NAME}"
annotations:
- description: "Defines how to perform final community build for ${NAME} before deployment"
+ description: "Defines how to perform final build for ${NAME} before deployment"
labels:
app: "${NAME}"
spec:
@@ -220,7 +139,7 @@ objects:
kind: "ImageStreamTag"
name: "stg1-${NAME}:latest"
paths:
- - sourcePath: "/opt/app-root/src/community_package/community/."
+ - sourcePath: "/opt/app-root/src/${DOC_TYPE}_package/${DOC_TYPE}/."
destinationDir: "."
strategy:
type: "Source"
@@ -231,7 +150,7 @@ objects:
output:
to:
kind: "ImageStreamTag"
- name: "final-community-${NAME}:latest"
+ name: "final-${NAME}:latest"
triggers:
- imageChange:
from:
@@ -243,9 +162,9 @@ objects:
- kind: "DeploymentConfig"
apiVersion: "v1"
metadata:
- name: "commercial-${NAME}"
+ name: "${NAME}"
annotations:
- description: "Defines how to deploy the ${COMMERCIAL_APPLICATION_DOMAIN} domain"
+ description: "Defines how to deploy the ${APPLICATION_DOMAIN} domain"
labels:
app: "${NAME}"
spec:
@@ -256,81 +175,24 @@ objects:
imageChangeParams:
automatic: true
containerNames:
- - "commercial-${NAME}"
+ - "${NAME}"
from:
kind: "ImageStreamTag"
- name: "final-commercial-${NAME}:latest"
+ name: "final-${NAME}:latest"
- type: "ConfigChange"
replicas: 1
test: false
selector:
- name: "commercial-${NAME}"
+ name: "${NAME}"
template:
metadata:
- name: "commercial-${NAME}"
+ name: "${NAME}"
labels:
- name: "commercial-${NAME}"
+ name: "${NAME}"
app: "${NAME}"
spec:
containers:
- - name: "commercial-${NAME}"
- ports:
- - containerPort: 8080
- readinessProbe:
- timeoutSeconds: 5
- initialDelaySeconds: 10
- httpGet:
- path: "/${HEALTHCHECK_PATH}"
- port: 8080
- livenessProbe:
- timeoutSeconds: 5
- initialDelaySeconds: 10
- periodSeconds: 30
- httpGet:
- path: "/${HEALTHCHECK_PATH}"
- port: 8080
- resources:
- requests:
- cpu: "${CPU_REQUEST}"
- memory: "${MEMORY_REQUEST}"
- limits:
- cpu: "${CPU_LIMIT}"
- memory: "${MEMORY_LIMIT}"
-
- - kind: "DeploymentConfig"
- apiVersion: "v1"
- metadata:
- name: "community-${NAME}"
- annotations:
- description: "Defines how to deploy the ${COMMUNITY_APPLICATION_DOMAIN} domain"
- labels:
- app: "${NAME}"
- spec:
- strategy:
- type: "Rolling"
- triggers:
- - type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "community-${NAME}"
- from:
- kind: "ImageStreamTag"
- name: "final-community-${NAME}:latest"
- - type: "ConfigChange"
- replicas: 1
- test: false
- selector:
- name: "community-${NAME}"
- template:
- metadata:
- name: "community-${NAME}"
- labels:
- name: "community-${NAME}"
- app: "${NAME}"
- spec:
- containers:
- - name: "community-${NAME}"
+ - name: "${NAME}"
ports:
- containerPort: 8080
readinessProbe:
@@ -355,38 +217,19 @@ objects:
memory: "${MEMORY_LIMIT}"
- kind: "HorizontalPodAutoscaler"
- apiVersion: "extensions/v1beta1"
+ apiVersion: "autoscaling/v1"
metadata:
- name: "commercial-${NAME}"
+ name: "${NAME}"
labels:
app: "${NAME}"
spec:
- scaleRef:
+ scaleTargetRef:
kind: DeploymentConfig
- name: "commercial-${NAME}"
+ name: "${NAME}"
apiVersion: "v1"
- subresource: "scale"
- minReplicas: ${MIN_REPLICAS}
- maxReplicas: ${MAX_REPLICAS}
- cpuUtilization:
- targetPercentage: ${TARGET_PERCENTAGE}
-
- - kind: "HorizontalPodAutoscaler"
- apiVersion: "extensions/v1beta1"
- metadata:
- name: "community-${NAME}"
- labels:
- app: "${NAME}"
- spec:
- scaleRef:
- kind: DeploymentConfig
- name: "community-${NAME}"
- apiVersion: "v1"
- subresource: "scale"
- minReplicas: ${MIN_REPLICAS}
- maxReplicas: ${MAX_REPLICAS}
- cpuUtilization:
- targetPercentage: ${TARGET_PERCENTAGE}
+ minReplicas: 2
+ maxReplicas: 5
+ targetCPUUtilizationPercentage: 75
parameters:
- name: "NAME"
@@ -394,18 +237,18 @@ parameters:
description: "The name assigned to all of the application components defined in this template."
required: true
- - name: "COMMERCIAL_APPLICATION_DOMAIN"
- displayName: "Commercial Application Hostname"
- description: "The exposed hostname that will route to the httpd service for commercial content."
- value: ""
+ - name: "DOC_TYPE"
+ displayName: "Document Type (community|commercial)"
+ description: "The type of documents to build (commercial vs community)."
+ required: true
- - name: "COMMUNITY_APPLICATION_DOMAIN"
- displayName: "Community Application Hostname"
- description: "The exposed hostname that will route to the httpd service for community content."
+ - name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the httpd service for content."
value: ""
- name: "HEALTHCHECK_PATH"
- displayName: "URI path to a known, working web page."
+ displayName: "URI path to a known, working web page"
description: "The URI path to a known, working web page for testing liveness and readiness probes. Exclude leading '/'"
required: true
value: "index.html"
@@ -428,43 +271,25 @@ parameters:
displayName: "CPU Request"
description: "Requested amount of CPU the httpd container will use."
required: true
- value: "200m"
+ value: "50m"
- name: "CPU_LIMIT"
displayName: "CPU Limit"
description: "Maximum amount of CPU the httpd container can use."
required: true
- value: "2"
+ value: "500m"
- name: "MEMORY_REQUEST"
displayName: "Memory Request"
description: "Requested amount of memory the httpd container will use."
required: true
- value: "1Gi"
+ value: "100Mi"
- name: "MEMORY_LIMIT"
displayName: "Memory Limit"
description: "Maximum amount of memory the httpd container can use."
required: true
- value: "2Gi"
-
- - name: "MIN_REPLICAS"
- displayName: "Minimum pod replicas"
- description: "The minimum number of replicas to run in autoscaling."
- required: true
- value: "2"
-
- - name: "MAX_REPLICAS"
- displayName: "Maximum pod replicas"
- description: "The maximum number of replicas to run in autoscaling."
- required: true
- value: "10"
-
- - name: "TARGET_PERCENTAGE"
- displayName: "Autoscaling target CPU percentage (as an integer)."
- description: "The percentage of the requested CPU that each pod should ideally be using."
- required: true
- value: "80"
+ value: "512Mi"
- name: "NAMESPACE"
displayName: "Namespace"
diff --git a/aura.tar.gz b/aura.tar.gz
index bba31907b2c3..929f544f9719 100644
Binary files a/aura.tar.gz and b/aura.tar.gz differ
diff --git a/authentication/bound-service-account-tokens.adoc b/authentication/bound-service-account-tokens.adoc
new file mode 100644
index 000000000000..9efe9ee83c4b
--- /dev/null
+++ b/authentication/bound-service-account-tokens.adoc
@@ -0,0 +1,16 @@
+[id="bound-service-account-tokens"]
+= Using bound service account tokens
+include::modules/common-attributes.adoc[]
+:context: bound-service-account-tokens
+
+toc::[]
+
+You can use bound service account tokens, which improve the ability to integrate with cloud provider identity and access management (IAM) services, such as AWS IAM.
+
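+For instance, a pod can request such a token through a projected volume, as in
+the following sketch; the audience, expiry, and image are illustrative, and the
+modules below cover the supported configuration:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sts-client                       # illustrative name
+spec:
+  serviceAccountName: default
+  containers:
+  - name: app
+    image: quay.io/example/app:latest    # hypothetical image
+    volumeMounts:
+    - name: bound-token
+      mountPath: /var/run/secrets/tokens
+  volumes:
+  - name: bound-token
+    projected:
+      sources:
+      - serviceAccountToken:
+          audience: openshift            # intended audience of the token
+          expirationSeconds: 3600
+          path: token
+----
+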
+// About bound service account tokens
+include::modules/bound-sa-tokens-about.adoc[leveloffset=+1]
+
+// Configuring bound service account tokens using volume projection
+include::modules/bound-sa-tokens-configuring.adoc[leveloffset=+1]
+
+// TODO: Verify distros: openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated
diff --git a/authentication/certificates/service-serving-certificate.adoc b/authentication/certificates/service-serving-certificate.adoc
deleted file mode 100644
index 8ab6572350f1..000000000000
--- a/authentication/certificates/service-serving-certificate.adoc
+++ /dev/null
@@ -1,24 +0,0 @@
-[id="add-service-serving"]
-= Securing service traffic using service serving certificate secrets
-include::modules/common-attributes.adoc[]
-:context: service-serving-certificate
-
-toc::[]
-
-include::modules/customize-certificates-understanding-service-serving.adoc[leveloffset=+1]
-
-[IMPORTANT]
-====
-The service CA certificate is automatically rotated during upgrades, but
-must be
-xref:../../authentication/certificates/service-serving-certificate.adoc#manually-rotate-service-ca_{context}[manually rotated]
-in the event that the cluster is not upgraded.
-====
-
-include::modules/customize-certificates-add-service-serving.adoc[leveloffset=+1]
-
-include::modules/customize-certificates-add-service-serving-configmap.adoc[leveloffset=+1]
-
-include::modules/customize-certificates-rotate-service-serving.adoc[leveloffset=+1]
-
-include::modules/customize-certificates-manually-rotate-service-ca.adoc[leveloffset=+1]
diff --git a/authentication/configuring-internal-oauth.adoc b/authentication/configuring-internal-oauth.adoc
index efb676b2424a..7b23ab02cd6b 100644
--- a/authentication/configuring-internal-oauth.adoc
+++ b/authentication/configuring-internal-oauth.adoc
@@ -2,13 +2,8 @@
= Configuring the internal OAuth server
include::modules/common-attributes.adoc[]
:context: configuring-internal-oauth
-toc::[]
-[IMPORTANT]
-====
-Configuring these options must change because they're set in the master
-config file now.
-====
+toc::[]
include::modules/oauth-server-overview.adoc[leveloffset=+1]
@@ -18,7 +13,9 @@ include::modules/oauth-internal-options.adoc[leveloffset=+1]
include::modules/oauth-configuring-internal-oauth.adoc[leveloffset=+1]
-include::modules/oauth-register-additional-client.adoc[leveloffset=+1]
+include::modules/oauth-configuring-token-inactivity-timeout.adoc[leveloffset=+1]
+
+include::modules/oauth-customizing-the-oauth-server-URL.adoc[leveloffset=+1]
include::modules/oauth-server-metadata.adoc[leveloffset=+1]
diff --git a/authentication/configuring-oauth-clients.adoc b/authentication/configuring-oauth-clients.adoc
new file mode 100644
index 000000000000..5138dc56a1ec
--- /dev/null
+++ b/authentication/configuring-oauth-clients.adoc
@@ -0,0 +1,21 @@
+[id="configuring-oauth-clients"]
+= Configuring OAuth clients
+include::modules/common-attributes.adoc[]
+:context: configuring-oauth-clients
+
+toc::[]
+
+Several OAuth clients are created by default in {product-title}. You can also register and configure additional OAuth clients.
+
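+As an illustration, a registered OAuth client is represented by an
+`OAuthClient` resource shaped like the following; the client name, secret, and
+redirect URI are placeholders, and the modules below describe the procedure:
+
+[source,yaml]
+----
+apiVersion: oauth.openshift.io/v1
+kind: OAuthClient
+metadata:
+  name: demo-client              # the client_id passed in OAuth requests
+secret: "<client-secret>"        # shared secret the client presents
+grantMethod: prompt              # prompt the user, or auto to approve silently
+redirectURIs:
+- "https://www.example.com/"     # where the OAuth server may redirect to
+----
+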
+// Default OAuth clients
+include::modules/oauth-default-clients.adoc[leveloffset=+1]
+
+// Register an additional OAuth client
+include::modules/oauth-register-additional-client.adoc[leveloffset=+1]
+
+// Configuring token inactivity timeout for OAuth clients
+include::modules/oauth-configuring-token-inactivity-timeout-clients.adoc[leveloffset=+1]
+
+== Additional resources
+
+* xref:../rest_api/oauth_apis/oauthclient-oauth-openshift-io-v1.adoc#oauthclient-oauth-openshift-io-v1[OAuthClient [oauth.openshift.io/v1\]]
diff --git a/authentication/dedicated-understanding-authentication.adoc b/authentication/dedicated-understanding-authentication.adoc
new file mode 100644
index 000000000000..22e45b488120
--- /dev/null
+++ b/authentication/dedicated-understanding-authentication.adoc
@@ -0,0 +1,38 @@
+[id="understanding-identity-provider"]
+= Understanding identity provider configuration
+include::modules/common-attributes.adoc[]
+:context: understanding-identity-provider
+
+toc::[]
+
+include::modules/identity-provider-parameters.adoc[leveloffset=+1]
+
+[id="supported-identity-providers"]
+== Supported identity providers
+
+You can configure the following types of identity providers:
+
+[cols="2a,8a",options="header"]
+|===
+
+|Identity provider
+|Description
+
+|xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[LDAP]
+|Configure the `ldap` identity provider to validate user names and passwords
+against an LDAPv3 server, using simple bind authentication.
+
+|xref:../authentication/identity_providers/configuring-github-identity-provider.adoc#configuring-github-identity-provider[GitHub or GitHub Enterprise]
+|Configure a `github` identity provider to validate user names and passwords
+against GitHub or GitHub Enterprise's OAuth authentication server.
+
+|xref:../authentication/identity_providers/configuring-google-identity-provider.adoc#configuring-google-identity-provider[Google]
+|Configure a `google` identity provider using
+link:https://developers.google.com/identity/protocols/OpenIDConnect[Google's OpenID Connect integration].
+
+|xref:../authentication/identity_providers/configuring-oidc-identity-provider.adoc#configuring-oidc-identity-provider[OpenID Connect]
+|Configure an `oidc` identity provider to integrate with an OpenID Connect
+identity provider using an
+link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authorization Code Flow].
+
+|===
diff --git a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc
index 09ac569d2ff8..0904fa462d68 100644
--- a/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-basic-authentication-identity-provider.adoc
@@ -1,23 +1,31 @@
[id="configuring-basic-authentication-identity-provider"]
-= Configuring an basic authentication identity provider
+= Configuring a basic authentication identity provider
include::modules/common-attributes.adoc[]
:context: configuring-basic-authentication-identity-provider
+
toc::[]
Configure a `basic-authentication` identity provider for users to log in to
{product-title} with credentials validated against a remote identity provider.
-Basic authentication is a generic backend integration mechanism.
+Basic authentication is a generic back-end integration mechanism.
include::modules/identity-provider-overview.adoc[leveloffset=+1]
include::modules/identity-provider-about-basic-authentication.adoc[leveloffset=+1]
-include::modules/identity-provider-secret.adoc[leveloffset=+1]
+include::modules/identity-provider-secret-tls.adoc[leveloffset=+1]
include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-basic-authentication-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
+include::modules/example-apache-httpd-configuration.adoc[leveloffset=+1]
+
include::modules/identity-provider-basic-authentication-troubleshooting.adoc[leveloffset=+1]
diff --git a/authentication/identity_providers/configuring-github-identity-provider.adoc b/authentication/identity_providers/configuring-github-identity-provider.adoc
index 572a4959dd1e..238c15d607ae 100644
--- a/authentication/identity_providers/configuring-github-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-github-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring a GitHub or GitHub Enterprise identity provider
include::modules/common-attributes.adoc[]
:context: configuring-github-identity-provider
+
toc::[]
Configure a `github` identity provider to validate user names and passwords
@@ -24,15 +25,23 @@ their GitHub credentials. To prevent anyone with any GitHub user ID from logging
in to your {product-title} cluster, you can restrict access to only those in
specific GitHub organizations.
-
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-overview.adoc[leveloffset=+1]
+endif::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-registering-github.adoc[leveloffset=+1]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/identity-provider-secret.adoc[leveloffset=+1]
include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-github-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
+endif::[]
diff --git a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc
index a06a2d832b2d..8a9c530daab7 100644
--- a/authentication/identity_providers/configuring-gitlab-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-gitlab-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring a GitLab identity provider
include::modules/common-attributes.adoc[]
:context: configuring-gitlab-identity-provider
+
toc::[]
Configure a `gitlab` identity provider to use
@@ -13,7 +14,9 @@ link:https://docs.gitlab.com/ce/integration/openid_connect_provider.html[OpenID
to connect instead of OAuth.
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-overview.adoc[leveloffset=+1]
+endif::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-secret.adoc[leveloffset=+1]
@@ -21,4 +24,9 @@ include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-gitlab-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
diff --git a/authentication/identity_providers/configuring-google-identity-provider.adoc b/authentication/identity_providers/configuring-google-identity-provider.adoc
index 2eb9df3b1349..a0ff89cec3ee 100644
--- a/authentication/identity_providers/configuring-google-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-google-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring a Google identity provider
include::modules/common-attributes.adoc[]
:context: configuring-google-identity-provider
+
toc::[]
Configure a `google` identity provider using
@@ -20,10 +21,19 @@ You can limit authentication to members of a specific hosted domain with the
`hostedDomain` configuration attribute.
====
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-overview.adoc[leveloffset=+1]
+endif::openshift-origin,openshift-enterprise,openshift-webscale[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/identity-provider-secret.adoc[leveloffset=+1]
include::modules/identity-provider-google-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
+endif::[]
diff --git a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc
index 3c97f41c9543..008735583e47 100644
--- a/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-htpasswd-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring an HTPasswd identity provider
include::modules/common-attributes.adoc[]
:context: configuring-htpasswd-identity-provider
+
toc::[]
[id="identity-provider-overview_{context}"]
@@ -38,6 +39,13 @@ include::modules/identity-provider-htpasswd-secret.adoc[leveloffset=+1]
include::modules/identity-provider-htpasswd-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
+include::modules/identity-provider-htpasswd-update-users.adoc[leveloffset=+1]
+
include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1]
diff --git a/authentication/identity_providers/configuring-keystone-identity-provider.adoc b/authentication/identity_providers/configuring-keystone-identity-provider.adoc
index 6caff1da9ee5..699391fb158c 100644
--- a/authentication/identity_providers/configuring-keystone-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-keystone-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring a Keystone identity provider
include::modules/common-attributes.adoc[]
:context: configuring-keystone-identity-provider
+
toc::[]
Configure the `keystone` identity provider to integrate
@@ -22,10 +23,15 @@ user name, the new user might have access to the old user's resources.
include::modules/identity-provider-overview.adoc[leveloffset=+1]
-include::modules/identity-provider-secret.adoc[leveloffset=+1]
+include::modules/identity-provider-secret-tls.adoc[leveloffset=+1]
include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-keystone-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
diff --git a/authentication/identity_providers/configuring-ldap-identity-provider.adoc b/authentication/identity_providers/configuring-ldap-identity-provider.adoc
index 6d07c9b3c286..dca4424ba96e 100644
--- a/authentication/identity_providers/configuring-ldap-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-ldap-identity-provider.adoc
@@ -2,19 +2,29 @@
= Configuring an LDAP identity provider
include::modules/common-attributes.adoc[]
:context: configuring-ldap-identity-provider
+
toc::[]
Configure the `ldap` identity provider to validate user names and passwords
against an LDAPv3 server, using simple bind authentication.
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-overview.adoc[leveloffset=+1]
+endif::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-about-ldap.adoc[leveloffset=+1]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/identity-provider-ldap-secret.adoc[leveloffset=+1]
include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-ldap-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
+endif::[]
diff --git a/authentication/identity_providers/configuring-oidc-identity-provider.adoc b/authentication/identity_providers/configuring-oidc-identity-provider.adoc
index ccfe5987aa2a..5473a20e33e1 100644
--- a/authentication/identity_providers/configuring-oidc-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-oidc-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring an OpenID Connect identity provider
include::modules/common-attributes.adoc[]
:context: configuring-oidc-identity-provider
+
toc::[]
Configure an `oidc` identity provider to integrate with an OpenID Connect
@@ -13,12 +14,20 @@ You can link:https://www.keycloak.org/docs/latest/server_admin/index.html#opensh
Connect identity provider for {product-title}.
endif::[]
-ifdef::openshift-enterprise[]
+ifdef::openshift-enterprise,openshift-webscale[]
You can
-link:https://access.redhat.com/documentation/en-us/red_hat_jboss_middleware_for_openshift/3/html/red_hat_single_sign-on_for_openshift/tutorials[configure Red Hat Single Sign-On]
+link:https://access.redhat.com/documentation/en-us/red_hat_single_sign-on/[configure Red Hat Single Sign-On]
as an OpenID Connect identity provider for {product-title}.
endif::[]
+[IMPORTANT]
+====
+The Authentication Operator in {product-title} requires that the configured
+OpenID Connect identity provider implements the
+link:https://openid.net/specs/openid-connect-discovery-1_0.html[OpenID Connect Discovery]
+specification.
+====
+
[NOTE]
====
`ID Token` and `UserInfo` decryptions are not supported.
@@ -37,15 +46,29 @@ You can also indicate which claims to use as the user's preferred user name,
display name, and email address. If multiple claims are specified, the first one
with a non-empty value is used. The standard claims are:
-[horizontal]
-`sub`:: Short for "subject identifier." The remote identity for the user at the
+[cols="1,2",options="header"]
+|===
+
+|Claim
+|Description
+
+|`sub`
+|Short for "subject identifier." The remote identity for the user at the
issuer.
-`preferred_username`:: The preferred user name when provisioning a user. A
+
+|`preferred_username`
+|The preferred user name when provisioning a user. A
shorthand name that the user wants to be referred to as, such as `janedoe`. Typically
a value that corresponds to the user's login or username in the authentication
system, such as username or email.
-`email`:: Email address.
-`name`:: Display name.
+
+|`email`
+|Email address.
+
+|`name`
+|Display name.
+
+|===
See the
link:http://openid.net/specs/openid-connect-core-1_0.html#StandardClaims[OpenID claims documentation]
@@ -57,14 +80,23 @@ Using an OpenID Connect identity provider requires users to get a token using
`/oauth/token/request` to use with command-line tools.
====
+ifdef::openshift-origin,openshift-enterprise,openshift-webscale[]
include::modules/identity-provider-overview.adoc[leveloffset=+1]
+endif::openshift-origin,openshift-enterprise,openshift-webscale[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/identity-provider-secret.adoc[leveloffset=+1]
include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-oidc-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
include::modules/identity-provider-configuring-using-web-console.adoc[leveloffset=+1]
+endif::[]
diff --git a/authentication/identity_providers/configuring-request-header-identity-provider.adoc b/authentication/identity_providers/configuring-request-header-identity-provider.adoc
index daf3f8cd4764..abec4bb305c5 100644
--- a/authentication/identity_providers/configuring-request-header-identity-provider.adoc
+++ b/authentication/identity_providers/configuring-request-header-identity-provider.adoc
@@ -2,6 +2,7 @@
= Configuring a request header identity provider
include::modules/common-attributes.adoc[]
:context: configuring-request-header-identity-provider
+
toc::[]
Configure a `request-header` identity provider to identify users from request
@@ -16,4 +17,21 @@ include::modules/identity-provider-config-map.adoc[leveloffset=+1]
include::modules/identity-provider-request-header-CR.adoc[leveloffset=+1]
+// Included here so that it is associated with the above module
+.Additional resources
+
+* See xref:../../authentication/understanding-identity-provider.adoc#identity-provider-parameters_understanding-identity-provider[Identity provider parameters] for information on parameters, such as `mappingMethod`, that are common to all identity providers.
+
include::modules/identity-provider-add.adoc[leveloffset=+1]
+
+[id="example-apache-auth-config-using-request-header"]
+== Example Apache authentication configuration using request header
+
+This example configures an Apache authentication proxy for {product-title}
+using the request header identity provider.
+
+[discrete]
+include::modules/identity-provider-apache-custom-proxy-configuration.adoc[leveloffset=+2]
+
+[discrete]
+include::modules/identity-provider-configuring-apache-request-header.adoc[leveloffset=+2]
diff --git a/installing/installing_aws_user_infra/images b/authentication/identity_providers/images
similarity index 100%
rename from installing/installing_aws_user_infra/images
rename to authentication/identity_providers/images
diff --git a/cnv/cnv_install/modules b/authentication/identity_providers/modules
similarity index 100%
rename from cnv/cnv_install/modules
rename to authentication/identity_providers/modules
diff --git a/authentication/impersonating-system-admin.adoc b/authentication/impersonating-system-admin.adoc
index 8e5c4c2b3002..77f5cb3113b1 100644
--- a/authentication/impersonating-system-admin.adoc
+++ b/authentication/impersonating-system-admin.adoc
@@ -1,9 +1,12 @@
[id="impersonating-system-admin"]
-= Impersonating the `system:admin` user
+= Impersonating the system:admin user
include::modules/common-attributes.adoc[]
:context: impersonating-system-admin
+
toc::[]
include::modules/authentication-api-impersonation.adoc[leveloffset=+1]
include::modules/impersonation-system-admin-user.adoc[leveloffset=+1]
+
+include::modules/impersonation-system-admin-group.adoc[leveloffset=+1]
diff --git a/authentication/ldap-syncing.adoc b/authentication/ldap-syncing.adoc
new file mode 100644
index 000000000000..50e3c4e933b4
--- /dev/null
+++ b/authentication/ldap-syncing.adoc
@@ -0,0 +1,64 @@
+[[ldap-syncing]]
+= Syncing LDAP groups
+include::modules/common-attributes.adoc[]
+:context: ldap-syncing-groups
+
+toc::[]
+
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+As an administrator,
+endif::[]
+ifdef::openshift-dedicated[]
+As a xref:../authentication/understanding-and-creating-service-accounts.adoc#dedicated-admin-role-overview_{context}[dedicated administrator],
+endif::[]
+you can use groups to manage users, change
+their permissions, and enhance collaboration. Your organization may have already
+created user groups and stored them in an LDAP server. {product-title} can sync
+those LDAP records with internal {product-title} records, enabling you to manage
+your groups in one place. {product-title} currently supports group sync with
+LDAP servers using three common schemas for defining group membership: RFC 2307,
+Active Directory, and augmented Active Directory.
+
+For more information on configuring LDAP, see
+xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider].
+
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+[NOTE]
+====
+You must have `cluster-admin` privileges to sync groups.
+====
+endif::[]
+
+ifdef::openshift-dedicated[]
+[NOTE]
+====
+You must have `dedicated-admins` privileges to sync groups.
+====
+endif::[]
+
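+The sync operation is driven by a configuration file. As a rough sketch, an
+RFC 2307 sync configuration takes the following shape; the server URL, base
+DNs, and filter are illustrative, and the sections below describe each schema
+in detail:
+
+[source,yaml]
+----
+kind: LDAPSyncConfig
+apiVersion: v1
+url: ldap://ldap.example.com:389           # illustrative LDAP server
+insecure: true                             # example only; use TLS in production
+rfc2307:
+  groupsQuery:
+    baseDN: "ou=groups,dc=example,dc=com"
+    scope: sub
+    derefAliases: never
+    filter: (objectClass=groupOfNames)
+  groupUIDAttribute: dn
+  groupNameAttributes: [ cn ]
+  groupMembershipAttributes: [ member ]
+  usersQuery:
+    baseDN: "ou=users,dc=example,dc=com"
+    scope: sub
+    derefAliases: never
+  userUIDAttribute: dn
+  userNameAttributes: [ mail ]
+----
+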
+include::modules/ldap-syncing-about.adoc[leveloffset=+1]
+include::modules/ldap-syncing-config-rfc2307.adoc[leveloffset=+2]
+include::modules/ldap-syncing-config-activedir.adoc[leveloffset=+2]
+include::modules/ldap-syncing-config-augmented-activedir.adoc[leveloffset=+2]
+include::modules/ldap-syncing-running.adoc[leveloffset=+1]
+include::modules/ldap-syncing-running-all-ldap.adoc[leveloffset=+2]
+include::modules/ldap-syncing-running-openshift.adoc[leveloffset=+2]
+include::modules/ldap-syncing-running-subset.adoc[leveloffset=+2]
+include::modules/ldap-syncing-pruning.adoc[leveloffset=+1]
+
+// Automatically syncing LDAP groups
+include::modules/ldap-auto-syncing.adoc[leveloffset=+1]
+
+.Additional resources
+
+* xref:../authentication/identity_providers/configuring-ldap-identity-provider.adoc#configuring-ldap-identity-provider[Configuring an LDAP identity provider]
+* xref:../nodes/jobs/nodes-nodes-jobs.adoc#nodes-nodes-jobs-creating-cron_nodes-nodes-jobs[Creating cron jobs]
+
+include::modules/ldap-syncing-examples.adoc[leveloffset=+1]
+include::modules/ldap-syncing-rfc2307.adoc[leveloffset=+2]
+include::modules/ldap-syncing-rfc2307-user-defined.adoc[leveloffset=+2]
+include::modules/ldap-syncing-rfc2307-user-defined-error.adoc[leveloffset=+2]
+include::modules/ldap-syncing-activedir.adoc[leveloffset=+2]
+include::modules/ldap-syncing-augmented-activedir.adoc[leveloffset=+2]
+include::modules/ldap-syncing-nesting.adoc[leveloffset=+2]
+include::modules/ldap-syncing-spec.adoc[leveloffset=+1]
diff --git a/authentication/managing-oauth-access-tokens.adoc b/authentication/managing-oauth-access-tokens.adoc
new file mode 100644
index 000000000000..fcdc8348720a
--- /dev/null
+++ b/authentication/managing-oauth-access-tokens.adoc
@@ -0,0 +1,17 @@
+[id="managing-oauth-access-tokens"]
+= Managing user-owned OAuth access tokens
+include::modules/common-attributes.adoc[]
+:context: managing-oauth-access-tokens
+
+toc::[]
+
+Users can review their own OAuth access tokens and delete any that are no longer needed.
+
+// Listing user-owned OAuth access tokens
+include::modules/oauth-list-tokens.adoc[leveloffset=+1]
+
+// Viewing the details of a user-owned OAuth access token
+include::modules/oauth-view-details-tokens.adoc[leveloffset=+1]
+
+// Deleting user-owned OAuth access tokens
+include::modules/oauth-delete-tokens.adoc[leveloffset=+1]
diff --git a/authentication/managing-security-context-constraints.adoc b/authentication/managing-security-context-constraints.adoc
index d8fe8e4b07e4..4b275ba4b206 100644
--- a/authentication/managing-security-context-constraints.adoc
+++ b/authentication/managing-security-context-constraints.adoc
@@ -1,7 +1,8 @@
[id="managing-pod-security-policies"]
-= Managing Security Context Constraints
+= Managing security context constraints
include::modules/common-attributes.adoc[]
:context: configuring-internal-oauth
+
toc::[]
include::modules/security-context-constraints-about.adoc[leveloffset=+1]
diff --git a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc
new file mode 100644
index 000000000000..0f403ae4aeed
--- /dev/null
+++ b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc
@@ -0,0 +1,92 @@
+[id="about-cloud-credential-operator"]
+= About the Cloud Credential Operator
+include::modules/common-attributes.adoc[]
+:context: about-cloud-credential-operator
+
+toc::[]
+
+The Cloud Credential Operator (CCO) manages cloud provider credentials as custom resource definitions (CRDs). The CCO syncs on `CredentialsRequest` custom resources (CRs) to allow {product-title} components to request cloud provider credentials with the specific permissions that are required for the cluster to run.
+
+By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in several different modes. If no mode is specified, or the `credentialsMode` parameter is set to an empty string (`""`), the CCO operates in its default mode.
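+
+For example, the mode is selected with a single top-level field in the
+`install-config.yaml` file; the base domain shown here is a placeholder:
+
+[source,yaml]
+----
+apiVersion: v1
+baseDomain: example.com      # illustrative
+credentialsMode: Mint        # or Passthrough or Manual; omit for the default mode
+# ...the remainder of the install-config.yaml file...
+----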
+
+[id="about-cloud-credential-operator-modes"]
+== Modes
+
+By setting different values for the `credentialsMode` parameter in the `install-config.yaml` file, the CCO can be configured to operate in _mint_, _passthrough_, or _manual_ mode. These options provide transparency and flexibility in how the CCO uses cloud credentials to process `CredentialsRequest` CRs in the cluster, and allow the CCO to be configured to suit the security requirements of your organization. Not all CCO modes are supported for all cloud providers.
+
+* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc#cco-mode-mint[Mint]**: In mint mode, the CCO uses the provided admin-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required.
++
+[NOTE]
+====
+Mint mode is the default and recommended best practice setting for the CCO to use.
+====
+
+* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc#cco-mode-passthrough[Passthrough]**: In passthrough mode, the CCO passes the provided cloud credential to the components that request cloud credentials.
+
+* **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual]**: In manual mode, a user manages cloud credentials instead of the CCO.
+
+** **xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Manual with AWS STS]**: In manual mode, you can configure an AWS cluster to use Amazon Web Services Security Token Service (AWS STS). With this configuration, the CCO uses temporary credentials for different components.
+
+.CCO mode support matrix
+[cols="<.^2,^.^1,^.^1,^.^1"]
+|====
+|Cloud provider |Mint |Passthrough |Manual
+
+|Amazon Web Services (AWS)
+|X
+|X
+|X
+
+
+|Microsoft Azure
+|X ^[1]^
+|X ^[1]^
+|X
+
+|Google Cloud Platform (GCP)
+|X
+|X
+|X
+
+|{rh-openstack-first}
+|
+|X
+|
+
+|{rh-virtualization-first}
+|
+|X
+|
+
+|VMware vSphere
+|
+|X
+|
+
+|====
+[.small]
+--
+1. Manual mode is the only supported CCO configuration for Microsoft Azure Stack Hub.
+--
+
+[id="about-cloud-credential-operator-default"]
+== Default behavior
+For platforms on which multiple modes are supported (AWS, Azure, and GCP), when the CCO operates in its default mode, it checks the provided credentials dynamically to determine for which mode they are sufficient to process `CredentialsRequest` CRs.
+
+By default, the CCO determines whether the credentials are sufficient for mint mode, which is the preferred mode of operation, and uses those credentials to create appropriate credentials for components in the cluster. If the credentials are not sufficient for mint mode, it determines whether they are sufficient for passthrough mode. If the credentials are not sufficient for passthrough mode, the CCO cannot adequately process `CredentialsRequest` CRs.
+
+[NOTE]
+====
+The CCO cannot verify whether Azure credentials are sufficient for passthrough mode. If Azure credentials are insufficient for mint mode, the CCO operates with the assumption that the credentials are sufficient for passthrough mode.
+====
+
+If the provided credentials are determined to be insufficient during installation, the installation fails. For AWS, the installer fails early in the process and indicates which required permissions are missing. Other providers might not provide specific information about the cause of the error until errors are encountered.
+
+If the credentials are changed after a successful installation and the CCO determines that the new credentials are insufficient, the CCO puts conditions on any new `CredentialsRequest` CRs to indicate that it cannot process them because of the insufficient credentials.
+
+To resolve insufficient credentials issues, provide a credential with sufficient permissions. If an error occurred during installation, try installing again. For issues with new `CredentialsRequest` CRs, wait for the CCO to try to process the CR again. As an alternative, you can manually create IAM for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], and xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
+
+[id="additional-resources_about-cloud-credential-operator"]
+== Additional resources
+
+* xref:../../operators/operator-reference.adoc#cloud-credential-operator_red-hat-operators[Red Hat Operators reference page for the Cloud Credential Operator]
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc
new file mode 100644
index 000000000000..561ccc146c5d
--- /dev/null
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc
@@ -0,0 +1,29 @@
+[id="cco-mode-manual"]
+= Using manual mode
+include::modules/common-attributes.adoc[]
+:context: cco-mode-manual
+
+toc::[]
+
+Manual mode is supported for Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP).
+
+In manual mode, a user manages cloud credentials instead of the Cloud Credential Operator (CCO). To use this mode, you must examine the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are running or installing, create corresponding credentials in the underlying cloud provider, and create Kubernetes Secrets in the correct namespaces to satisfy all `CredentialsRequest` CRs for the cluster's cloud provider.
+
+Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. This mode also does not require connectivity to the AWS public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade.
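+
+For example, a `CredentialsRequest` CR for a component on AWS is typically
+satisfied by a secret shaped like the following, in the namespace and with the
+name given by the CR's `spec.secretRef`; the values shown are placeholders:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+  namespace: openshift-image-registry    # from the CR's spec.secretRef
+  name: installer-cloud-credentials      # from the CR's spec.secretRef
+stringData:
+  aws_access_key_id: <access_key_id>             # credential you created manually
+  aws_secret_access_key: <secret_access_key>
+----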
+
+For information about configuring your cloud provider to use manual mode, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
+
+[id="manual-mode-sts-blurb"]
+== Manual mode with AWS STS
+
+You can configure an AWS cluster in manual mode to use xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Amazon Web Services Security Token Service (AWS STS)]. With this configuration, the CCO uses temporary credentials for different components.
+
+include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1]
+
+[id="additional-resources_cco-mode-manual"]
+== Additional resources
+
+* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]
+* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]
+* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]
+* xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-mode-sts[Using manual mode with AWS STS]
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc
new file mode 100644
index 000000000000..f6748168e726
--- /dev/null
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc
@@ -0,0 +1,75 @@
+[id="cco-mode-mint"]
+= Using mint mode
+include::modules/common-attributes.adoc[]
+:context: cco-mode-mint
+
+toc::[]
+
+Mint mode is supported for Amazon Web Services (AWS), Microsoft Azure, and Google Cloud Platform (GCP).
+
+Mint mode is the default and recommended best practice setting for the Cloud Credential Operator (CCO) to use on the platforms for which it is supported. In this mode, the CCO uses the provided administrator-level cloud credential to create new credentials for components in the cluster with only the specific permissions that are required.
+
+If the credential is not removed after installation, it is stored and used by the CCO to process `CredentialsRequest` CRs for components in the cluster and create new credentials for each with only the specific permissions that are required. The continuous reconciliation of cloud credentials in mint mode allows actions that require additional credentials or permissions, such as upgrading, to proceed.
+
+If the requirement that mint mode stores the administrator-level credential in the cluster `kube-system` namespace does not suit the security requirements of your organization, see _Alternatives to storing administrator-level secrets in the kube-system project_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[GCP].
+
+[NOTE]
+====
+xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode] is the only supported CCO configuration for Microsoft Azure Stack Hub.
+====
+
+[id="mint-mode-permissions"]
+== Mint mode permissions requirements
+When using the CCO in mint mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the provided credentials are not sufficient for mint mode, the CCO cannot create an IAM user.
+
+[id="mint-mode-permissions-aws"]
+=== Amazon Web Services (AWS) permissions
+The credential you provide for mint mode in AWS must have the following permissions:
+
+* `iam:CreateAccessKey`
+* `iam:CreateUser`
+* `iam:DeleteAccessKey`
+* `iam:DeleteUser`
+* `iam:DeleteUserPolicy`
+* `iam:GetUser`
+* `iam:GetUserPolicy`
+* `iam:ListAccessKeys`
+* `iam:PutUserPolicy`
+* `iam:TagUser`
+* `iam:SimulatePrincipalPolicy`
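+
+For example, you can check whether a credential has some of these permissions by simulating its policy with the AWS CLI. The account ID and user name in this sketch are placeholders:
+
+[source,terminal]
+----
+$ aws iam simulate-principal-policy \
+    --policy-source-arn arn:aws:iam::123456789012:user/<username> \
+    --action-names iam:CreateUser iam:CreateAccessKey
+----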
+
+[id="mint-mode-permissions-azure"]
+=== Microsoft Azure permissions
+The credential you provide for mint mode in Azure must have a service principal with the permissions specified in xref:../../installing/installing_azure/installing-azure-account.adoc#installation-azure-service-principal_installing-azure-account[Creating a service principal].
+
+[id="mint-mode-permissions-gcp"]
+=== Google Cloud Platform (GCP) permissions
+The credential you provide for mint mode in GCP must have the following permissions:
+
+* `resourcemanager.projects.get`
+* `serviceusage.services.list`
+* `iam.serviceAccountKeys.create`
+* `iam.serviceAccountKeys.delete`
+* `iam.serviceAccounts.create`
+* `iam.serviceAccounts.delete`
+* `iam.serviceAccounts.get`
+* `iam.roles.get`
+* `resourcemanager.projects.getIamPolicy`
+* `resourcemanager.projects.setIamPolicy`
+
+//Mint Mode with removal or rotation of the admin credential
+include::modules/mint-mode-with-removal-of-admin-credential.adoc[leveloffset=+1]
+
+//Rotating cloud provider credentials manually
+include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2]
+
+//Removing cloud provider credentials
+include::modules/manually-removing-cloud-creds.adoc[leveloffset=+2]
+
+[id="additional-resources_cco-mode-mint"]
+== Additional resources
+
+* xref:../../installing/installing_aws/manually-creating-iam.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-aws[Alternatives to storing administrator-level secrets in the kube-system project] for AWS
+* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-azure[Alternatives to storing administrator-level secrets in the kube-system project] for Azure
+* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#alternatives-to-storing-admin-secrets-in-kube-system_manually-creating-iam-gcp[Alternatives to storing administrator-level secrets in the kube-system project] for GCP
+* xref:../../installing/installing_azure/installing-azure-account.adoc#installation-azure-service-principal_installing-azure-account[Creating a service principal] in Azure
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc
new file mode 100644
index 000000000000..74e104373af3
--- /dev/null
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc
@@ -0,0 +1,105 @@
+[id="cco-mode-passthrough"]
+= Using passthrough mode
+include::modules/common-attributes.adoc[]
+:context: cco-mode-passthrough
+
+toc::[]
+
+Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, {rh-virtualization-first}, and VMware vSphere.
+
+In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. The credential must have permissions to perform the installation and complete the operations that are required by components in the cluster, but does not need to be able to create new credentials. The CCO does not attempt to create additional limited-scoped credentials in passthrough mode.
+
+[NOTE]
+====
+xref:../../authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc#cco-mode-manual[Manual mode] is the only supported CCO configuration for Microsoft Azure Stack Hub.
+====
+
+[id="passthrough-mode-permissions"]
+== Passthrough mode permissions requirements
+When using the CCO in passthrough mode, ensure that the credential you provide meets the requirements of the cloud on which you are running or installing {product-title}. If the credentials that the CCO passes to a component that creates a `CredentialsRequest` CR are not sufficient, that component reports an error when it tries to call an API that it does not have permission to use.
+
+[id="passthrough-mode-permissions-aws"]
+=== Amazon Web Services (AWS) permissions
+The credential you provide for passthrough mode in AWS must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing.
+
+To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS].
+
+[id="passthrough-mode-permissions-azure"]
+=== Microsoft Azure permissions
+The credential you provide for passthrough mode in Azure must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing.
+
+To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure].
+
+[id="passthrough-mode-permissions-gcp"]
+=== Google Cloud Platform (GCP) permissions
+The credential you provide for passthrough mode in GCP must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing.
+
+To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP].
+
+[id="passthrough-mode-permissions-rhosp"]
+=== {rh-openstack-first} permissions
+To install an {product-title} cluster on {rh-openstack}, the CCO requires a credential with the permissions of a `member` user role.
+
+[id="passthrough-mode-permissions-rhv"]
+=== {rh-virtualization-first} permissions
+To install an {product-title} cluster on {rh-virtualization}, the CCO requires a credential with the following privileges:
+
+* `DiskOperator`
+* `DiskCreator`
+* `UserTemplateBasedVm`
+* `TemplateOwner`
+* `TemplateCreator`
+* `ClusterAdmin` on the specific cluster that is targeted for {product-title} deployment
+
+[id="passthrough-mode-permissions-vsware"]
+=== VMware vSphere permissions
+To install an {product-title} cluster on VMware vSphere, the CCO requires a credential with the following vSphere privileges:
+
+.Required vSphere privileges
+[cols="1,2"]
+|====
+|Category |Privileges
+
+|Datastore
+|_Allocate space_
+
+|Folder
+|_Create folder_, _Delete folder_
+
+|vSphere Tagging
+|All privileges
+
+|Network
+|_Assign network_
+
+|Resource
+|_Assign virtual machine to resource pool_
+
+|Profile-driven storage
+|All privileges
+
+|vApp
+|All privileges
+
+|Virtual machine
+|All privileges
+
+|====
+
+[id="passthrough-mode-maintenance"]
+== Passthrough mode credential maintenance
+If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
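+
+For example, one way to inspect the `CredentialsRequest` CRs in a release image before upgrading is to extract them with the OpenShift CLI. The release image and output directory in this sketch are placeholders:
+
+[source,terminal]
+----
+$ oc adm release extract quay.io/openshift-release-dev/ocp-release:<version>-x86_64 \
+    --credentials-requests \
+    --cloud=aws \
+    --to=<output_directory>
+----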
+
+[id="passthrough-mode-reduce-permissions"]
+== Reducing permissions after installation
+When using passthrough mode, each component uses the same credential, and therefore has the same permissions, as every other component. If you do not reduce the permissions after installation, all components retain the broad permissions that are required to run the installer.
+
+After installation, you can reduce the permissions on your credential to only those that are required to run the cluster, as defined by the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are using.
+
+To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating IAM_ for xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[AWS], xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Azure], or xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[GCP].
+
+[id="additional-resources_cco-mode-passthrough"]
+== Additional resources
+
+* xref:../../installing/installing_aws/manually-creating-iam.adoc#manually-creating-iam-aws[Manually creating IAM for AWS]
+* xref:../../installing/installing_azure/manually-creating-iam-azure.adoc#manually-creating-iam-azure[Manually creating IAM for Azure]
+* xref:../../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-creating-iam-gcp[Manually creating IAM for GCP]
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc
new file mode 100644
index 000000000000..a520d8c2d0a7
--- /dev/null
+++ b/authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc
@@ -0,0 +1,114 @@
+[id="cco-mode-sts"]
+= Using manual mode with STS
+include::modules/common-attributes.adoc[]
+:context: cco-mode-sts
+
+toc::[]
+
+Manual mode with STS is supported for Amazon Web Services (AWS).
+
+In manual mode with STS, the individual {product-title} cluster components use AWS Secure Token Service (STS) to assign components IAM roles that provide short-term, limited-privilege security credentials. These credentials are associated with IAM roles that are specific to each component that makes AWS API calls.
+
+Requests for new and refreshed credentials are automated by using an appropriately configured AWS IAM OpenID Connect (OIDC) identity provider, combined with AWS IAM roles. {product-title} signs service account tokens that are trusted by AWS IAM, and can be projected into a pod and used for authentication. Tokens are refreshed after one hour.
+
+//to-do: more detailed info on this flow
+
+.STS authentication flow
+image::142_OpenShift_credentials_STS_0221.svg[Detailed authentication flow between AWS and the cluster when using AWS STS]
+//to-do: improve alt-text
+
+Using manual mode with STS changes the content of the AWS credentials that are provided to individual {product-title} components.
+
+.AWS secret format using long-lived credentials
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ namespace: <1>
+ name: <2>
+data:
+ aws_access_key_id:
+ aws_secret_access_key:
+----
+<1> The namespace for the component.
+<2> The name of the component secret.
+
+.AWS secret format with STS
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ namespace: <1>
+ name: <2>
+data:
+ role_name: <3>
+ web_identity_token_file: <4>
+----
+<1> The namespace for the component.
+<2> The name of the component secret.
+<3> The IAM role for the component.
+<4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components.
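+
+The token at that path is mounted into the component pod by using a projected volume. A minimal sketch of such a volume definition, assuming an illustrative `audience` value, looks like the following:
+
+[source,yaml]
+----
+volumes:
+- name: bound-sa-token
+  projected:
+    sources:
+    - serviceAccountToken:
+        audience: openshift <1>
+        path: token
+----
+<1> The intended audience of the token. This example value is an assumption for illustration.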
+
+//Supertask: Installing an OCP cluster configured for manual mode with STS
+[id="sts-mode-installing"]
+== Installing an {product-title} cluster configured for manual mode with STS
+
+To install a cluster that is configured to use the Cloud Credential Operator (CCO) in manual mode with STS:
+
+//[pre-4.8]. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-config_cco-mode-sts[Create the required AWS resources]
+. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-configuring_cco-mode-sts[Configure the Cloud Credential Operator utility].
+. Create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command].
+. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-manual-run-installer_cco-mode-sts[Run the {product-title} installer].
+. xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-installing-verifying_cco-mode-sts[Verify that the cluster is using short-lived credentials].
+
+[NOTE]
+====
+Because the cluster is operating in manual mode when using STS, it cannot create new credentials with the permissions that components require. Upgrading to a different minor version of {product-title} often introduces new AWS permission requirements. Before upgrading a cluster that is using STS, the cluster administrator must manually ensure that the AWS permissions are sufficient for existing components and available to any new components.
+====
+
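+As an illustration, creating all of the required AWS resources with a single `ccoctl` command looks similar to the following sketch. The name, region, and directory are placeholders, and the exact flags can vary by release:
+
+[source,terminal]
+----
+$ ccoctl aws create-all \
+    --name=<cluster_name> \
+    --region=<aws_region> \
+    --credentials-requests-dir=<path_to_credentials_requests>
+----
+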
+//[pre-4.8]Task part 1: Creating AWS resources manually
+//include::modules/sts-mode-installing-manual-config.adoc[leveloffset=+2]
+
+//Task part 1: Configuring the Cloud Credential Operator utility
+include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2]
+
+[id="sts-mode-create-aws-resources-ccoctl"]
+=== Creating AWS resources with the Cloud Credential Operator utility
+
+You can use the CCO utility (`ccoctl`) to create the required AWS resources xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-individually_cco-mode-sts[individually], or xref:../../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#cco-ccoctl-creating-at-once_cco-mode-sts[with a single command].
+
+//Task part 2a: Creating the required AWS resources individually
+include::modules/cco-ccoctl-creating-individually.adoc[leveloffset=+3]
+
+//Task part 2b: Creating the required AWS resources all at once
+include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3]
+
+//Task part 3: Run the OCP installer
+include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2]
+
+//Task part 4: Verify that the cluster is using short-lived credentials
+include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2]
+
+[id="sts-mode-upgrading"]
+== Upgrading an {product-title} cluster configured for manual mode with STS
+
+The release image for the version of {product-title} that you are upgrading to contains a version of the `ccoctl` binary and a list of `CredentialsRequest` objects that are specific to that release.
+
+:context: sts-mode-upgrading
+
+//Task part 1: Configuring the Cloud Credential Operator utility
+include::modules/cco-ccoctl-configuring.adoc[leveloffset=+2]
+
+include::modules/cco-ccoctl-upgrading.adoc[leveloffset=+2]
+
+include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+2]
+
+//Task part 3: Run the OCP installer
+include::modules/sts-mode-installing-manual-run-installer.adoc[leveloffset=+2]
+
+//Task part 4: Verify that the cluster is using short-lived credentials
+include::modules/sts-mode-installing-verifying.adoc[leveloffset=+2]
diff --git a/monitoring/cluster-monitoring/images b/authentication/managing_cloud_provider_credentials/images
similarity index 100%
rename from monitoring/cluster-monitoring/images
rename to authentication/managing_cloud_provider_credentials/images
diff --git a/cnv/cnv_release_notes/modules b/authentication/managing_cloud_provider_credentials/modules
similarity index 100%
rename from cnv/cnv_release_notes/modules
rename to authentication/managing_cloud_provider_credentials/modules
diff --git a/authentication/remove-kubeadmin.adoc b/authentication/remove-kubeadmin.adoc
index 352f5c1a4d25..9a66a0c2b889 100644
--- a/authentication/remove-kubeadmin.adoc
+++ b/authentication/remove-kubeadmin.adoc
@@ -2,6 +2,7 @@
= Removing the kubeadmin user
include::modules/common-attributes.adoc[]
:context: removing-kubeadmin
+
toc::[]
include::modules/authentication-kubeadmin.adoc[leveloffset=+1]
diff --git a/authentication/tokens-scoping.adoc b/authentication/tokens-scoping.adoc
index b15fffb7e0f7..060ddce00817 100644
--- a/authentication/tokens-scoping.adoc
+++ b/authentication/tokens-scoping.adoc
@@ -2,6 +2,7 @@
= Scoping tokens
include::modules/common-attributes.adoc[]
:context: configuring-internal-oauth
+
toc::[]
include::modules/tokens-scoping-about.adoc[leveloffset=+1]
diff --git a/authentication/understanding-and-creating-service-accounts.adoc b/authentication/understanding-and-creating-service-accounts.adoc
index 976f9155d5d3..4ef0374388ae 100644
--- a/authentication/understanding-and-creating-service-accounts.adoc
+++ b/authentication/understanding-and-creating-service-accounts.adoc
@@ -2,6 +2,7 @@
= Understanding and creating service accounts
include::modules/common-attributes.adoc[]
:context: understanding-service-accounts
+
toc::[]
include::modules/service-accounts-overview.adoc[leveloffset=+1]
diff --git a/authentication/understanding-authentication.adoc b/authentication/understanding-authentication.adoc
index 6f780b6754f7..402c04980512 100644
--- a/authentication/understanding-authentication.adoc
+++ b/authentication/understanding-authentication.adoc
@@ -2,6 +2,7 @@
= Understanding authentication
include::modules/common-attributes.adoc[]
:context: understanding-authentication
+
toc::[]
For users to interact with {product-title}, they must first authenticate
@@ -9,7 +10,7 @@ to the cluster. The authentication layer identifies the user associated with req
{product-title} API. The authorization layer then uses information about the
requesting user to determine if the request is allowed.
-ifdef::openshift-enterprise,openshift-origin[]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
As an administrator, you can configure authentication for {product-title}.
endif::[]
@@ -23,6 +24,8 @@ include::modules/oauth-server-overview.adoc[leveloffset=+2]
include::modules/oauth-token-requests.adoc[leveloffset=+2]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/authentication-api-impersonation.adoc[leveloffset=+3]
include::modules/authentication-prometheus-system-metrics.adoc[leveloffset=+3]
+endif::[]
diff --git a/authentication/understanding-identity-provider.adoc b/authentication/understanding-identity-provider.adoc
index 583ac1d0d3e0..956423069eb2 100644
--- a/authentication/understanding-identity-provider.adoc
+++ b/authentication/understanding-identity-provider.adoc
@@ -2,6 +2,7 @@
= Understanding identity provider configuration
include::modules/common-attributes.adoc[]
:context: understanding-identity-provider
+
toc::[]
The {product-title} master includes a built-in OAuth server. Developers and
@@ -68,6 +69,10 @@ link:http://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth[Authoriza
|===
+Once an identity provider has been defined, you can
+xref:../authentication/using-rbac.adoc#authorization-overview_using-rbac[use RBAC to define and apply permissions].
+
+include::modules/authentication-remove-kubeadmin.adoc[leveloffset=+1]
include::modules/identity-provider-parameters.adoc[leveloffset=+1]
diff --git a/authentication/using-rbac.adoc b/authentication/using-rbac.adoc
index 5c5106380a9b..c312f4ff3368 100644
--- a/authentication/using-rbac.adoc
+++ b/authentication/using-rbac.adoc
@@ -19,10 +19,14 @@ include::modules/rbac-adding-roles.adoc[leveloffset=+1]
include::modules/rbac-creating-local-role.adoc[leveloffset=+1]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/rbac-creating-cluster-role.adoc[leveloffset=+1]
+endif::[]
include::modules/rbac-local-role-binding-commands.adoc[leveloffset=+1]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
include::modules/rbac-cluster-role-binding-commands.adoc[leveloffset=+1]
include::modules/rbac-creating-cluster-admin.adoc[leveloffset=+1]
+endif::[]
diff --git a/authentication/using-service-accounts-as-oauth-client.adoc b/authentication/using-service-accounts-as-oauth-client.adoc
index 66a13b3dbf8b..3c68594babc9 100644
--- a/authentication/using-service-accounts-as-oauth-client.adoc
+++ b/authentication/using-service-accounts-as-oauth-client.adoc
@@ -2,6 +2,7 @@
= Using a service account as an OAuth client
include::modules/common-attributes.adoc[]
:context: using-service-accounts-as-oauth-client
+
toc::[]
-include::modules/service-accounts-as-oauth-clients.adoc[leveloffset=+1]
\ No newline at end of file
+include::modules/service-accounts-as-oauth-clients.adoc[leveloffset=+1]
diff --git a/authentication/using-service-accounts-in-applications.adoc b/authentication/using-service-accounts-in-applications.adoc
index 68a5155a7304..231e068d6402 100644
--- a/authentication/using-service-accounts-in-applications.adoc
+++ b/authentication/using-service-accounts-in-applications.adoc
@@ -2,6 +2,7 @@
= Using service accounts in applications
include::modules/common-attributes.adoc[]
:context: using-service-accounts
+
toc::[]
include::modules/service-accounts-overview.adoc[leveloffset=+1]
diff --git a/autocomment.sh b/autocomment.sh
index ea563d0708e3..0a7d565191ec 100644
--- a/autocomment.sh
+++ b/autocomment.sh
@@ -9,7 +9,8 @@ wget https://api.travis-ci.org/v3/job/"${TRAVIS_JOB_ID}"/log.txt
ERROR_LIST=$(grep '31m' travis-log-408052641.txt | sed -r "s/[[:cntrl:]]\[[0-9]{1,3}m//g")
echo "" > errors.txt
-ALLOWED_USERS=("mburke5678" "vikram-redhat" "ariordan-redhat" "ahardin-rh" "kalexand-rh" "adellape" "bmcelvee" "ousleyp" "jhoyt-rh" "JStickler" "geekspertise" "rh-max" "bergerhoffer" "huffmanca" "sheriff-rh" "jboxman")
+ALLOWED_USERS=("mburke5678" "vikram-redhat" "abrennan89" "ahardin-rh" "kalexand-rh" "adellape" "bmcelvee" "ousleyp" "lamek" "JStickler" "rh-max" "bergerhoffer" "sheriff-rh" "jboxman" "bobfuru" "aburdenthehand" "boczkowska" "Preeticp" "neal-timpe" "codyhoag" "apinnick" "bgaydosrh" "lmandavi" "maxwelldb" "pneedle-rh" "lbarbeevargas" "jeana-redhat" "RichardHoch" "johnwilkins" "sjhala-ccs" "mgarrellRH" "SNiemann15" "sfortner-RH" "jonquilwilliams" "ktania46" "wking" "
+jc-berger" "rishumehra" "aireilly" "iranzo" "abhatt-rh" "@mohit-sheth" "stoobie" "emarcusRH" "kquinn1204" "mikemckiernan" "skrthomas" "sagidlow" "rolfedh")
USERNAME=${TRAVIS_PULL_REQUEST_SLUG::-15}
if [ "$TRAVIS_PULL_REQUEST" != "false" ] ; then #to make sure it only runs on PRs and not all merges
diff --git a/automerge.sh b/automerge.sh
new file mode 100755
index 000000000000..a65f5a1a4fd5
--- /dev/null
+++ b/automerge.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+set -ev
+
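+# automerge.sh asks an external service to merge PRs that were opened by the
+# cherry-pick bot. PRs from any other user, PRs whose source branch is master,
+# and builds that are not PRs are skipped.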
+ALLOWED_USERS=("openshift-cherrypick-robot")
+USERNAME=${TRAVIS_PULL_REQUEST_SLUG::-15} # strip the trailing "/openshift-docs" (15 characters) to leave the user name
+
+echo -e "{\"PR_BRANCH\":\"${TRAVIS_PULL_REQUEST_BRANCH}\",\"BASE_REPO\":\"${TRAVIS_REPO_SLUG}\",\"PR_NUMBER\":\"${TRAVIS_PULL_REQUEST}\",\"USER_NAME\":\"${USERNAME}\",\"BASE_REF\":\"${TRAVIS_BRANCH}\",\"REPO_NAME\":\"${TRAVIS_PULL_REQUEST_SLUG}\"}"
+
+if [ "$TRAVIS_PULL_REQUEST" != "false" ] ; then #to make sure it only runs on PRs and not all merges
+ if [[ " ${ALLOWED_USERS[*]} " =~ " ${USERNAME} " ]]; then # to make sure it only runs on PRs from the bot
+ if [ "${TRAVIS_PULL_REQUEST_BRANCH}" != "master" ] ; then # to make sure it does not run for direct master changes
+
+ echo "{\"PR_BRANCH\":\"${TRAVIS_PULL_REQUEST_BRANCH}\",\"BASE_REPO\":\"${TRAVIS_REPO_SLUG}\",\"PR_NUMBER\":\"${TRAVIS_PULL_REQUEST}\",\"USER_NAME\":\"${USERNAME}\",\"BASE_REF\":\"${TRAVIS_BRANCH}\",\"REPO_NAME\":\"${TRAVIS_PULL_REQUEST_SLUG}\"}" > buildset.json
+
+ curl -H 'Content-Type: application/json' --request POST --data @buildset.json "https://roomy-tungsten-cylinder.glitch.me"
+
+ echo -e "\\n\\033[0;32m[✓] Sent request for merging.\\033[0m"
+
+# curl -X PUT -H "Authorization: token ${GHAUTH}" "https://api.github.com/repos/openshift/openshift-docs/pulls/${TRAVIS_PULL_REQUEST}/merge"
+# echo -e "\\n\\033[0;32m[✓] Automerged.\\033[0m"
+ else
+ echo -e "\\n\\033[1;33m[!] Direct PR for master branch, not sending for merge.\\033[0m"
+ fi
+ else
+ echo -e "\\n\\033[1;33m[!] Automerge is only for the bot\\033[0m"
+ fi
+else
+ echo -e "\\n\\033[1;33m[!] Not a valid PR.\\033[0m"
+fi
diff --git a/autopreview.sh b/autopreview.sh
index fdf7ee6b4602..9e1f083ba6f4 100644
--- a/autopreview.sh
+++ b/autopreview.sh
@@ -1,7 +1,8 @@
#!/bin/bash
set -ev
-ALLOWED_USERS=("mburke5678" "vikram-redhat" "ariordan-redhat" "ahardin-rh" "kalexand-rh" "adellape" "bmcelvee" "ousleyp" "jhoyt-rh" "JStickler" "geekspertise" "rh-max" "bergerhoffer" "huffmanca" "sheriff-rh" "jboxman")
+ALLOWED_USERS=("aireilly" "mburke5678" "vikram-redhat" "abrennan89" "ahardin-rh" "kalexand-rh" "adellape" "bmcelvee" "ousleyp" "lamek" "JStickler" "rh-max" "bergerhoffer" "sheriff-rh" "jboxman" "bobfuru" "aburdenthehand" "boczkowska" "Preeticp" "neal-timpe" "codyhoag" "apinnick" "bgaydosrh" "lmandavi" "maxwelldb" "pneedle-rh" "lbarbeevargas" "jeana-redhat" "RichardHoch" "johnwilkins" "sjhala-ccs" "mgarrellRH" "SNiemann15" "sfortner-RH" "jonquilwilliams" "ktania46" "wking" "
+jc-berger" "rishumehra" "iranzo" "abhatt-rh" "@mohit-sheth" "stoobie" "emarcusRH" "kquinn1204" "mikemckiernan" "skrthomas" "sagidlow" "rolfedh")
USERNAME=${TRAVIS_PULL_REQUEST_SLUG::-15}
COMMIT_HASH="$(git rev-parse @~)"
mapfile -t FILES_CHANGED < <(git diff --name-only "$COMMIT_HASH")
@@ -9,9 +10,9 @@ mapfile -t FILES_CHANGED < <(git diff --name-only "$COMMIT_HASH")
if [ "$TRAVIS_PULL_REQUEST" != "false" ] ; then #to make sure it only runs on PRs and not all merges
if [[ " ${ALLOWED_USERS[*]} " =~ " ${USERNAME} " ]]; then # to make sure it only runs on PRs from @openshift/team-documentation
if [ "${TRAVIS_PULL_REQUEST_BRANCH}" != "master" ] ; then # to make sure it does not run for direct master changes
- if [[ " ${FILES_CHANGED[*]} " = *".adoc"* ]]; then # to make sure this doesn't run for genreal modifications
+ if [[ " ${FILES_CHANGED[*]} " = *".adoc"* ]] || [[ " ${FILES_CHANGED[*]} " = *"_topic_map.yml"* ]] || [[ " ${FILES_CHANGED[*]} " = *"_distro_map.yml"* ]] ; then # to make sure this doesn't run for general modifications
echo "{\"PR_BRANCH\":\"${TRAVIS_PULL_REQUEST_BRANCH}\",\"BASE_REPO\":\"${TRAVIS_REPO_SLUG}\",\"PR_NUMBER\":\"${TRAVIS_PULL_REQUEST}\",\"USER_NAME\":\"${USERNAME}\",\"BASE_REF\":\"${TRAVIS_BRANCH}\",\"REPO_NAME\":\"${TRAVIS_PULL_REQUEST_SLUG}\"}" > buildset.json
- curl -H 'Content-Type: application/json' --request POST --data @buildset.json "https://preview-receiver.glitch.me/"
+ curl -H 'Content-Type: application/json' --request POST --data @buildset.json "https://roomy-tungsten-cylinder.glitch.me"
echo -e "\\n\\033[0;32m[✓] Sent request for building a preview.\\033[0m"
else
echo -e "\\n\\033[1;33m[!] No .adoc files modified, not building a preview.\\033[0m"
diff --git a/backup_and_restore/application_backup_and_restore/images b/backup_and_restore/application_backup_and_restore/images
new file mode 120000
index 000000000000..5fa6987088da
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/modules b/backup_and_restore/application_backup_and_restore/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/backup_and_restore/application_backup_and_restore/placeholder.adoc b/backup_and_restore/application_backup_and_restore/placeholder.adoc
new file mode 100644
index 000000000000..7f6d1472196d
--- /dev/null
+++ b/backup_and_restore/application_backup_and_restore/placeholder.adoc
@@ -0,0 +1,6 @@
+[id="placeholder"]
+= Application backup and restore
+include::modules/common-attributes.adoc[]
+:context: oadp
+
+TBD
diff --git a/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
new file mode 100644
index 000000000000..0b643b5ced91
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc
@@ -0,0 +1,28 @@
+[id="backup-etcd"]
+= Backing up etcd
+include::modules/common-attributes.adoc[]
+:context: backup-etcd
+
+toc::[]
+
+etcd is the key-value store for {product-title}, which persists the state of all
+resource objects.
+
+Back up your cluster's etcd data regularly and store it in a secure location,
+ideally outside the {product-title} environment. Do not take an etcd backup
+before the first certificate rotation completes, which occurs 24 hours after
+installation; otherwise, the backup will contain expired certificates. It is
+also recommended to take etcd backups during non-peak usage hours, because
+taking a backup is a blocking action.
+
+Be sure to take an etcd backup after you upgrade your cluster. This is important because when you restore your cluster, you must use an etcd backup that was taken from the same z-stream release. For example, an {product-title} 4.y.z cluster must use an etcd backup that was taken from 4.y.z.
+
+[IMPORTANT]
+====
+Back up your cluster's etcd data by performing a single invocation of the backup script on a control plane host. Do not take a backup for each control plane host.
+====
+
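+For example, after you connect to a control plane host, a single backup invocation looks similar to the following sketch. The output directory is a placeholder:
+
+[source,terminal]
+----
+$ sudo /usr/local/bin/cluster-backup.sh <backup_directory>
+----
+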
+After you have an etcd backup, you can xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
+
+// Backing up etcd data
+include::modules/backup-etcd.adoc[leveloffset=+1]
diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
new file mode 100644
index 000000000000..1d4d469358be
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/about-disaster-recovery.adoc
@@ -0,0 +1,43 @@
+[id="about-dr"]
+= About disaster recovery
+include::modules/common-attributes.adoc[]
+:context: about-dr
+
+toc::[]
+
+The disaster recovery documentation provides information for administrators on
+how to recover from several disaster situations that might occur with their
+{product-title} cluster. As an administrator, you might need to follow one or
+more of the following procedures to return your cluster to a working
+state.
+
+[IMPORTANT]
+====
+Disaster recovery requires you to have at least one healthy control plane host.
+====
+
+xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state]::
+This solution handles situations where you want to restore your cluster to
+a previous state, for example, if an administrator deletes something critical.
+This also includes situations where you have lost the majority of your control plane hosts, leading to etcd quorum loss and the cluster going offline. As long as you have taken an etcd backup, you can follow this procedure to restore your cluster to a previous state.
++
+If applicable, you might also need to xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates].
++
+[WARNING]
+====
+Restoring to a previous cluster state is a destructive and destabilizing action to take on a running cluster. This procedure should only be used as a last resort.
+
+Prior to performing a restore, see xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-scenario-2-restoring-cluster-state-about_dr-restoring-cluster-state[About restoring cluster state] for more information on the impact to the cluster.
+====
++
+[NOTE]
+====
+If a majority of your control plane hosts are still available and you have an etcd quorum, then follow the procedure to xref:../../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#replacing-unhealthy-etcd-member[replace a single unhealthy etcd member] instead.
+====
+
+xref:../../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[Recovering from expired control plane certificates]::
+This solution handles situations where your control plane certificates have
+expired. For example, if you shut down your cluster before the first certificate
+rotation, which occurs 24 hours after installation, your certificates will not
+be rotated and will expire. You can follow this procedure to recover from
+expired control plane certificates.
diff --git a/applications/operator_sdk/images b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images
similarity index 100%
rename from applications/operator_sdk/images
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/images
diff --git a/applications/operator_sdk/modules b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules
similarity index 100%
rename from applications/operator_sdk/modules
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/modules
diff --git a/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
new file mode 100644
index 000000000000..83380f6015e4
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc
@@ -0,0 +1,14 @@
+[id="dr-restoring-cluster-state"]
+= Restoring to a previous cluster state
+include::modules/common-attributes.adoc[]
+:context: dr-restoring-cluster-state
+
+toc::[]
+
+To restore the cluster to a previous state, you must have previously xref:../../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[backed up etcd data] by creating a snapshot. You will use this snapshot to restore the cluster state.
+
+// About restoring to a previous cluster state
+include::modules/dr-restoring-cluster-state-about.adoc[leveloffset=+1]
+
+// Restoring to a previous cluster state
+include::modules/dr-restoring-cluster-state.adoc[leveloffset=+1]
diff --git a/disaster_recovery/scenario-3-expired-certs.adoc b/backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
similarity index 100%
rename from disaster_recovery/scenario-3-expired-certs.adoc
rename to backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc
diff --git a/backup_and_restore/control_plane_backup_and_restore/images b/backup_and_restore/control_plane_backup_and_restore/images
new file mode 120000
index 000000000000..5fa6987088da
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/backup_and_restore/control_plane_backup_and_restore/modules b/backup_and_restore/control_plane_backup_and_restore/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
new file mode 100644
index 000000000000..947f07433bf3
--- /dev/null
+++ b/backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc
@@ -0,0 +1,42 @@
+[id="replacing-unhealthy-etcd-member"]
+= Replacing an unhealthy etcd member
+include::modules/common-attributes.adoc[]
+:context: replacing-unhealthy-etcd-member
+
+toc::[]
+
+This document describes the process to replace a single unhealthy etcd member.
+
+This process depends on whether the etcd member is unhealthy because the machine is not running or the node is not ready, or whether it is unhealthy because the etcd pod is crashlooping.
+
+[NOTE]
+====
+If you have lost the majority of your control plane hosts, leading to etcd quorum loss, then you must follow the disaster recovery procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state] instead of this procedure.
+
+If the control plane certificates are not valid on the member being replaced, then you must follow the procedure to xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-3-expired-certs.adoc#dr-recovering-expired-certs[recover from expired control plane certificates] instead of this procedure.
+
+If a control plane node is lost and a new one is created, the etcd cluster Operator handles generating the new TLS certificates and adding the node as an etcd member.
+====
+
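+For example, one way to see whether an etcd pod is crashlooping is to list the etcd pods. The namespace and label in this sketch match the standard etcd deployment:
+
+[source,terminal]
+----
+$ oc -n openshift-etcd get pods -l k8s-app=etcd
+----
+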
+== Prerequisites
+
+* Take an xref:../../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to replacing an unhealthy etcd member.
+
+// Identifying an unhealthy etcd member
+include::modules/restore-identify-unhealthy-etcd-member.adoc[leveloffset=+1]
+
+// Determining the state of the unhealthy etcd member
+include::modules/restore-determine-state-etcd-member.adoc[leveloffset=+1]
+
+== Replacing the unhealthy etcd member
+
+Depending on the state of your unhealthy etcd member, use one of the following procedures:
+
+* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-stopped-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose machine is not running or whose node is not ready]
+* xref:../../backup_and_restore/control_plane_backup_and_restore/replacing-unhealthy-etcd-member.adoc#restore-replace-crashlooping-etcd-member_replacing-unhealthy-etcd-member[Replacing an unhealthy etcd member whose etcd pod is crashlooping]
+
+// Replacing an unhealthy etcd member whose machine is not running or whose node is not ready
+include::modules/restore-replace-stopped-etcd-member.adoc[leveloffset=+2]
+
+// Replacing an unhealthy etcd member whose etcd pod is crashlooping
+include::modules/restore-replace-crashlooping-etcd-member.adoc[leveloffset=+2]
diff --git a/backup_and_restore/graceful-cluster-restart.adoc b/backup_and_restore/graceful-cluster-restart.adoc
new file mode 100644
index 000000000000..808789858dc4
--- /dev/null
+++ b/backup_and_restore/graceful-cluster-restart.adoc
@@ -0,0 +1,27 @@
+[id="graceful-restart-cluster"]
+= Restarting the cluster gracefully
+include::modules/common-attributes.adoc[]
+:context: graceful-restart-cluster
+
+toc::[]
+
+This document describes the process to restart your cluster after a graceful shutdown.
+
+Even though the cluster is expected to be functional after the restart, the cluster might not recover due to unexpected conditions, for example:
+
+* etcd data corruption during shutdown
+* Node failure due to hardware issues
+* Network connectivity issues
+
+If your cluster fails to recover, follow the steps to xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore to a previous cluster state].
+
+== Prerequisites
+
+* You have xref:../backup_and_restore/graceful-cluster-shutdown.adoc#graceful-shutdown-cluster[gracefully shut down your cluster].
+
+// Restarting the cluster
+include::modules/graceful-restart.adoc[leveloffset=+1]
+
+.Additional resources
+
+* See xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state] for how to use an etcd backup to restore if your cluster failed to recover after restarting.
diff --git a/backup_and_restore/graceful-cluster-shutdown.adoc b/backup_and_restore/graceful-cluster-shutdown.adoc
new file mode 100644
index 000000000000..adaf47b238c1
--- /dev/null
+++ b/backup_and_restore/graceful-cluster-shutdown.adoc
@@ -0,0 +1,19 @@
+[id="graceful-shutdown-cluster"]
+= Shutting down the cluster gracefully
+include::modules/common-attributes.adoc[]
+:context: graceful-shutdown-cluster
+
+toc::[]
+
+This document describes the process to gracefully shut down your cluster. You might need to temporarily shut down your cluster for maintenance reasons, or to save on resource costs.
+
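+The included procedure shuts down the nodes by using a loop similar to the following sketch, shown here for illustration. The one-minute shutdown delay is an example value:
+
+[source,terminal]
+----
+$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do
+    oc debug node/${node} -- chroot /host shutdown -h 1
+  done
+----
+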
+== Prerequisites
+
+* Take an xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backing-up-etcd-data_backup-etcd[etcd backup] prior to shutting down the cluster.
+
+// Shutting down the cluster
+include::modules/graceful-shutdown.adoc[leveloffset=+1]
+
+.Additional resources
+
+* xref:../backup_and_restore/graceful-cluster-restart.adoc#graceful-restart-cluster[Restarting the cluster gracefully]
diff --git a/build.py b/build.py
index 106a1a5ee879..44fb9b4279e8 100755
--- a/build.py
+++ b/build.py
@@ -1,7 +1,16 @@
#!/usr/bin/python
+# This file builds content from AsciiDoc into a ccutil-ready format, BUT it is
+# only used for validating content rather than for the actual conversion. For
+# the actual conversion, the file build_for_portal.py is used (on the portal).
+
+# The only difference between this file and build_for_portal.py is in the
+# _fix_links section. This file replaces link anchors with links to the top of
+# the same file so that the Travis build passes. Travis builds don't know
+# about books external to them, and this helps the builds pass.
+
import argparse
-import ConfigParser
+import configparser
import filecmp
import fnmatch
import logging
@@ -12,6 +21,7 @@
import sys
import time
import yaml
+import requests
from aura import cli
@@ -154,7 +164,7 @@ def parse_build_config(config):
"""
config = os.path.expanduser(config)
with open(config, "r") as f:
- data = list(yaml.load_all(f))
+        data = list(yaml.load_all(f, Loader=yaml.FullLoader))
for book in data:
book_name = book['Name']
@@ -469,6 +479,23 @@ def scrub_file(info, book_src_dir, src_file, tag=None, cwd=None):
"""
base_src_file = src_file.replace(info['src_dir'] + "/", "")
+    # added 1/Sep/2020
+    # To allow loading files like JSON and YAML from external sources, this
+    # block recognizes an include path that starts with http, checks whether
+    # the file exists, and if it does, returns the raw data that it finds.
+    if base_src_file.startswith("https://raw.githubusercontent.com/openshift/"):
+        try:
+            response = requests.get(base_src_file)
+            if response:
+                return response.text
+            else:
+                raise ConnectionError("Malformed URL")
+        except Exception:
+            log.error("An include file wasn't found: %s", base_src_file)
+            has_errors = True
+            sys.exit(-1)
+
# Get a list of predefined custom title ids for the file
title_ids = TITLE_IDS.get(base_src_file, {})
@@ -517,12 +544,12 @@ def scrub_file(info, book_src_dir, src_file, tag=None, cwd=None):
# Fix up any duplicate ids
if base_src_file in DUPLICATE_IDS:
- for duplicate_id, new_id in DUPLICATE_IDS[base_src_file].items():
+ for duplicate_id, new_id in list(DUPLICATE_IDS[base_src_file].items()):
content = content.replace("[[" + duplicate_id + "]]", "[[" + new_id + "]]")
# Replace incorrect links with correct ones
if base_src_file in INCORRECT_LINKS:
- for incorrect_link, fixed_link in INCORRECT_LINKS[base_src_file].items():
+ for incorrect_link, fixed_link in list(INCORRECT_LINKS[base_src_file].items()):
content = content.replace(incorrect_link, fixed_link)
# Fix up the links
@@ -614,6 +641,7 @@ def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):
rel_src_file = src_file.replace(os.path.dirname(book_dir) + "/", "")
has_errors = True
log.error("ERROR (%s): \"%s\" appears to try to reference a file not included in the \"%s\" distro", rel_src_file, link_text.replace("\n", ""), info['distro'])
+ sys.exit(-1)
else:
fixed_link = "xref:" + link_anchor.replace("#", "") + link_title
@@ -729,7 +757,7 @@ def build_file_id(file_title, file_to_id_map, existing_ids):
"""
file_id = base_id = re.sub(r"[\[\]\(\)#]", "", file_title.lower().replace("_", "-").replace(" ", "-"))
count = 1
- while file_id in existing_ids or file_id in file_to_id_map.values():
+ while file_id in existing_ids or file_id in list(file_to_id_map.values()):
file_id = base_id + "-" + str(count)
count += 1
@@ -750,8 +778,8 @@ def replace_nbsp(val):
"""Replaces non breaking spaces with a regular space"""
if val is not None:
# Check if the string is unicode
- if isinstance(val, unicode):
- return val.replace(u'\xa0', ' ')
+ if isinstance(val, str):
+ return val.replace('\xa0', ' ')
else:
return val.replace('\xc2\xa0', ' ')
else:
@@ -850,7 +878,7 @@ def _sync_directories_dircmp(dcmp):
shutil.copytree(left, right)
# Sync sub directories
- for subdcmp in dcmp.subdirs.values():
+ for subdcmp in list(dcmp.subdirs.values()):
_sync_directories_dircmp(subdcmp)
@@ -880,7 +908,7 @@ def parse_repo_config(config_file, distro, version):
log.error("Failed loading the repo configuration from %s", config_file)
sys.exit(-1)
- parser = ConfigParser.SafeConfigParser()
+    parser = configparser.ConfigParser()  # SafeConfigParser is a deprecated alias in Python 3
parser.read(config_file)
repo_urls = dict()
@@ -956,7 +984,7 @@ def main():
ensure_directory(base_git_dir)
# Checkout the gitlab repo, copy the changes and push them back up
- for book_dir, gitlab_repo_url in repo_urls.items():
+ for book_dir, gitlab_repo_url in list(repo_urls.items()):
build_book_dir = os.path.join(dest_dir, book_dir)
git_dirname = gitlab_repo_url.split('/')[-1].replace(".git", "")
git_dir = os.path.join(base_git_dir, git_dirname)
diff --git a/build_for_portal.py b/build_for_portal.py
new file mode 100644
index 000000000000..8f3614ff21eb
--- /dev/null
+++ b/build_for_portal.py
@@ -0,0 +1,993 @@
+#!/usr/bin/python
+
+# see notes in the build.py script as to what this file does
+
+import argparse
+import configparser
+import filecmp
+import fnmatch
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+import time
+import yaml
+import requests
+
+from aura import cli
+
+cli.init_logging(False, True)
+
+has_errors = False
+CLONE_DIR = "."
+BASE_PORTAL_URL = "https://access.redhat.com/documentation/en-us/"
+# ID_RE = re.compile("^\[(?:\[|id=\'|#)(.*?)(\'?,.*?)?(?:\]|\')?\]", re.M | re.DOTALL)
+ID_RE = re.compile("^\[(?:\[|id=\'|#|id=\")(.*?)(\'?,.*?)?(?:\]|\'|\")?\]", re.M | re.DOTALL)
+LINKS_RE = re.compile("(?:xref|link):([\./\w_-]*/?[\w_.-]*\.(?:html|adoc))?(#[\w_-]*)?(\[.*?\])", re.M | re.DOTALL)
+EXTERNAL_LINK_RE = re.compile("[\./]*([\w_-]+)/[\w_/-]*?([\w_.-]*\.(?:html|adoc))", re.DOTALL)
+INCLUDE_RE = re.compile("include::(.*?)\[(.*?)\]", re.M)
+IFDEF_RE = re.compile(r"^if(n?)def::(.*?)\[\]", re.M)
+ENDIF_RE = re.compile(r"^endif::(.*?)\[\]\r?\n", re.M)
+COMMENT_CONTENT_RE = re.compile(r"^^////$.*?^////$", re.M | re.DOTALL)
+TAG_CONTENT_RE = re.compile(r"//\s+tag::(.*?)\[\].*?// end::(.*?)\[\]", re.M | re.DOTALL)
+CMP_IGNORE_FILES = [".git", ".gitignore", "README.md", "build.cfg"]
+DEVNULL = open(os.devnull, 'wb')
+
+
+MASTER_FILE_BASE = "= {title}\n\
+:product-author: {product-author}\n\
+:product-title: {product}\n\
+:product-version: {product-version}\n\
+:{distro}:\n\
+:imagesdir: images\n\
+:idseparator: -\n\
+{preface-title}\n"
+
+DOCINFO_BASE = "{title}\n\
+{{product-title}}\n\
+{{product-version}}\n\
+Enter a short description here.\n\
+\n\
+ A short overview and summary of the book's subject and purpose, traditionally no more than one paragraph long.\n\
+\n\
+\n\
+ {product-author}\n\
+\n\
+\n"
+
+# A list of book titles that still use the old drupal url format (ie includes the product/version in the book title part)
+# eg. openshift-enterprise/version-3.0/openshift-enterprise-30-getting-started vs openshift-enterprise/version-3.0/getting-started
+DRUPAL_OLD_URL_TITLES = [
+ "Administrator Guide",
+ "Architecture",
+ "CLI Reference",
+ "Creating Images",
+ "Developer Guide",
+ "Getting Started",
+ "REST API Reference",
+ "Using Images",
+ "What's New?"
+]
+
+# A mapping of upstream book/category names to CP book names
+BOOK_NAME_OVERRIDES = {
+ "Administration": "Administrator Guide"
+}
+
+# Lines that should be stripped out/ignored when cleaning the content
+IGNORE_LINES = [
+ "{product-author}\n",
+ "{product-version}\n",
+ "{product-version]\n",
+ "{Lucas Costi}\n",
+ "toc::[]\n"
+]
+
+# Each MACRO in this list is omitted from the output
+# if the input appears as ':MACRO:' (colon, MACRO, colon).
+IGNORE_MACROS = [
+ "description",
+ "keywords",
+ "icons",
+ "data-uri",
+ "toc",
+ "toc-title"
+]
+
+# Files where the title should be removed when building the all-in-one
+ALL_IN_ONE_SCRAP_TITLE = [
+ "welcome/index.adoc"
+]
+
+# Files that should be commented out in the toc structure
+COMMENT_FILES = [
+ "admin_guide/overview.adoc",
+ "creating_images/overview.adoc",
+ "dev_guide/overview.adoc",
+ "using_images/overview.adoc",
+ "rest_api/overview.adoc"
+]
+
+# Map FILENAME to a map of TITLE to ID. In most cases, the ID is the
+# TITLE downcased, with "strange" chars replaced by hyphens.
+# A notable exception is 'any' TITLE.
+TITLE_IDS = {}
+# A dictionary of existing dup ids to new unique ids
+DUPLICATE_IDS = {}
+# Map FILENAME to a map of BAD to GOOD. Most of the time, BAD and GOOD
+# are in link syntax, i.e., beginning with "link:", but not always.
+INCORRECT_LINKS = {}
+
+log = logging.getLogger("build")
+
+
+def setup_parser():
+ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument("--distro", help="The distribution to build for", default="openshift-enterprise")
+ parser.add_argument("--all-in-one", help=argparse.SUPPRESS, action="store_true")
+ parser.add_argument("--title", help=argparse.SUPPRESS, default="Documentation")
+ parser.add_argument("--product", default="OpenShift Enterprise")
+ parser.add_argument("--version", default="3.0")
+ parser.add_argument("--author", default="Red Hat OpenShift Documentation Team")
+ parser.add_argument("--upstream-url", help="The upstream source url", default="https://github.com/openshift/openshift-docs.git")
+ parser.add_argument("--upstream-branch", help="The upstream source branch", default="enterprise-3.0")
+ parser.add_argument("--branch", help="The GitLab branch to commit changes into", default="GA")
+ parser.add_argument("-p", "--push", help="Commit and push the changes into GitLab", action="store_true")
+ parser.add_argument("--no-clean", help="Don't clean the drupal-build directory before building", action="store_true")
+ parser.add_argument("--no-upstream-fetch", help="Don't fetch the upstream sources", action="store_true")
+ return parser
+
+
+def find_build_config_file():
+ """
+ Finds the build config file to use, as it might be _topic_map.yml or _build_cfg.yml
+ """
+ config = os.path.abspath(os.path.join(CLONE_DIR, "_topic_map.yml"))
+ if not os.path.isfile(config):
+ config = os.path.abspath(os.path.join(CLONE_DIR, "_build_cfg.yml"))
+
+ return config
+
+
+def parse_build_config(config):
+ """
+ Parses the build config and returns a tree based structure for the config.
+ """
+ config = os.path.expanduser(config)
+ with open(config, "r") as f:
+        data = list(yaml.load_all(f, Loader=yaml.FullLoader))
+
+ for book in data:
+ book_name = book['Name']
+ if book_name in BOOK_NAME_OVERRIDES:
+ book['Name'] = BOOK_NAME_OVERRIDES[book_name]
+
+ return data
+
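+# For reference, each book entry that these functions consume from the topic
+# map looks roughly like the following sketch (only the keys that are read
+# below are shown; the values are illustrative):
+#
+#   Name: Getting Started
+#   Dir: getting_started
+#   Distros: openshift-enterprise
+#   Topics:
+#   - Name: Overview
+#     File: overview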
+
+def iter_tree(node, distro, dir_callback=None, topic_callback=None, include_path=True, parent_dir="", depth=0):
+ """
+    Iterates over a build config tree starting from a specific node, skipping content where the distro doesn't match. Additionally, calls are
+    made to the dir_callback or topic_callback functions when a directory or topic is found.
+ """
+ if "Topics" in node:
+ if check_node_distro_matches(node, distro):
+ if include_path:
+ topics_dir = os.path.join(parent_dir, node["Dir"])
+ else:
+ topics_dir = ""
+
+ if dir_callback is not None:
+ dir_callback(node, parent_dir, depth)
+
+ for topic in node["Topics"]:
+ iter_tree(topic, distro, dir_callback, topic_callback, True, topics_dir, depth + 1)
+ elif check_node_distro_matches(node, distro):
+ if topic_callback is not None:
+ topic_callback(node, parent_dir, depth)
+
+
+def check_node_distro_matches(node, distro):
+ """
+    Checks to see if the specified distro matches a distro in the node's distros list. If there is no distros list specified on the
+    node, then all distros are allowed, so return true.
+ """
+ if "Distros" not in node:
+ return True
+ else:
+ node_distros = [x.strip() for x in node['Distros'].split(",")]
+ for node_distro in node_distros:
+ # Check for an exact match, or a glob match
+ if node_distro == distro or fnmatch.fnmatchcase(distro, node_distro):
+ return True
+
+ return False
+
+
+def ensure_directory(directory):
+ """
+ Creates DIRECTORY if it does not exist.
+ """
+ if not os.path.exists(directory):
+ os.mkdir(directory)
+
+
+def build_master_files(info):
+ """
+ Builds the master.adoc and docinfo.xml files for each guide specified in the config.
+ """
+ dest_dir = info['dest_dir']
+
+ all_in_one = info['all_in_one']
+ all_in_one_text = ""
+ for book in info['book_nodes']:
+ book_dest_dir = os.path.join(dest_dir, book['Dir'])
+ ensure_directory(book_dest_dir)
+
+ book_info = dict(info)
+ book_info['title'] = book['Name']
+
+ master = generate_master_entry(book, book['Dir'], info['distro'], all_in_one, all_in_one=all_in_one)
+
+ # Save the content
+ if not all_in_one:
+ master_file = os.path.join(book_dest_dir, 'master.adoc')
+ docinfo_file = os.path.join(book_dest_dir, 'docinfo.xml')
+ master_base = MASTER_FILE_BASE.format(**book_info)
+
+ log.debug("Writing " + master_file)
+ with open(master_file, "w") as f:
+ f.write(master_base + master)
+ log.debug("Writing " + docinfo_file)
+ with open(docinfo_file, "w") as f:
+ f.write(DOCINFO_BASE.format(**book_info))
+ else:
+ if all_in_one_text == "":
+ # Remove the title for the first file in the book
+ master = master.replace("= " + book['Name'] + "\n", "")
+
+ # Set the preface title from the first file in the book
+ first_file = os.path.join(info['src_dir'], book['Dir'], book['Topics'][0]['File'] + ".adoc")
+ preface_title = None
+ with open(first_file, "r") as f:
+ line = f.readline()
+ while line:
+ if include_line(line):
+ preface_title = re.sub("^=+ ", "", line)
+ break
+ line = f.readline()
+ if preface_title is not None:
+ info['preface-title'] = ":preface-title: " + preface_title
+ all_in_one_text += master
+
+ if all_in_one:
+ master_file = os.path.join(dest_dir, 'master.adoc')
+ docinfo_file = os.path.join(dest_dir, 'docinfo.xml')
+
+ master_base = MASTER_FILE_BASE.format(**info)
+
+ log.debug("Writing " + master_file)
+ with open(master_file, "w") as f:
+ f.write(master_base + all_in_one_text)
+ log.debug("Writing " + docinfo_file)
+ with open(docinfo_file, "w") as f:
+ f.write(DOCINFO_BASE.format(**info))
+
+
+def generate_master_entry(node, book_dir, distro, include_name=True, all_in_one=False):
+ """
+ Generates the master.adoc core content for a specific book/node.
+ """
+ master_entries = []
+
+ def dir_callback(dir_node, parent_dir, depth):
+ if include_name or depth > 0:
+ master_entries.append("=" * (depth + 1) + " " + dir_node["Name"].replace("\\", ""))
+
+ def topic_callback(topic_node, parent_dir, depth):
+ book_file_path = os.path.join(parent_dir, topic_node["File"] + ".adoc")
+ file_path = os.path.join(book_dir, book_file_path)
+ include = "include::" + book_file_path + "[leveloffset=+" + str(depth) + "]"
+ if not all_in_one and file_path in COMMENT_FILES:
+ master_entries.append("////")
+ master_entries.append(include)
+ master_entries.append("////")
+ else:
+ master_entries.append(include)
+ # Add a blank line
+ master_entries.append("")
+
+ # Iterate over the tree and build the master.adoc content
+ iter_tree(node, distro, dir_callback, topic_callback, include_name)
+ return "\n".join(master_entries)
+
+
+def reformat_for_drupal(info):
+ """
+ Reformats the source content for use in the Customer Portal. This function does the following:
+
+ - Copies images over and flattens them into a single dir
+ - Copies source asciidoc over
+    - Filters the AsciiDoc source to remove duplicate macro definitions that should only be in the main file
+    - Adds ids to each file, so the files can be properly cross referenced
+    - Adds ids to sections that are cross referenced but have no id
+    - Fixes duplicate ids in the source content
+    - Fixes links that were done incorrectly and should be cross references instead
+ """
+ books = info['book_nodes']
+ src_dir = info['src_dir']
+ dest_dir = info['dest_dir']
+ distro = info['distro']
+
+ # Build a mapping of files to ids
+ # Note: For all-in-one we have to collect ids from all books first
+ file_to_id_map = {}
+ if info['all_in_one']:
+ book_ids = []
+ for book in books:
+ book_ids.extend(collect_existing_ids(book, distro, src_dir))
+ for book in books:
+ file_to_id_map.update(build_file_to_id_map(book, distro, book_ids, src_dir))
+ else:
+ for book in books:
+ book_ids = collect_existing_ids(book, distro, src_dir)
+ file_to_id_map.update(build_file_to_id_map(book, distro, book_ids, src_dir))
+ info['file_to_id_map'] = file_to_id_map
+
+ # Reformat the data
+ for book in books:
+ log.info("Processing %s", book['Dir'])
+ book_src_dir = os.path.join(src_dir, book['Dir'])
+
+ if info['all_in_one']:
+ images_dir = os.path.join(dest_dir, "images")
+ else:
+ book_dest_dir = os.path.join(dest_dir, book['Dir'])
+ images_dir = os.path.join(book_dest_dir, "images")
+
+ ensure_directory(images_dir)
+
+ log.debug("Copying source files for " + book['Name'])
+ copy_files(book, book_src_dir, src_dir, dest_dir, info)
+
+ log.debug("Copying images for " + book['Name'])
+ copy_images(book, src_dir, images_dir, distro)
+
+
+def copy_images(node, src_path, dest_dir, distro):
+ """
+ Copy images over to the destination directory and flatten all image directories into the one top level dir.
+ """
+ def dir_callback(dir_node, parent_dir, depth):
+ node_dir = os.path.join(parent_dir, dir_node['Dir'])
+ src = os.path.join(node_dir, "images")
+
+ if os.path.exists(src):
+ src_files = os.listdir(src)
+ for src_file in src_files:
+ shutil.copy(os.path.join(src, src_file), dest_dir)
+
+ iter_tree(node, distro, dir_callback, parent_dir=src_path)
+
+
+def copy_files(node, book_src_dir, src_dir, dest_dir, info):
+ """
+    Recursively copy files from the source directory to the destination directory, making sure to scrub the content, add ids where the
+    content is referenced elsewhere, and fix any links that should be cross references.
+ """
+ def dir_callback(dir_node, parent_dir, depth):
+ node_dest_dir = os.path.join(dest_dir, parent_dir, dir_node['Dir'])
+ ensure_directory(node_dest_dir)
+
+ def topic_callback(topic_node, parent_dir, depth):
+ node_src_dir = os.path.join(src_dir, parent_dir)
+ node_dest_dir = os.path.join(dest_dir, parent_dir)
+
+ src_file = os.path.join(node_src_dir, topic_node["File"] + ".adoc")
+ dest_file = os.path.join(node_dest_dir, topic_node["File"] + ".adoc")
+
+ # Copy the file
+ copy_file(info, book_src_dir, src_file, dest_dir, dest_file)
+
+ iter_tree(node, info['distro'], dir_callback, topic_callback)
+
+
+def copy_file(info, book_src_dir, src_file, dest_dir, dest_file, include_check=True, tag=None, cwd=None):
+ """
+    Copies a source file to its destination, making sure to scrub the content, add ids where the content is referenced elsewhere, and fix
+    any links that should be cross references. Also copies any includes that are referenced, since they aren't listed in _build_cfg.yml.
+ """
+ # It's possible that the file might have been created by another include, if so then just return
+ if os.path.isfile(dest_file):
+ return
+
+ # Touch the dest file, so we can handle circular includes
+ parent_dir = os.path.dirname(dest_file)
+ if not os.path.exists(parent_dir):
+ os.makedirs(parent_dir)
+    # os.mknod is not portable, so create the empty file with open() instead
+    open(dest_file, 'w').close()
+ # Scrub/fix the content
+ content = scrub_file(info, book_src_dir, src_file, tag=tag, cwd=cwd)
+
+ # Check for any includes
+ if include_check:
+ cleaned_content = remove_conditional_content(content, info)
+ include_iter = INCLUDE_RE.finditer(cleaned_content)
+ for include in include_iter:
+ include_text = include.group(0)
+ include_path = include.group(1)
+ include_unparsed_vars = include.group(2)
+
+ # Determine the include vars
+ include_vars = {}
+            if include_unparsed_vars:
+                for meta in re.split(r"\s*,\s*", include_unparsed_vars):
+                    # maxsplit=1, so values that themselves contain "=" stay intact
+                    key, value = re.split(r"\s*=\s*", meta, maxsplit=1)
+ include_vars[key] = value
+
+ # Determine the include src/dest paths
+ include_file = os.path.join(os.path.dirname(book_src_dir), include_path)
+ relative_path = os.path.relpath(include_file, os.path.dirname(src_file))
+
+ # If the path is in another book, copy it into this one
+ relative_book_path = os.path.relpath(include_file, book_src_dir)
+ if relative_book_path.startswith("../"):
+ path, src_book_name = os.path.split(book_src_dir)
+ dest_include_dir = os.path.join(dest_dir, src_book_name, "includes")
+ relative_path = os.path.join(os.path.relpath(dest_include_dir, parent_dir), os.path.basename(include_file))
+ else:
+ dest_include_dir = os.path.abspath(os.path.join(os.path.dirname(dest_file), os.path.dirname(relative_path)))
+ dest_include_file = os.path.join(dest_include_dir, os.path.basename(include_file))
+
+ # Make sure we have a reference to the current working dir
+ current_dir = cwd or os.path.dirname(src_file)
+ include_tag = include_vars.get("tag", None)
+
+ # Copy the file and fix the content
+ if not os.path.isfile(dest_include_file):
+ copy_file(info, book_src_dir, include_file, dest_dir, dest_include_file, tag=include_tag, cwd=current_dir)
+ else:
+ # The file has already been copied, so just fix the links for this tag
+ with open(dest_include_file, 'r') as f:
+ include_content = f.read()
+
+ # Fix any links
+ include_content = fix_links(include_content, info, book_src_dir, include_file, tag=include_tag, cwd=cwd)
+
+ with open(dest_include_file, "w") as f:
+ f.write(include_content)
+
+ content = content.replace(include_text, include.expand("include::" + relative_path + "[\\2]"))
+
+ with open(dest_file, "w") as f:
+ f.write(content)
+
+
+def scrub_file(info, book_src_dir, src_file, tag=None, cwd=None):
+ """
+ Scrubs a file and returns the cleaned file contents.
+ """
+ base_src_file = src_file.replace(info['src_dir'] + "/", "")
+
+    # Added 1/Sep/2020: to allow loading files such as JSON and YAML from
+    # external sources, this recognizes source paths that start with https and,
+    # if the URL can be fetched, returns the raw data that it finds.
+    if base_src_file.startswith("https://raw.githubusercontent.com/openshift/"):
+        try:
+            response = requests.get(base_src_file)
+            if response:
+                return response.text
+            raise ConnectionError("Malformed URL")
+        except Exception:
+            log.error("An include file wasn't found: %s", base_src_file)
+            sys.exit(-1)
+
+ # Get a list of predefined custom title ids for the file
+ title_ids = TITLE_IDS.get(base_src_file, {})
+
+ # Read in the source content
+ with open(src_file, 'r') as f:
+ src_file_content = f.readlines()
+
+ # Scrub the content
+ content = ""
+ header_found = content_found = False
+ current_id = None
+ for line in src_file_content:
+ # Ignore any leading blank lines, before any meaningful content is found
+ if line.strip() == "" and not content_found:
+ continue
+
+ # Check if the line should be included in the output
+ if include_line(line):
+ content_found = True
+
+ # Setup the document header content/id
+ if not header_found and line.strip() != "" and line.startswith("="):
+ header_found = True
+
+ if info['all_in_one'] and base_src_file in ALL_IN_ONE_SCRAP_TITLE and line.startswith("= "):
+ continue
+ # Add a section id if one doesn't exist, so we have something to link to
+ elif current_id is None and src_file in info['file_to_id_map']:
+ file_id = info['file_to_id_map'][src_file]
+ content += "[[" + file_id + "]]\n"
+ # Add a custom title id, if one is needed
+ elif line.startswith("=") and current_id is None:
+ for title in title_ids:
+                    title_re = r"^=+ " + title.replace(".", "\\.").replace("?", "\\?") + r"( (anchor|\[).*?)?(\n)?$"
+ if re.match(title_re, line):
+ content += "[[" + title_ids[title] + "]]\n"
+
+ # Set the current id based on the line content
+ if current_id is None and ID_RE.match(line.strip()):
+ current_id = line.strip()
+            elif current_id is not None and line.strip() != "":
+ current_id = None
+
+ # Add the line to the processed content
+ content += line
+
+ # Fix up any duplicate ids
+ if base_src_file in DUPLICATE_IDS:
+ for duplicate_id, new_id in list(DUPLICATE_IDS[base_src_file].items()):
+ content = content.replace("[[" + duplicate_id + "]]", "[[" + new_id + "]]")
+
+ # Replace incorrect links with correct ones
+ if base_src_file in INCORRECT_LINKS:
+ for incorrect_link, fixed_link in list(INCORRECT_LINKS[base_src_file].items()):
+ content = content.replace(incorrect_link, fixed_link)
+
+ # Fix up the links
+ content = fix_links(content, info, book_src_dir, src_file, tag=tag, cwd=cwd)
+
+ return content
+
+
+def include_line(line):
+ """
+ Determines if a line should be included in the filtered output.
+ """
+ if line in IGNORE_LINES:
+ return False
+
+ for macro in IGNORE_MACROS:
+ if line.startswith(":" + macro + ":"):
+ return False
+
+ return True
+
+
+def fix_links(content, info, book_src_dir, src_file, tag=None, cwd=None):
+ """
+ Fix any links that were done incorrectly and reference the output instead of the source content.
+ """
+ if info['all_in_one']:
+        content = _fix_links(content, info['src_dir'], src_file, info, tag=tag, cwd=cwd)
+ else:
+        # Determine whether the tag should be passed when fixing the links. If the file is in the same book, process the entire
+        # file; if it's outside the book, only process the content for the given tag.
+ if book_src_dir in src_file:
+ content = _fix_links(content, book_src_dir, src_file, info, cwd=cwd)
+ else:
+ content = _fix_links(content, book_src_dir, src_file, info, tag=tag, cwd=cwd)
+
+ return content
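+
+# Illustration of the rewriting this performs (paths and ids are made up):
+#   link:../other_book/topic.html#some-id[See the other book]
+#       -> link:<BASE_PORTAL_URL><portal-path-for-other_book>#some-id[See the other book]
+#   link:topic.html#some-id[See this book]
+#       -> xref:some-id[See this book]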
+
+
+def _fix_links(content, book_dir, src_file, info, tag=None, cwd=None):
+ """
+ Fix any links that were done incorrectly and reference the output instead of the source content.
+ """
+    # TODO Deal with xref so that they keep the proper path. Atm it'll just strip the path and leave only the id
+    global has_errors
+ file_to_id_map = info['file_to_id_map']
+ current_dir = cwd or os.path.dirname(src_file)
+ cleaned_content = remove_conditional_content(content, info, tag=tag)
+ links = LINKS_RE.finditer(cleaned_content)
+
+ for link in links:
+ link_text = link.group(0)
+ link_file = link.group(1)
+ link_anchor = link.group(2)
+ link_title = link.group(3)
+
+ if link_file is not None:
+ fixed_link_file = link_file.replace(".html", ".adoc")
+ fixed_link_file_abs = os.path.abspath(os.path.join(current_dir, fixed_link_file))
+ if fixed_link_file_abs in file_to_id_map:
+
+ # We are dealing with a cross reference to another book here
+ external_link = EXTERNAL_LINK_RE.search(link_file)
+ book_dir_name = external_link.group(1)
+
+ # Find the book name
+ book_name = book_dir_name
+ for book in info['data']:
+ if check_node_distro_matches(book, info['distro']) and book['Dir'] == book_dir_name:
+ book_name = book['Name']
+ break
+
+ fixed_link_file = BASE_PORTAL_URL + build_portal_url(info, book_name)
+
+ if link_anchor is None:
+ fixed_link = "link:" + fixed_link_file + "#" + file_to_id_map[fixed_link_file_abs] + link_title
+ else:
+ fixed_link = "link:" + fixed_link_file + link_anchor + link_title
+ else:
+ # Cross reference or link that isn't in the docs suite
+ fixed_link = link_text
+ if EXTERNAL_LINK_RE.search(link_file) is not None:
+ rel_src_file = src_file.replace(os.path.dirname(book_dir) + "/", "")
+ has_errors = True
+ log.error("ERROR (%s): \"%s\" appears to try to reference a file not included in the \"%s\" distro", rel_src_file, link_text.replace("\n", ""), info['distro'])
+ else:
+ fixed_link = "xref:" + link_anchor.replace("#", "") + link_title
+
+ content = content.replace(link_text, fixed_link)
+
+ return content
+
+
+def remove_conditional_content(content, info, tag=None):
+ """
+    Removes any conditional content that doesn't match the specified distro.
+ """
+ # Remove any ifdef content
+ ifdef = IFDEF_RE.search(content)
+ while ifdef is not None:
+ is_not_def = ifdef.group(1) == "n"
+ ifdef_distros = ifdef.group(2).split(",")
+ pos = ifdef.start()
+ end = ifdef.end()
+
+ # Determine if we should strip the conditional content, based on the distro
+ strip_content = False
+ if is_not_def and info['distro'] in ifdef_distros:
+ strip_content = True
+ elif not is_not_def and info['distro'] not in ifdef_distros:
+ strip_content = True
+
+ # Remove the conditional content
+ if strip_content:
+ # Find the correct endif for the current ifdef
+ search_pos = end
+ endpos = len(content)
+ while True:
+ next_ifdef = IFDEF_RE.search(content, search_pos)
+ endif = ENDIF_RE.search(content, search_pos)
+
+ if not endif:
+ break
+ elif not next_ifdef or next_ifdef.start() > endif.start():
+ endpos = endif.end()
+ break
+ else:
+ search_pos = endif.end()
+
+ # Replace the content and move the end pos to be the same as the start since the content was removed
+ ifdef_text = content[pos:endpos]
+ content = content.replace(ifdef_text, "")
+ end = pos
+
+ # Move onto the next ifdef
+ ifdef = IFDEF_RE.search(content, end)
+
+ # Remove commented out content
+ for comment in COMMENT_CONTENT_RE.finditer(content):
+ content = content.replace(comment.group(0), "")
+
+ # Remove content outside of tags
+ if tag is not None:
+ for tag_match in TAG_CONTENT_RE.finditer(content):
+ tag_text = tag_match.group(0)
+ tag_label = tag_match.group(1)
+ if tag_label == tag:
+ # Tag matches, so only use the content in the tag
+ content = tag_text
+
+ return content
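+
+# For example, with distro "openshift-origin" this block would be stripped in
+# its entirety (illustrative AsciiDoc):
+#   ifdef::openshift-enterprise[]
+#   This paragraph only applies to the enterprise distro.
+#   endif::[]
+# and an ifndef::openshift-origin[] block would be removed for the same reason.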
+
+
+def collect_existing_ids(node, distro, path):
+ """
+    Examines the asciidoc file contents of all nodes and returns any existing ids.
+ """
+ book_ids = []
+
+ def topic_callback(topic_node, parent_dir, depth):
+ src_file = os.path.join(parent_dir, topic_node["File"] + ".adoc")
+ file_ids = extract_file_ids(src_file)
+ book_ids.extend(file_ids)
+
+ iter_tree(node, distro, topic_callback=topic_callback, parent_dir=path)
+
+ return book_ids
+
+
+def build_file_to_id_map(node, distro, existing_ids, path=""):
+ """
+ Builds a mapping of file names/paths to the root id for the file. This is used to fix the links that are done incorrectly.
+ """
+ file_to_id_map = {}
+
+ def topic_callback(topic_node, parent_dir, depth):
+ src_file = os.path.join(parent_dir, topic_node["File"] + ".adoc")
+ file_to_id_map[src_file] = build_file_id(topic_node["Name"], file_to_id_map, existing_ids)
+
+ iter_tree(node, distro, topic_callback=topic_callback, parent_dir=path)
+ return file_to_id_map
+
+
+def extract_file_ids(file_path):
+ """
+ Extracts all the ids used in the specified file.
+ """
+ with open(file_path, "r") as f:
+ content = f.read()
+
+ ids = ID_RE.finditer(content)
+ return [id.group(1) for id in ids]
+
+
+def build_file_id(file_title, file_to_id_map, existing_ids):
+ """
+ Generates a unique id for a file, based on its title.
+ """
+ file_id = base_id = re.sub(r"[\[\]\(\)#]", "", file_title.lower().replace("_", "-").replace(" ", "-"))
+ count = 1
+ while file_id in existing_ids or file_id in list(file_to_id_map.values()):
+ file_id = base_id + "-" + str(count)
+ count += 1
+
+ return file_id
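+
+# Illustrative behavior: a title of "Build Inputs (Advanced)" becomes
+# "build-inputs-advanced"; if that id is already taken, the next candidates
+# are "build-inputs-advanced-1", "build-inputs-advanced-2", and so on.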
+
+
+def build_portal_url(info, book_name):
+ """
+ Builds a portal url path by escaping the content in the same way drupal does.
+ """
+ product = info['product']
+ version = info['product-version']
+
+ return generate_url_from_name(product) + "/" + generate_url_from_name(version) + "/html-single/" + generate_url_from_name(book_name) + "/"
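+
+# Illustrative result: product "OpenShift Container Platform", version "4.8",
+# and book "Builds" yield "openshift_container_platform/4.8/html-single/builds/".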
+
+
+def replace_nbsp(val):
+ """Replaces non breaking spaces with a regular space"""
+ if val is not None:
+ # Check if the string is unicode
+ if isinstance(val, str):
+ return val.replace('\xa0', ' ')
+ else:
+ return val.replace('\xc2\xa0', ' ')
+ else:
+ return None
+
+
+def generate_url_from_name(name, delimiter='_'):
+ """
+    Generates a url fragment from a product, version, or title name.
+ """
+ # Remove characters that aren't allowed in urls
+    url = re.sub(r"^\.+|[^0-9a-zA-Z _\-.]+", "", replace_nbsp(name))
+    # Replace spaces with the delimiter
+    url = re.sub(r"\s+", delimiter, url)
+    # Collapse repeated delimiters into a single one
+    url = re.sub(re.escape(delimiter) + "+", delimiter, url)
+ return url.lower()
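+
+# Illustrative examples:
+#   generate_url_from_name("Getting Started!") == "getting_started"
+#   generate_url_from_name("Getting  Started", delimiter="-") == "getting-started"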
+
+
+def call_git_command(*args, **kwargs):
+ """
+ Calls a git command and retries the command if it is unable to connect to the remote repo
+ """
+ retries = kwargs.pop("retries", 3)
+ try:
+        # universal_newlines=True so output and e.output are str rather than bytes
+        output = subprocess.check_output(*args, universal_newlines=True, **kwargs)
+ if output is not None:
+ sys.stdout.write(output)
+ return output
+ except subprocess.CalledProcessError as e:
+ retries -= 1
+ if retries > 0 and "fatal: Could not read from remote repository" in e.output:
+            # Connection failed, so wait a couple of seconds and try again
+            time.sleep(2)
+            return call_git_command(*args, retries=retries, **kwargs)
+ else:
+ raise
+
+
+def fetch_sources(url, branch, dir=None, clone_dirname=None):
+ """
+    Fetches sources from a git repository. If the repository hasn't been cloned yet, it is cloned into `clone_dirname`; if it has
+    already been cloned, the repo is just updated.
+ """
+ # Setup the defaults
+ if dir is None:
+ dir = os.getcwd()
+ if clone_dirname is None:
+ clone_dirname = url.split('/')[-1].replace(".git", "")
+
+ # If the dir already exists update the content, otherwise clone it
+ clone_dir = os.path.abspath(os.path.join(dir, clone_dirname))
+ if os.path.exists(os.path.join(clone_dir, ".git")):
+ cmd = ["git", "pull", "-f"]
+ cmd_dir = clone_dir
+
+ # Do a checkout to make sure we are on the right branch
+ checkout_cmd = ["git", "checkout", branch]
+ subprocess.check_output(checkout_cmd, cwd=cmd_dir, stderr=subprocess.STDOUT)
+ else:
+ cmd = ["git", "clone", "-b", branch, url, clone_dirname]
+ cmd_dir = os.path.abspath(dir)
+
+ # Execute the command
+ call_git_command(cmd, cwd=cmd_dir, stderr=subprocess.STDOUT)
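+
+# Typical usage (url and branch are illustrative): the first call clones into
+# ./openshift-docs, later calls check out the branch and pull instead:
+#   fetch_sources("https://github.com/openshift/openshift-docs.git", "main")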
+
+
+def sync_directories(src_dir, dest_dir, ignore=None):
+ """
+    Syncs two directories so that both contain the same content, with the exception of ignored files.
+ """
+ if ignore is None:
+ ignore = []
+ ignore.extend(CMP_IGNORE_FILES)
+
+ dcmp = filecmp.dircmp(src_dir, dest_dir, ignore)
+ _sync_directories_dircmp(dcmp)
+
+
+def _sync_directories_dircmp(dcmp):
+ # Remove files that only exist in the dest directory
+ for filename in dcmp.right_only:
+ right = os.path.join(dcmp.right, filename)
+ if os.path.isfile(right):
+ os.remove(right)
+ else:
+ shutil.rmtree(right)
+
+ # Copy files that only exist in the source directory or files that have changed
+ for filename in dcmp.left_only+dcmp.common_files:
+ left = os.path.join(dcmp.left, filename)
+ right = os.path.join(dcmp.right, filename)
+ if os.path.isfile(left):
+ shutil.copy2(left, right)
+ else:
+ shutil.copytree(left, right)
+
+ # Sync sub directories
+ for subdcmp in list(dcmp.subdirs.values()):
+ _sync_directories_dircmp(subdcmp)
+
+
+def commit_and_push_changes(git_dir, git_branch, git_upstream_branch):
+ """
+ Adds, commits and pushes any changes to a local git repository.
+ """
+ # Add all the changes
+ add_cmd = ["git", "add", "--all"]
+ subprocess.check_call(add_cmd, cwd=git_dir)
+ try:
+ # Commit the changes
+ commit_cmd = ["git", "commit", "-m", "Merge branch 'upstream/" + git_upstream_branch + "' into " + git_branch,
+ "--author", "CCS OSE Build Script "]
+ call_git_command(commit_cmd, cwd=git_dir, stderr=subprocess.STDOUT)
+ # Push the changes
+ push_cmd = ["git", "push"]
+ call_git_command(push_cmd, cwd=git_dir, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ if e.output is None or "nothing to commit" not in e.output:
+ raise
+
+
+def parse_repo_config(config_file, distro, version):
+ # Make sure the repo config file exists
+ if not os.path.isfile(config_file):
+ log.error("Failed loading the repo configuration from %s", config_file)
+ sys.exit(-1)
+
+    parser = configparser.ConfigParser()
+ parser.read(config_file)
+
+ repo_urls = dict()
+ section_name = distro + "-" + version
+ if parser.has_section(section_name):
+ for (key, value) in parser.items(section_name):
+ repo_urls[key] = value
+
+ return repo_urls
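+
+# The repos.ini file is standard INI with one section per "<distro>-<version>",
+# mapping book directories to GitLab repo urls, for example (values made up):
+#   [openshift-enterprise-4.8]
+#   builds = https://gitlab.example.com/docs/builds.git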
+
+
+def main():
+ parser = setup_parser()
+ args = parser.parse_args()
+ logging.basicConfig(format='%(message)s', level=logging.INFO, stream=sys.stdout)
+
+ # Copy down the latest files
+ if not args.no_upstream_fetch:
+ log.info("Fetching the upstream sources")
+ fetch_sources(args.upstream_url, args.upstream_branch, clone_dirname=CLONE_DIR)
+
+ config = find_build_config_file()
+ src_dir = os.path.dirname(config)
+
+ # Parse the build config
+ data = parse_build_config(config)
+
+ # Filter the list of books that should be built
+ book_nodes = [node for node in data if check_node_distro_matches(node, args.distro)]
+
+ # Make the new source tree
+ dest_dir = os.path.join(os.getcwd(), "drupal-build", args.distro)
+ if not args.no_clean:
+ log.info("Cleaning the drupal-build directory")
+ if os.path.exists(dest_dir):
+ shutil.rmtree(dest_dir)
+ os.makedirs(dest_dir)
+ elif not os.path.exists(dest_dir):
+ os.makedirs(dest_dir)
+
+ info = {
+ 'title': args.title,
+ 'product-author': args.author,
+ 'product-version': args.version,
+ 'product': args.product,
+ 'distro': args.distro,
+ 'src_dir': src_dir,
+ 'dest_dir': dest_dir,
+ 'data': data,
+ 'book_nodes': book_nodes,
+ 'all_in_one': args.all_in_one,
+ 'preface-title': "",
+ "upstream_branch": args.upstream_branch
+ }
+
+ # Build the master files
+ log.info("Building the drupal files")
+ build_master_files(info)
+
+ # Copy the original data and reformat for drupal
+ reformat_for_drupal(info)
+
+ if has_errors:
+ sys.exit(1)
+
+ if args.push:
+ # Parse the repo urls
+ config_file = os.path.join(os.path.dirname(__file__), 'repos.ini')
+ repo_urls = parse_repo_config(config_file, args.distro, args.version)
+
+        # Make sure the base git dir exists
+ base_git_dir = os.path.join(os.getcwd(), "gitlab-repos")
+ ensure_directory(base_git_dir)
+
+ # Checkout the gitlab repo, copy the changes and push them back up
+ for book_dir, gitlab_repo_url in list(repo_urls.items()):
+ build_book_dir = os.path.join(dest_dir, book_dir)
+ git_dirname = gitlab_repo_url.split('/')[-1].replace(".git", "")
+ git_dir = os.path.join(base_git_dir, git_dirname)
+
+ try:
+ log.info("Fetching " + book_dir + " sources from GitLab")
+ fetch_sources(gitlab_repo_url, args.branch, base_git_dir, git_dirname)
+
+ log.info("Syncing " + book_dir)
+ sync_directories(build_book_dir, git_dir, ["docinfo.xml"])
+
+ log.info("Pushing " + book_dir + " changes back to GitLab")
+ commit_and_push_changes(git_dir, args.branch, args.upstream_branch)
+ except subprocess.CalledProcessError as e:
+ if e.output:
+ sys.stdout.write(e.output)
+ raise
+
+if __name__ == "__main__":
+ main()
diff --git a/builds/advanced-build-operations.adoc b/builds/advanced-build-operations.adoc
deleted file mode 100644
index 37aae6d139fc..000000000000
--- a/builds/advanced-build-operations.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-// This assembly is included in the following assemblies:
-// * assembly/builds
-
-[id="advanced-build-operations"]
-= Performing advanced builds
-include::modules/common-attributes.adoc[]
-:context: advanced-build-operations
-toc::[]
-
-The following sections provide instructions for advanced build operations including
-setting build resources and maximum duration, assigning builds to nodes, chaining
-builds, build pruning, and build run policies.
-
-// The following include statements pull in the module files that comprise the assembly. Include any combination of concept, procedure, or reference modules required to cover the user story. You can also include other assemblies.
-
-include::modules/builds-setting-build-resources.adoc[leveloffset=+1]
-
-include::modules/builds-setting-maximum-duration.adoc[leveloffset=+1]
-
-include::modules/builds-assigning-builds-to-nodes.adoc[leveloffset=+1]
-
-include::modules/builds-chaining-builds.adoc[leveloffset=+1]
-
-include::modules/builds-build-pruning.adoc[leveloffset=+1]
-
-include::modules/builds-build-run-policy.adoc[leveloffset=+1]
diff --git a/builds/basic-build-operations.adoc b/builds/basic-build-operations.adoc
deleted file mode 100644
index 1bc7fb9d40a7..000000000000
--- a/builds/basic-build-operations.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-// This assembly is included in the following assemblies:
-// * assembly/builds
-
-[id="basic-build-operations"]
-= Performing basic builds
-include::modules/common-attributes.adoc[]
-:context: basic-build-operations
-toc::[]
-
-The following sections provide instructions for basic build operations including
-starting and canceling builds, deleting BuildConfigs, viewing build details, and
-accessing build logs.
-
-// The following include statements pull in the module files that comprise the assembly. Include any combination of concept, procedure, or reference modules required to cover the user story. You can also include other assemblies.
-
-include::modules/builds-basic-start-build.adoc[leveloffset=+1]
-include::modules/builds-basic-start-re-run.adoc[leveloffset=+2]
-include::modules/builds-basic-start-logs.adoc[leveloffset=+2]
-include::modules/builds-basic-start-environment-variable.adoc[leveloffset=+2]
-include::modules/builds-basic-start-source.adoc[leveloffset=+2]
-include::modules/builds-basic-cancel-build.adoc[leveloffset=+1]
-include::modules/builds-basic-cancel-multiple.adoc[leveloffset=+2]
-include::modules/builds-basic-cancel-all.adoc[leveloffset=+2]
-include::modules/builds-basic-cancel-all-state.adoc[leveloffset=+2]
-include::modules/builds-basic-delete-buildconfig.adoc[leveloffset=+1]
-include::modules/builds-basic-view-build-details.adoc[leveloffset=+1]
-include::modules/builds-basic-access-build-logs.adoc[leveloffset=+1]
-include::modules/builds-basic-access-buildconfig-logs.adoc[leveloffset=+2]
-include::modules/builds-basic-access-buildconfig-version-logs.adoc[leveloffset=+2]
-include::modules/builds-basic-access-build-verbosity.adoc[leveloffset=+2]
diff --git a/builds/running-entitled-builds.adoc b/builds/running-entitled-builds.adoc
deleted file mode 100644
index 19919844748b..000000000000
--- a/builds/running-entitled-builds.adoc
+++ /dev/null
@@ -1,35 +0,0 @@
-:context: running-entitled-builds
-= Running entitled builds
-include::modules/common-attributes.adoc[]
-toc::[]
-
-Use the following sections to run entitled builds on {product-title}.
-
-include::modules/builds-create-imagestreamtag.adoc[leveloffset=+1]
-
-include::modules/builds-source-secrets-entitlements.adoc[leveloffset=+1]
-
-There are two paths to pulling in the base RHEL image:
-
-* Add the pull secret to registry.redhat.io to your project.
-* Create an imagestream in the OpenShift namespace for the RHEL-based
-image. This makes the imagestream available across the cluster.
-
-== Running entitled builds with Subscription Manager
-
-include::modules/builds-source-input-subman-config.adoc[leveloffset=+2]
-
-include::modules/builds-strategy-docker-entitled-subman.adoc[leveloffset=+2]
-
-== Running entitled builds with Satellite
-
-include::modules/builds-source-input-satellite-config.adoc[leveloffset=+2]
-
-include::modules/builds-strategy-docker-entitled-satellite.adoc[leveloffset=+2]
-
-include::modules/builds-strategy-docker-squash-layers.adoc[leveloffset=+1]
-
-
-.Additional resources
-
-* xref:../openshift_images/managing-imagestreams.adoc[Managing imagestreams]
diff --git a/builds/securing-builds-by-strategy.adoc b/builds/securing-builds-by-strategy.adoc
deleted file mode 100644
index f7f5ff55b945..000000000000
--- a/builds/securing-builds-by-strategy.adoc
+++ /dev/null
@@ -1,54 +0,0 @@
-[id="securing-builds-by-strategy"]
-= Securing builds by strategy
-include::modules/common-attributes.adoc[]
-:context: securing-builds-by-strategy
-toc::[]
-
-Builds in {product-title} are run in privileged containers. Depending on the
-build strategy used, this allows a user who can run builds to escalate their
-permissions on the cluster and host nodes. As a security measure, limit who can
-run builds and the strategy that is used for those builds. Custom builds are
-inherently less safe than Source builds, because they can execute any code
-within a privileged container, and are disabled by default. Grant Docker build
-permissions with caution, because a vulnerability in the Dockerfile processing
-logic could result in a privileges being granted on the host node.
-
-By default, all users that can create builds are granted permission to use the
-Docker and Source-to-Image (S2I) build strategies. Users with *cluster-admin*
-privileges can enable the Custom build strategy, as referenced in the
-restricting build strategies to a user globally section.
-
-You can control who can build and which build strategies they can use by using
-an authorization policy. Each build strategy has a corresponding build
-subresource. A user must have permission to create a build _and_ permission to
-create on the build strategy subresource in order to create builds using that
-strategy. Default roles are provided which grant the *create* permission on the
-build strategy subresource.
-
-.Build Strategy Subresources and Roles
-[options="header"]
-|===
-
-|Strategy |Subresource |Role
-
-|Docker
-|builds/docker
-|system:build-strategy-docker
-
-|Source-to-Image
-|builds/source
-|system:build-strategy-source
-
-|Custom
-|builds/custom
-|system:build-strategy-custom
-
-|JenkinsPipeline
-|builds/jenkinspipeline
-|system:build-strategy-jenkinspipeline
-
-|===
-
-include::modules/builds-disabling-build-strategy-globally.adoc[leveloffset=+1]
-include::modules/builds-restricting-build-strategy-globally.adoc[leveloffset=+1]
-include::modules/builds-restricting-build-strategy-to-user.adoc[leveloffset=+1]
diff --git a/builds/setting-up-trusted-ca.adoc b/builds/setting-up-trusted-ca.adoc
deleted file mode 100644
index 28ab3b17aa32..000000000000
--- a/builds/setting-up-trusted-ca.adoc
+++ /dev/null
@@ -1,26 +0,0 @@
-[id="setting-up-trusted-ca"]
-= Setting up additional trusted certifying authorities for builds
-include::modules/common-attributes.adoc[]
-:context: setting-up-trusted-ca
-toc::[]
-
-Use the following sections to set up additional certifying authorities (CA).
-
-In general, a cluster administrator creates a ConfigMap and adds additional CAs.
-
-* Each CA must be associated with a domain. Domain format is `hostname[..port]`.
-* `domain` is the key in the ConfigMap; `value` is the PEM-encoded certificate.
-* The ConfigMap name must be set in the `build.config.openshift.io/cluster`.
-//* No longer needs single PEM bundle
-
-
-include::modules/configmap-overview.adoc[leveloffset=+1]
-
-include::modules/configmap-create.adoc[leveloffset=+1]
-
-include::modules/configmap-adding-ca.adoc[leveloffset=+1]
-
-== Additional resources
-
-* link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap[Create a ConfigMap]
-* link:https://kubectl.docs.kubernetes.io/pages/app_management/secrets_and_configmaps.html[Secrets and ConfigMaps]
diff --git a/builds/understanding-buildconfigs.adoc b/builds/understanding-buildconfigs.adoc
deleted file mode 100644
index e4075bd81db7..000000000000
--- a/builds/understanding-buildconfigs.adoc
+++ /dev/null
@@ -1,10 +0,0 @@
-[id="understanding-buildconfigs"]
-= Understanding build configurations
-include::modules/common-attributes.adoc[]
-:context: understanding-builds
-toc::[]
-
-The following sections define the concept of a build, `BuildConfig`, and outline
-the primary build strategies available.
-
-include::modules/builds-buildconfig.adoc[leveloffset=+1]
diff --git a/cicd/builds/advanced-build-operations.adoc b/cicd/builds/advanced-build-operations.adoc
new file mode 100644
index 000000000000..1d49a98d650f
--- /dev/null
+++ b/cicd/builds/advanced-build-operations.adoc
@@ -0,0 +1,22 @@
+[id="advanced-build-operations"]
+= Performing advanced builds
+include::modules/common-attributes.adoc[]
+:context: advanced-build-operations
+
+toc::[]
+
+The following sections provide instructions for advanced build operations including
+setting build resources and maximum duration, assigning builds to nodes, chaining
+builds, build pruning, and build run policies.
+
+include::modules/builds-setting-build-resources.adoc[leveloffset=+1]
+
+include::modules/builds-setting-maximum-duration.adoc[leveloffset=+1]
+
+include::modules/builds-assigning-builds-to-nodes.adoc[leveloffset=+1]
+
+include::modules/builds-chaining-builds.adoc[leveloffset=+1]
+
+include::modules/builds-build-pruning.adoc[leveloffset=+1]
+
+include::modules/builds-build-run-policy.adoc[leveloffset=+1]
diff --git a/cicd/builds/basic-build-operations.adoc b/cicd/builds/basic-build-operations.adoc
new file mode 100644
index 000000000000..001250963fb4
--- /dev/null
+++ b/cicd/builds/basic-build-operations.adoc
@@ -0,0 +1,25 @@
+[id="basic-build-operations"]
+= Performing and configuring basic builds
+include::modules/common-attributes.adoc[]
+:context: basic-build-operations
+
+toc::[]
+
+The following sections provide instructions for basic build operations, including starting and canceling builds, editing `BuildConfigs`, deleting `BuildConfigs`, viewing build details, and accessing build logs.
+
+include::modules/builds-basic-start-build.adoc[leveloffset=+1]
+include::modules/builds-basic-start-re-run.adoc[leveloffset=+2]
+include::modules/builds-basic-start-logs.adoc[leveloffset=+2]
+include::modules/builds-basic-start-environment-variable.adoc[leveloffset=+2]
+include::modules/builds-basic-start-source.adoc[leveloffset=+2]
+include::modules/builds-basic-cancel-build.adoc[leveloffset=+1]
+include::modules/builds-basic-cancel-multiple.adoc[leveloffset=+2]
+include::modules/builds-basic-cancel-all.adoc[leveloffset=+2]
+include::modules/builds-basic-cancel-all-state.adoc[leveloffset=+2]
+include::modules/builds-basic-edit-buildconfig.adoc[leveloffset=+1]
+include::modules/builds-basic-delete-buildconfig.adoc[leveloffset=+1]
+include::modules/builds-basic-view-build-details.adoc[leveloffset=+1]
+include::modules/builds-basic-access-build-logs.adoc[leveloffset=+1]
+include::modules/builds-basic-access-buildconfig-logs.adoc[leveloffset=+2]
+include::modules/builds-basic-access-buildconfig-version-logs.adoc[leveloffset=+2]
+include::modules/builds-basic-access-build-verbosity.adoc[leveloffset=+2]
diff --git a/builds/build-configuration.adoc b/cicd/builds/build-configuration.adoc
similarity index 99%
rename from builds/build-configuration.adoc
rename to cicd/builds/build-configuration.adoc
index ba353d689893..67d6d8cd08c7 100644
--- a/builds/build-configuration.adoc
+++ b/cicd/builds/build-configuration.adoc
@@ -2,6 +2,7 @@
= Build configuration resources
include::modules/common-attributes.adoc[]
:context: build-configuration
+
toc::[]
Use the following procedure to configure build settings.
diff --git a/builds/build-strategies.adoc b/cicd/builds/build-strategies.adoc
similarity index 87%
rename from builds/build-strategies.adoc
rename to cicd/builds/build-strategies.adoc
index d0711a2e7e37..78ac8f83b89c 100644
--- a/builds/build-strategies.adoc
+++ b/cicd/builds/build-strategies.adoc
@@ -2,11 +2,14 @@
= Using build strategies
include::modules/common-attributes.adoc[]
:context: build-strategies
+
toc::[]
The following sections define the primary supported build strategies, and how to
use them.
+// Docker build strategy
+
include::modules/builds-strategy-docker-build.adoc[leveloffset=+1]
include::modules/builds-strategy-docker-from-image.adoc[leveloffset=+2]
@@ -17,6 +20,15 @@ include::modules/builds-strategy-docker-environment-variables.adoc[leveloffset=+
include::modules/builds-strategy-docker-build-arguments.adoc[leveloffset=+2]
+include::modules/builds-strategy-docker-squash-layers.adoc[leveloffset=+2]
+
+:context: build-strategies-docker
+
+include::modules/builds-using-build-volumes.adoc[leveloffset=+2]
+
+
+// S2I build strategy
+
include::modules/builds-strategy-s2i-build.adoc[leveloffset=+1]
include::modules/builds-strategy-s2i-incremental-builds.adoc[leveloffset=+2]
@@ -37,6 +49,12 @@ include::modules/images-create-s2i-build.adoc[leveloffset=+3]
include::modules/images-create-s2i-scripts.adoc[leveloffset=+3]
+:context: build-strategies-s2i
+
+include::modules/builds-using-build-volumes.adoc[leveloffset=+2]
+
+// Custom build strategy
+
include::modules/builds-strategy-custom-build.adoc[leveloffset=+1]
include::modules/builds-strategy-custom-from-image.adoc[leveloffset=+2]
@@ -47,6 +65,8 @@ include::modules/builds-strategy-custom-environment-variables.adoc[leveloffset=+
include::modules/images-custom.adoc[leveloffset=+2]
+// Pipeline build strategy
+
include::modules/builds-strategy-pipeline-build.adoc[leveloffset=+1]
include::modules/builds-understanding-openshift-pipeline.adoc[leveloffset=+2]
diff --git a/builds/creating-build-inputs.adoc b/cicd/builds/creating-build-inputs.adoc
similarity index 95%
rename from builds/creating-build-inputs.adoc
rename to cicd/builds/creating-build-inputs.adoc
index 86efe19638d9..7be3e31fb1a0 100644
--- a/builds/creating-build-inputs.adoc
+++ b/cicd/builds/creating-build-inputs.adoc
@@ -1,10 +1,8 @@
-:context: creating-build-inputs
+[id="creating-build-inputs"]
= Creating build inputs
include::modules/common-attributes.adoc[]
+:context: creating-build-inputs
-:toc: macro
-:toc-title:
-:prewrap!:
toc::[]
Use the following sections for an overview of build inputs, instructions on how
@@ -57,9 +55,12 @@ include::modules/builds-adding-input-secrets-configmaps.adoc[leveloffset=+2]
include::modules/builds-source-to-image.adoc[leveloffset=+2]
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin,openshift-dedicated[]
+
include::modules/builds-docker-strategy.adoc[leveloffset=+2]
include::modules/builds-custom-strategy.adoc[leveloffset=+2]
+endif::[]
include::modules/builds-using-external-artifacts.adoc[leveloffset=+1]
diff --git a/builds/custom-builds-buildah.adoc b/cicd/builds/custom-builds-buildah.adoc
similarity index 80%
rename from builds/custom-builds-buildah.adoc
rename to cicd/builds/custom-builds-buildah.adoc
index 4b9aef54bbfc..1c781ab64730 100644
--- a/builds/custom-builds-buildah.adoc
+++ b/cicd/builds/custom-builds-buildah.adoc
@@ -2,12 +2,13 @@
= Custom image builds with Buildah
include::modules/common-attributes.adoc[]
:context: custom-builds-buildah
+
toc::[]
-With {product-title} {product-version}, a Docker socket will not be present on the host
+With {product-title} {product-version}, a docker socket will not be present on the host
nodes. This means the _mount docker socket_ option of a custom build is not
-guaranteed to provide an accessible Docker socket for use within a custom build
+guaranteed to provide an accessible docker socket for use within a custom build
image.
If you require this capability in order to build and push images, add the Buildah
@@ -24,9 +25,9 @@ to compromise the cluster and therefore should be granted only to users who are
trusted with administrative privileges on the cluster.
====
-.Prerequisites
+== Prerequisites
-* Review how to xref:../builds/securing-builds-by-strategy.adoc#securing-builds-by-strategy[grant custom build permissions].
+* Review how to xref:../../cicd/builds/securing-builds-by-strategy.adoc#securing-builds-by-strategy[grant custom build permissions].
include::modules/builds-create-custom-build-artifacts.adoc[leveloffset=+1]
diff --git a/cicd/builds/images b/cicd/builds/images
new file mode 120000
index 000000000000..5fa6987088da
--- /dev/null
+++ b/cicd/builds/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/builds/managing-build-output.adoc b/cicd/builds/managing-build-output.adoc
similarity index 92%
rename from builds/managing-build-output.adoc
rename to cicd/builds/managing-build-output.adoc
index 5569a33d3c53..0cf38282b022 100644
--- a/builds/managing-build-output.adoc
+++ b/cicd/builds/managing-build-output.adoc
@@ -1,12 +1,11 @@
-:context: managing-build-output
+[id="managing-build-output"]
= Managing build output
include::modules/common-attributes.adoc[]
+:context: managing-build-output
-:toc: macro
-:toc-title:
-:prewrap!:
toc::[]
+
Use the following sections for an overview of and instructions for managing
build output.
diff --git a/cicd/builds/modules b/cicd/builds/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/cicd/builds/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/cicd/builds/running-entitled-builds.adoc b/cicd/builds/running-entitled-builds.adoc
new file mode 100644
index 000000000000..a670c45f50d1
--- /dev/null
+++ b/cicd/builds/running-entitled-builds.adoc
@@ -0,0 +1,27 @@
+[id="running-entitled-builds"]
+= Using Red Hat subscriptions in builds
+include::modules/common-attributes.adoc[]
+:context: running-entitled-builds
+
+toc::[]
+
+Use the following sections to run entitled builds on {product-title}.
+
+include::modules/builds-create-imagestreamtag.adoc[leveloffset=+1]
+
+include::modules/builds-source-secrets-entitlements.adoc[leveloffset=+1]
+
+== Running builds with Subscription Manager
+
+include::modules/builds-strategy-docker-entitled-subman.adoc[leveloffset=+2]
+
+== Running builds with Red Hat Satellite subscriptions
+
+include::modules/builds-source-input-satellite-config.adoc[leveloffset=+2]
+
+include::modules/builds-strategy-docker-entitled-satellite.adoc[leveloffset=+2]
+
+== Additional resources
+
+* xref:../../openshift_images/image-streams-manage.adoc#image-streams-managing[Managing image streams]
+* xref:../../cicd/builds/build-strategies.adoc#build-strategies[Build strategies]
diff --git a/cicd/builds/securing-builds-by-strategy.adoc b/cicd/builds/securing-builds-by-strategy.adoc
new file mode 100644
index 000000000000..dd42c3367c98
--- /dev/null
+++ b/cicd/builds/securing-builds-by-strategy.adoc
@@ -0,0 +1,40 @@
+[id="securing-builds-by-strategy"]
+= Securing builds by strategy
+include::modules/common-attributes.adoc[]
+:context: securing-builds-by-strategy
+
+toc::[]
+
+Builds in {product-title} are run in privileged containers. Depending on the build strategy used, a user who can run builds can escalate their permissions on the cluster and host nodes. As a security measure, limit who can run builds and the strategies that those builds use. Custom builds are inherently less safe than source builds, because they can execute any code within a privileged container, and are disabled by default. Grant docker build permissions with caution, because a vulnerability in the Dockerfile processing logic could result in privileges being granted on the host node.
+
+By default, all users that can create builds are granted permission to use the docker and Source-to-Image (S2I) build strategies. Users with cluster administrator privileges can enable the custom build strategy, as referenced in the section on restricting build strategies to a user globally.
+
+You can control who can build and which build strategies they can use by using an authorization policy. Each build strategy has a corresponding build subresource. A user must have permission to create a build and permission to create on the build strategy subresource to create builds using that strategy. Default roles are provided that grant the create permission on the build strategy subresource.
+
+.Build Strategy Subresources and Roles
+[options="header"]
+|===
+
+|Strategy |Subresource |Role
+
+|Docker
+|builds/docker
+|system:build-strategy-docker
+
+|Source-to-Image
+|builds/source
+|system:build-strategy-source
+
+|Custom
+|builds/custom
+|system:build-strategy-custom
+
+|JenkinsPipeline
+|builds/jenkinspipeline
+|system:build-strategy-jenkinspipeline
+
+|===
+
+include::modules/builds-disabling-build-strategy-globally.adoc[leveloffset=+1]
+include::modules/builds-restricting-build-strategy-globally.adoc[leveloffset=+1]
+include::modules/builds-restricting-build-strategy-to-user.adoc[leveloffset=+1]
diff --git a/cicd/builds/setting-up-trusted-ca.adoc b/cicd/builds/setting-up-trusted-ca.adoc
new file mode 100644
index 000000000000..d93c52ed7f3d
--- /dev/null
+++ b/cicd/builds/setting-up-trusted-ca.adoc
@@ -0,0 +1,51 @@
+[id="setting-up-trusted-ca"]
+= Setting up additional trusted certificate authorities for builds
+include::modules/common-attributes.adoc[]
+:context: setting-up-trusted-ca
+
+toc::[]
+
+ifdef::openshift-enterprise,openshift-webscale,openshift-origin[]
+Use the following sections to set up additional certificate authorities (CAs) to be trusted by builds when pulling images from an image registry.
+
+The procedure requires a cluster administrator to create a `ConfigMap` and add additional CAs as keys in the `ConfigMap`.
+
+* The `ConfigMap` must be created in the `openshift-config` namespace.
+* `domain` is the key in the `ConfigMap` and `value` is the PEM-encoded certificate.
+** Each CA must be associated with a domain. The domain format is `hostname[..port]`.
+* The `ConfigMap` name must be set in the `image.config.openshift.io/cluster` cluster-scoped configuration resource's `spec.additionalTrustedCA` field.
+//* No longer needs single PEM bundle
+
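+As a minimal sketch, a `ConfigMap` of the following shape (the name and certificate content are illustrative) adds one trusted CA for a registry served on port 5000:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: my-registry-cas
+  namespace: openshift-config
+data:
+  registry.example.com..5000: |
+    -----BEGIN CERTIFICATE-----
+    ...
+    -----END CERTIFICATE-----
+----
+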
+include::modules/configmap-adding-ca.adoc[leveloffset=+1]
+
+== Additional resources
+
+* link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap[Create a `ConfigMap`]
+* link:https://kubectl.docs.kubernetes.io/guides/config_management/secrets_configmaps/[Secrets and `ConfigMaps`]
+* xref:../../networking/configuring-a-custom-pki.adoc#configuring-a-custom-pki[Configuring a custom PKI]
+endif::[]
+
+
+ifdef::openshift-dedicated[]
+Use the following sections to set up additional certificate authorities (CAs) to be trusted by builds when pulling images from an image registry.
+
+The procedure requires a Dedicated administrator to create a `ConfigMap` and add additional CAs as keys in the `ConfigMap`.
+
+* The `ConfigMap` must be created in the `openshift-config` namespace.
+* `domain` is the key in the `ConfigMap` and `value` is the PEM-encoded certificate.
+** Each CA must be associated with a domain. The domain format is `hostname[..port]`.
+* The `ConfigMap` name must be set in the `image.config.openshift.io/cluster` cluster-scoped configuration resource's `spec.additionalTrustedCA` field.
+//* No longer needs single PEM bundle
+
+[NOTE]
+====
+{product-title} administrators are required to use the `registry-cas` `ConfigMap`.
+====
+
+include::modules/configmap-adding-ca.adoc[leveloffset=+1]
+
+== Additional resources
+
+* link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#create-a-configmap[Create a ConfigMap]
+* link:https://kubectl.docs.kubernetes.io/guides/config_management/secrets_configmaps/[Secrets and ConfigMaps]
+endif::[]
diff --git a/builds/triggering-builds-build-hooks.adoc b/cicd/builds/triggering-builds-build-hooks.adoc
similarity index 77%
rename from builds/triggering-builds-build-hooks.adoc
rename to cicd/builds/triggering-builds-build-hooks.adoc
index b32837f207ba..8ea91f175993 100644
--- a/builds/triggering-builds-build-hooks.adoc
+++ b/cicd/builds/triggering-builds-build-hooks.adoc
@@ -1,16 +1,11 @@
-// This assembly is included in the following assemblies:
-// * assembly/builds
-
[id="triggering-builds-build-hooks"]
= Triggering and modifying builds
include::modules/common-attributes.adoc[]
:context: triggering-builds-build-hooks
-toc::[]
-The following sections outline how to trigger builds and modify builds using
-build hooks.
+toc::[]
-// The following include statements pull in the module files that comprise the assembly. Include any combination of concept, procedure, or reference modules required to cover the user story. You can also include other assemblies.
+The following sections outline how to trigger builds and modify builds using build hooks.
include::modules/builds-triggers.adoc[leveloffset=+1]
@@ -28,6 +23,8 @@ include::modules/builds-displaying-webhook-urls.adoc[leveloffset=+3]
include::modules/builds-using-image-change-triggers.adoc[leveloffset=+2]
+include::modules/builds-identifying-image-change-triggers.adoc[leveloffset=+2]
+
include::modules/builds-configuration-change-triggers.adoc[leveloffset=+2]
include::modules/builds-setting-triggers-manually.adoc[leveloffset=+3]
diff --git a/builds/troubleshooting-builds.adoc b/cicd/builds/troubleshooting-builds.adoc
similarity index 82%
rename from builds/troubleshooting-builds.adoc
rename to cicd/builds/troubleshooting-builds.adoc
index de7da7746d7b..a09c425d9135 100644
--- a/builds/troubleshooting-builds.adoc
+++ b/cicd/builds/troubleshooting-builds.adoc
@@ -1,11 +1,8 @@
-// This assembly is included in the following assemblies:
-// * assembly/builds
-
-
:context: troubleshooting-builds
[id="troubleshooting-builds_{context}"]
= Troubleshooting builds
include::modules/common-attributes.adoc[]
+
toc::[]
Use the following to troubleshoot build issues.
diff --git a/cicd/builds/understanding-buildconfigs.adoc b/cicd/builds/understanding-buildconfigs.adoc
new file mode 100644
index 000000000000..ac16a8327d76
--- /dev/null
+++ b/cicd/builds/understanding-buildconfigs.adoc
@@ -0,0 +1,10 @@
+[id="understanding-buildconfigs"]
+= Understanding build configurations
+include::modules/common-attributes.adoc[]
+:context: understanding-builds
+
+toc::[]
+
+The following sections define the concept of a build and a build configuration, and outline the primary build strategies available.
+
+include::modules/builds-buildconfig.adoc[leveloffset=+1]
diff --git a/builds/understanding-image-builds.adoc b/cicd/builds/understanding-image-builds.adoc
similarity index 99%
rename from builds/understanding-image-builds.adoc
rename to cicd/builds/understanding-image-builds.adoc
index bc23e8e29c82..f15c8c5755c5 100644
--- a/builds/understanding-image-builds.adoc
+++ b/cicd/builds/understanding-image-builds.adoc
@@ -2,6 +2,7 @@
= Understanding image builds
include::modules/common-attributes.adoc[]
:context: understanding-image-builds
+
toc::[]
include::modules/builds-about.adoc[leveloffset=+1]
diff --git a/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc b/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc
new file mode 100644
index 000000000000..04cbd3244aec
--- /dev/null
+++ b/cicd/gitops/configuring-sso-for-argo-cd-on-openshift.adoc
@@ -0,0 +1,39 @@
+[id="configuring-sso-for-argo-cd-on-openshift"]
+= Configuring SSO for Argo CD on OpenShift
+include::modules/common-attributes.adoc[]
+include::modules/gitops-document-attributes.adoc[]
+:context: configuring-sso-for-argo-cd-on-openshift
+
+toc::[]
+
+After the {gitops-title} Operator is installed, Argo CD automatically creates a user with `admin` permissions. To manage multiple users, Argo CD allows cluster administrators to configure SSO.
+
+[NOTE]
+====
+The bundled Dex OIDC provider is not supported.
+====
+
+.Prerequisites
+* Red Hat SSO is installed on the cluster.
+
+include::modules/gitops-creating-a-new-client-in-keycloak.adoc[leveloffset=+1]
+
+include::modules/gitops-configuring-the-groups-claim.adoc[leveloffset=+1]
+
+include::modules/gitops-configuring-argo-cd-oidc.adoc[leveloffset=+1]
+
+include::modules/gitops-keycloak-identity-brokering-with-openshift-oauthclient.adoc[leveloffset=+1]
+
+include::modules/gitops-registering-an-additional-oauth-client.adoc[leveloffset=+1]
+
+include::modules/gitops-configuring-groups-and-argocd-rbac.adoc[leveloffset=+1]
+
+//include::modules/gitops-enabling-dex.adoc[leveloffset=+1]
+
+include::modules/gitops-in-built-permissions.adoc[leveloffset=+1]
+
+////
+.Additional resources
+* link:https://stedolan.github.io/jq/[`jq` command-line JSON processor documentation.]
+* link:https://argoproj.github.io/argo-cd/operator-manual/rbac/[Argo CD upstream documentation, RBAC Configuration section].
+////
diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc
new file mode 100644
index 000000000000..ddfd051619d2
--- /dev/null
+++ b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations.adoc
@@ -0,0 +1,21 @@
+[id="configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations"]
+= Configuring an OpenShift cluster by deploying an application with cluster configurations
+include::modules/common-attributes.adoc[]
+include::modules/gitops-document-attributes.adoc[]
+:context: configuring-an-openshift-cluster-by-deploying-an-application-with-cluster-configurations
+
+toc::[]
+
+With Red Hat OpenShift GitOps, you can configure Argo CD to recursively sync the content of a Git directory with an application that contains custom configurations for your cluster.
+
+.Prerequisites
+
+* Red Hat OpenShift GitOps is installed in your cluster.
+
+include::modules/logging-in-to-the-argo-cd-instance-by-using-your-openshift-credentials.adoc[leveloffset=+1]
+
+include::modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc[leveloffset=+1]
+
+include::modules/gitops-creating-an-application-by-using-the-oc-tool.adoc[leveloffset=+1]
+
+include::modules/gitops-synchronizing-your-application-application-with-your-git-repository.adoc[leveloffset=+1]
diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/deploying-a-spring-boot-application-with-argo-cd.adoc b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/deploying-a-spring-boot-application-with-argo-cd.adoc
new file mode 100644
index 000000000000..69beb907a942
--- /dev/null
+++ b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/deploying-a-spring-boot-application-with-argo-cd.adoc
@@ -0,0 +1,21 @@
+[id="deploying-a-spring-boot-application-with-argo-cd"]
+= Deploying a Spring Boot application with Argo CD
+include::modules/common-attributes.adoc[]
+include::modules/gitops-document-attributes.adoc[]
+:context: deploying-a-spring-boot-application-with-argo-cd
+
+toc::[]
+
+With Argo CD, you can deploy your applications to the OpenShift cluster either by using the Argo CD dashboard or by using the `oc` tool.
+
+.Prerequisites
+
+* Red Hat OpenShift GitOps is installed in your cluster.
+
+include::modules/logging-in-to-the-argo-cd-instance-by-using-your-openshift-credentials.adoc[leveloffset=+1]
+
+include::modules/gitops-creating-an-application-by-using-the-argo-cd-dashboard.adoc[leveloffset=+1]
+
+include::modules/gitops-creating-an-application-by-using-the-oc-tool.adoc[leveloffset=+1]
+
+include::modules/gitops-verifying-argo-cd-self-healing-behavior.adoc[leveloffset=+1]
diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images
new file mode 120000
index 000000000000..4dd3347de19a
--- /dev/null
+++ b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/images
@@ -0,0 +1 @@
+../../../images
\ No newline at end of file
diff --git a/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules
new file mode 120000
index 000000000000..5be29a99c161
--- /dev/null
+++ b/cicd/gitops/configuring_argo_cd_to_recursively_sync_a_git_repository_with_your_application/modules
@@ -0,0 +1 @@
+../../../modules
\ No newline at end of file
diff --git a/cicd/gitops/gitops-release-notes.adoc b/cicd/gitops/gitops-release-notes.adoc
new file mode 100644
index 000000000000..6a9aa6da45f1
--- /dev/null
+++ b/cicd/gitops/gitops-release-notes.adoc
@@ -0,0 +1,27 @@
+//OpenShift GitOps Release Notes
+include::modules/gitops-document-attributes.adoc[]
+[id="gitops-release-notes"]
+= {gitops-title} release notes
+:context: gitops-release-notes
+include::modules/common-attributes.adoc[]
+
+toc::[]
+
+{gitops-title} is a declarative way to implement continuous deployment for cloud native applications. {gitops-title} ensures consistency in applications when you deploy them to different clusters in different environments, such as development, staging, and production. {gitops-title} helps you automate the following tasks:
+
+* Ensure that the clusters have similar states for configuration, monitoring, and storage
+* Recover or recreate clusters from a known state
+* Apply or revert configuration changes to multiple {product-title} clusters
+* Associate templated configuration with different environments
+* Promote applications across clusters, from staging to production
+
+For an overview of {gitops-title}, see xref:../../cicd/gitops/understanding-openshift-gitops.adoc#understanding-openshift-gitops[Understanding OpenShift GitOps].
+
+include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
+
+// Modules included, most to least recent
+include::modules/gitops-release-notes-1-2-1.adoc[leveloffset=+1]
+
+include::modules/gitops-release-notes-1-2.adoc[leveloffset=+1]
+
+include::modules/gitops-release-notes-1-1.adoc[leveloffset=+1]
diff --git a/cicd/gitops/images b/cicd/gitops/images
new file mode 120000
index 000000000000..5fa6987088da
--- /dev/null
+++ b/cicd/gitops/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/cicd/gitops/installing-openshift-gitops.adoc b/cicd/gitops/installing-openshift-gitops.adoc
new file mode 100644
index 000000000000..06d9df16cda3
--- /dev/null
+++ b/cicd/gitops/installing-openshift-gitops.adoc
@@ -0,0 +1,13 @@
+[id="getting-started-with-openshift-gitops"]
+= Getting started with OpenShift GitOps
+include::modules/common-attributes.adoc[]
+include::modules/gitops-document-attributes.adoc[]
+:context: getting-started-with-openshift-gitops
+
+toc::[]
+
+Red Hat OpenShift GitOps uses Argo CD to manage specific cluster-scoped resources, including platform operators, optional Operator Lifecycle Manager (OLM) operators, and user management.
+
+This guide explains how to install the Red Hat OpenShift GitOps Operator to an {product-title} cluster and how to log in to the Argo CD instance.
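+
+If you prefer to drive the installation from the CLI instead of the web console, the Operator can be installed by creating a `Subscription` object through Operator Lifecycle Manager (OLM). The following is a minimal sketch; the channel name is an assumption and can differ between releases:
+
+[source,yaml]
+----
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: openshift-gitops-operator
+  namespace: openshift-operators
+spec:
+  channel: stable                    # assumed channel name; check OperatorHub for the current one
+  name: openshift-gitops-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+----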
+
+include::modules/installing-gitops-operator-in-web-console.adoc[leveloffset=+1]
diff --git a/cicd/gitops/modules b/cicd/gitops/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/cicd/gitops/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/cicd/gitops/understanding-openshift-gitops.adoc b/cicd/gitops/understanding-openshift-gitops.adoc
new file mode 100644
index 000000000000..0f2cc3916517
--- /dev/null
+++ b/cicd/gitops/understanding-openshift-gitops.adoc
@@ -0,0 +1,13 @@
+[id="understanding-openshift-gitops"]
+= Understanding OpenShift GitOps
+include::modules/common-attributes.adoc[]
+include::modules/gitops-document-attributes.adoc[]
+:context: understanding-openshift-gitops
+
+toc::[]
+
+//Concept Module
+include::modules/about-gitops.adoc[leveloffset=+1]
+
+//Concept Module
+include::modules/about-redhat-openshift-gitops.adoc[leveloffset=+1]
diff --git a/cicd/gitops/uninstalling-openshift-gitops.adoc b/cicd/gitops/uninstalling-openshift-gitops.adoc
new file mode 100644
index 000000000000..76496768a148
--- /dev/null
+++ b/cicd/gitops/uninstalling-openshift-gitops.adoc
@@ -0,0 +1,22 @@
+[id="uninstalling-openshift-gitops"]
+= Uninstalling OpenShift GitOps
+include::modules/common-attributes.adoc[]
+include::modules/gitops-document-attributes.adoc[]
+:context: uninstalling-openshift-gitops
+
+toc::[]
+
+Uninstalling the GitOps Operator is a two-step process:
+
+. Delete the Argo CD instances that were added under the default namespace of the GitOps Operator.
+. Uninstall the GitOps Operator.
+
+Uninstalling only the Operator does not remove the Argo CD instances that were created.
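+
+For example, deleting an Argo CD instance amounts to deleting its `ArgoCD` custom resource; the instance and namespace names below are placeholders:
+
+[source,terminal]
+----
+$ oc delete argocd example-argocd -n example-namespace
+----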
+
+include::modules/go-deleting-argocd-instance.adoc[leveloffset=+1]
+
+include::modules/go-uninstalling-gitops-operator.adoc[leveloffset=+1]
+
+.Additional resources
+
+* You can learn more about uninstalling Operators on {product-title} in the xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[Deleting Operators from a cluster] section.
diff --git a/telemetry/images b/cicd/images
similarity index 100%
rename from telemetry/images
rename to cicd/images
diff --git a/cicd/jenkins-tekton/images b/cicd/jenkins-tekton/images
new file mode 120000
index 000000000000..5fa6987088da
--- /dev/null
+++ b/cicd/jenkins-tekton/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/cicd/jenkins-tekton/migrating-from-jenkins-to-tekton.adoc b/cicd/jenkins-tekton/migrating-from-jenkins-to-tekton.adoc
new file mode 100644
index 000000000000..dc872a3844a9
--- /dev/null
+++ b/cicd/jenkins-tekton/migrating-from-jenkins-to-tekton.adoc
@@ -0,0 +1,22 @@
+//Jenkins-Tekton-Migration
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+[id="migrating-from-jenkins-to-tekton_{context}"]
+= Migrating from Jenkins to Tekton
+:context: migrating-from-jenkins-to-tekton
+
+toc::[]
+
+
+Jenkins and Tekton are both used extensively to automate the process of building, testing, and deploying applications and projects. Tekton, however, is a cloud-native CI/CD solution that works seamlessly with Kubernetes and {product-title}. This document helps you migrate your Jenkins CI/CD workflows to Tekton.
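+
+As a first orientation, a Jenkins build stage typically maps to a Tekton task that is referenced from a pipeline. The following minimal sketch is illustrative only; the pipeline, task, and workspace names are placeholders:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1beta1
+kind: Pipeline
+metadata:
+  name: build-and-test               # placeholder name
+spec:
+  workspaces:
+    - name: shared-workspace         # replaces the Jenkins workspace on the agent
+  tasks:
+    - name: build
+      taskRef:
+        name: maven                  # placeholder task, analogous to a Jenkins build stage
+      workspaces:
+        - name: source
+          workspace: shared-workspace
+----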
+
+include::modules/jt-comparison-of-jenkins-and-tekton-concepts.adoc[leveloffset=+1]
+
+include::modules/jt-migrating-a-sample-pipeline-from-jenkins-to-tekton.adoc[leveloffset=+1]
+
+include::modules/jt-migrating-from-jenkins-plugins-to-tekton-hub-tasks.adoc[leveloffset=+1]
+
+include::modules/jt-extending-tekton-capabilities-using-custom-tasks-and-scripts.adoc[leveloffset=+1]
+
+include::modules/jt-comparison-of-jenkins-tekton-execution-models.adoc[leveloffset=+1]
diff --git a/cicd/jenkins-tekton/modules b/cicd/jenkins-tekton/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/cicd/jenkins-tekton/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/service_mesh/modules b/cicd/modules
similarity index 100%
rename from service_mesh/modules
rename to cicd/modules
diff --git a/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc
new file mode 100644
index 000000000000..0cdefc21f6f3
--- /dev/null
+++ b/cicd/pipelines/creating-applications-with-cicd-pipelines.adoc
@@ -0,0 +1,67 @@
+[id='creating-applications-with-cicd-pipelines']
+= Creating CI/CD solutions for applications using OpenShift Pipelines
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: creating-applications-with-cicd-pipelines
+
+toc::[]
+
+With {pipelines-title}, you can create a customized CI/CD solution to build, test, and deploy your application.
+
+To create a full-fledged, self-serving CI/CD pipeline for an application, perform the following tasks:
+
+* Create custom tasks, or install existing reusable tasks.
+* Create and define the delivery pipeline for your application.
+* Provide a storage volume or filesystem that is attached to a workspace for the pipeline execution, using one of the following approaches:
+** Specify a volume claim template that creates a persistent volume claim.
+** Specify a persistent volume claim.
+* Create a `PipelineRun` object to instantiate and invoke the pipeline, as shown in the sketch after this list.
+* Add triggers to capture events in the source repository.
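+
+The `PipelineRun` object mentioned in the preceding list can be as small as the following sketch; the pipeline and workspace names are placeholders:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1beta1
+kind: PipelineRun
+metadata:
+  name: build-deploy-run             # placeholder name
+spec:
+  pipelineRef:
+    name: build-and-deploy           # placeholder pipeline name
+  workspaces:
+    - name: shared-workspace
+      volumeClaimTemplate:           # creates a persistent volume claim for the run
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 500Mi
+----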
+
+This section uses the `pipelines-tutorial` example to demonstrate the preceding tasks. The example uses a simple application that consists of:
+
+* A front-end interface, `pipelines-vote-ui`, with the source code in the link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] Git repository.
+* A back-end interface, `pipelines-vote-api`, with the source code in the link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repository.
+* The `apply-manifests` and `update-deployment` tasks in the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository.
+
+== Prerequisites
+
+* You have access to an {product-title} cluster.
+* You have installed xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[OpenShift Pipelines] using the {pipelines-title} Operator listed in the OpenShift OperatorHub. After installation, the Operator is available to the entire cluster.
+* You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[OpenShift Pipelines CLI].
+* You have forked the front-end link:https://github.com/openshift/pipelines-vote-ui/tree/{pipelines-ver}[`pipelines-vote-ui`] and back-end link:https://github.com/openshift/pipelines-vote-api/tree/{pipelines-ver}[`pipelines-vote-api`] Git repositories using your GitHub ID, and have administrator access to these repositories.
+* Optional: You have cloned the link:https://github.com/openshift/pipelines-tutorial/tree/{pipelines-ver}[`pipelines-tutorial`] Git repository.
+
+
+include::modules/op-creating-project-and-checking-pipeline-service-account.adoc[leveloffset=+1]
+
+include::modules/op-creating-pipeline-tasks.adoc[leveloffset=+1]
+
+include::modules/op-assembling-a-pipeline.adoc[leveloffset=+1]
+
+include::modules/op-mirroring-images-to-run-pipelines-in-restricted-environment.adoc[leveloffset=+1]
+
+.Additional resources
+
+* xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster]
+
+* xref:../../installing/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Creating a cluster with a mirrored registry]
+
+* xref:../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#mirroring-a-supported-builder-image_creating-and-deploying-a-component-to-the-disconnected-cluster[Mirroring a supported builder image]
+
+include::modules/op-running-a-pipeline.adoc[leveloffset=+1]
+
+include::modules/op-adding-triggers.adoc[leveloffset=+1]
+
+include::modules/op-creating-webhooks.adoc[leveloffset=+1]
+
+include::modules/op-triggering-a-pipelinerun.adoc[leveloffset=+1]
+
+[id="pipeline-addtl-resources"]
+== Additional resources
+
+* For more details on pipelines in the *Developer* perspective, see the xref:../../cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc#working-with-pipelines-using-the-developer-perspective[working with pipelines in the *Developer* perspective] section.
+* To learn more about Security Context Constraints (SCCs), see the xref:../../authentication/managing-security-context-constraints.adoc#managing-pod-security-policies[Managing Security Context Constraints] section.
+* For more examples of reusable tasks, see the link:https://github.com/openshift/pipelines-catalog[OpenShift Catalog] repository. You can also see the Tekton Catalog in the Tekton project.
+* For more details on re-encrypt TLS termination, see link:https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#re-encryption-termination[Re-encryption Termination].
+* For more details on secured routes, see the xref:../../networking/routes/secured-routes.adoc#secured-routes[Secured routes] section.
diff --git a/cicd/pipelines/images b/cicd/pipelines/images
new file mode 120000
index 000000000000..5fa6987088da
--- /dev/null
+++ b/cicd/pipelines/images
@@ -0,0 +1 @@
+../../images
\ No newline at end of file
diff --git a/cicd/pipelines/installing-pipelines.adoc b/cicd/pipelines/installing-pipelines.adoc
new file mode 100644
index 000000000000..39e5df9f111b
--- /dev/null
+++ b/cicd/pipelines/installing-pipelines.adoc
@@ -0,0 +1,52 @@
+[id="installing-pipelines"]
+= Installing OpenShift Pipelines
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: installing-pipelines
+
+toc::[]
+
+This guide walks cluster administrators through the process of installing the {pipelines-title} Operator to an {product-title} cluster.
+
+// Prerequisites for installing OpenShift Operator
+[discrete]
+== Prerequisites
+
+* You have access to an {product-title} cluster using an account with `cluster-admin` permissions.
+
+* You have installed the `oc` CLI.
+* You have installed xref:../../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[OpenShift Pipelines (`tkn`) CLI] on your local system.
+
+ifdef::openshift-origin[]
+* Ensure that you have downloaded the link:https://console.redhat.com/openshift/install/pull-secret[pull secret from the Red Hat OpenShift Cluster Manager site] as shown in the xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-obtaining-installer_installing-gcp-customizations[Obtaining the installation program] to install this Operator.
++
+If you have the pull secret, add the `redhat-operators` catalog to the OperatorHub custom resource (CR) as shown in xref:../../post_installation_configuration/preparing-for-users.adoc#olm-installing-operators-from-operatorhub-configure_post-install-preparing-for-users[Configuring {product-title} to use Red Hat Operators].
+endif::[]
+
+
+//Installing pipelines Operator using web console
+
+include::modules/op-installing-pipelines-operator-in-web-console.adoc[leveloffset=+1]
+
+// Installing pipelines Operator using CLI
+
+include::modules/op-installing-pipelines-operator-using-the-cli.adoc[leveloffset=+1]
+
+// {pipelines-title} Operator in a restricted environment
+
+include::modules/op-pipelines-operator-in-restricted-environment.adoc[leveloffset=+1]
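+
+Regardless of the installation method, you can verify that the Operator components started successfully; as a quick check, the component pods are expected in the `openshift-pipelines` namespace by default:
+
+[source,terminal]
+----
+$ oc get pods -n openshift-pipelines
+----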
+
+
+== Additional resources
+
+* You can learn more about installing Operators on {product-title} in the xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[adding Operators to a cluster] section.
+
+* For more information on using pipelines in a restricted environment, see:
+
+** xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.adoc#op-mirroring-images-to-run-pipelines-in-restricted-environment_creating-applications-with-cicd-pipelines[Mirroring images to run pipelines in a restricted environment]
+
+** xref:../../openshift_images/configuring-samples-operator.adoc#samples-operator-restricted-network-install[Configuring Samples Operator for a restricted cluster]
+
+** xref:../../installing/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Creating a cluster with a mirrored registry]
+
+** xref:../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#mirroring-a-supported-builder-image_creating-and-deploying-a-component-to-the-disconnected-cluster[Mirroring a supported builder image]
diff --git a/cicd/pipelines/modules b/cicd/pipelines/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/cicd/pipelines/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/cicd/pipelines/op-release-notes.adoc b/cicd/pipelines/op-release-notes.adoc
new file mode 100644
index 000000000000..d16d5b7fa027
--- /dev/null
+++ b/cicd/pipelines/op-release-notes.adoc
@@ -0,0 +1,34 @@
+//OpenShift Pipelines Release Notes
+include::modules/pipelines-document-attributes.adoc[]
+[id="op-release-notes"]
+= {pipelines-title} release notes
+:context: op-release-notes
+include::modules/common-attributes.adoc[]
+
+toc::[]
+
+{pipelines-title} is a cloud-native CI/CD experience based on the Tekton project, which provides:
+
+* Standard Kubernetes-native pipeline definitions (CRDs).
+* Serverless pipelines with no CI server management overhead.
+* Extensibility to build images using any Kubernetes tool, such as S2I, Buildah, JIB, and Kaniko.
+* Portability across any Kubernetes distribution.
+* Powerful CLI for interacting with pipelines.
+* Integrated user experience with the *Developer* perspective of the {product-title} web console.
+
+For an overview of {pipelines-title}, see xref:../../cicd/pipelines/understanding-openshift-pipelines.adoc#understanding-openshift-pipelines[Understanding OpenShift Pipelines].
+
+include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
+
+// Modules included, most to least recent
+include::modules/op-release-notes-1-5.adoc[leveloffset=+1]
+
+include::modules/op-release-notes-1-4.adoc[leveloffset=+1]
+
+include::modules/op-release-notes-1-3.adoc[leveloffset=+1]
+
+include::modules/op-release-notes-1-2.adoc[leveloffset=+1]
+
+include::modules/op-release-notes-1-1.adoc[leveloffset=+1]
+
+include::modules/op-release-notes-1-0.adoc[leveloffset=+1]
diff --git a/cicd/pipelines/reducing-pipelines-resource-consumption.adoc b/cicd/pipelines/reducing-pipelines-resource-consumption.adoc
new file mode 100644
index 000000000000..ed19812e61c0
--- /dev/null
+++ b/cicd/pipelines/reducing-pipelines-resource-consumption.adoc
@@ -0,0 +1,27 @@
+[id="reducing-pipelines-resource-consumption"]
+= Reducing resource consumption of pipelines
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: reducing-pipelines-resource-consumption
+
+toc::[]
+
+
+If you use clusters in multi-tenant environments, you must control the consumption of CPU, memory, and storage resources for each project and Kubernetes object. This helps prevent any one application from consuming too many resources and affecting other applications.
+
+To define the final resource limits that are set on the resulting pods, {pipelines-title} uses the resource quota limits and limit ranges of the project in which the pods are executed.
+
+To restrict resource consumption in your project, you can:
+
+* xref:../../applications/quotas/quotas-setting-per-project.adoc[Set and manage resource quotas] to limit the aggregate resource consumption.
+* Use xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc[limit ranges to restrict resource consumption] for specific objects, such as pods, images, image streams, and persistent volume claims (see the sketch after this list).
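+
+The following `LimitRange` sketch illustrates the second approach; the values are placeholders, not tuning recommendations:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: pipeline-limit-range         # placeholder name
+spec:
+  limits:
+    - type: Container
+      default:                       # default limits for containers without explicit limits
+        cpu: 500m
+        memory: 256Mi
+      defaultRequest:                # default requests for containers without explicit requests
+        cpu: 250m
+        memory: 128Mi
+----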
+
+include::modules/op-understanding-pipelines-resource-consumption.adoc[leveloffset=+1]
+
+include::modules/op-mitigating-extra-pipeline-resource-consumption.adoc[leveloffset=+1]
+
+== Additional resources
+
+* xref:../../applications/quotas/quotas-setting-per-project.adoc[Resource Quotas]
+* xref:../../nodes/clusters/nodes-cluster-limit-ranges.adoc[Restricting resource consumption using limit ranges]
+* link:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources[Resource requests and limits in Kubernetes]
diff --git a/cicd/pipelines/securing-webhooks-with-event-listeners.adoc b/cicd/pipelines/securing-webhooks-with-event-listeners.adoc
new file mode 100644
index 000000000000..6077b78049b6
--- /dev/null
+++ b/cicd/pipelines/securing-webhooks-with-event-listeners.adoc
@@ -0,0 +1,25 @@
+[id="securing-webhooks-with-event-listeners"]
+= Securing webhooks with event listeners
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+
+:context: securing-webhooks-with-event-listeners
+
+toc::[]
+
+As an administrator, you can secure webhooks with event listeners. After creating a namespace, you enable HTTPS for the `EventListener` resource by adding the `operator.tekton.dev/enable-annotation=enabled` label to the namespace. Then, you create a `Trigger` resource and a secured route using re-encrypt TLS termination.
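+
+For example, applying the label to an existing namespace is a single command; the namespace name is a placeholder:
+
+[source,terminal]
+----
+$ oc label namespace example-namespace operator.tekton.dev/enable-annotation=enabled
+----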
+
+Triggers in {pipelines-title} support insecure HTTP and secure HTTPS connections to the `EventListener` resource. HTTPS secures connections within and outside the cluster.
+
+{pipelines-title} runs a `tekton-operator-proxy-webhook` pod that watches for the labels in the namespace. When you add the label to the namespace, the webhook sets the `service.beta.openshift.io/serving-cert-secret-name=` annotation on the `EventListener` object. This, in turn, creates secrets and the required certificates.
+
+[source,terminal,subs="attributes+"]
+----
+service.beta.openshift.io/serving-cert-secret-name=
+----
+
+In addition, you can mount the created secret into the `EventListener` pod to secure the request.
+
+include::modules/op-providing-secure-connection.adoc[leveloffset=+1]
+
+include::modules/op-sample-eventlistener-resource.adoc[leveloffset=+1]
diff --git a/cicd/pipelines/understanding-openshift-pipelines.adoc b/cicd/pipelines/understanding-openshift-pipelines.adoc
new file mode 100644
index 000000000000..da0f9799a226
--- /dev/null
+++ b/cicd/pipelines/understanding-openshift-pipelines.adoc
@@ -0,0 +1,49 @@
+[id="understanding-openshift-pipelines"]
+= Understanding OpenShift Pipelines
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: understanding-openshift-pipelines
+
+toc::[]
+
+:FeatureName: OpenShift Pipelines
+
+{pipelines-title} is a cloud-native, continuous integration and continuous delivery (CI/CD) solution based on Kubernetes resources. It uses Tekton building blocks to automate deployments across multiple platforms by abstracting away the underlying implementation details. Tekton introduces a number of standard custom resource definitions (CRDs) for defining CI/CD pipelines that are portable across Kubernetes distributions.
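+
+As a small illustration of these CRDs, a Tekton `Task` is an ordered set of steps, each running in its own container. The following sketch is illustrative; the task name and image are placeholders:
+
+[source,yaml]
+----
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+  name: echo-hello                   # placeholder name
+spec:
+  steps:
+    - name: echo
+      image: registry.access.redhat.com/ubi8/ubi-minimal   # placeholder image
+      script: |
+        echo "Hello from a Tekton step"
+----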
+
+[id="op-key-features"]
+== Key features
+
+* {pipelines-title} is a serverless CI/CD system that runs pipelines with all the required dependencies in isolated containers.
+* {pipelines-title} is designed for decentralized teams that work on a microservice-based architecture.
+* {pipelines-title} uses standard CI/CD pipeline definitions that are easy to extend and integrate with the existing Kubernetes tools, enabling you to scale on-demand.
+* You can use {pipelines-title} to build images with Kubernetes tools such as Source-to-Image (S2I), Buildah, Buildpacks, and Kaniko that are portable across any Kubernetes platform.
+* You can use the {product-title} Developer console to create Tekton resources, view logs of pipeline runs, and manage pipelines in your {product-title} namespaces.
+
+[id="op-detailed-concepts"]
+== OpenShift Pipelines concepts
+This guide provides a detailed view of the various pipeline concepts.
+
+//About tasks
+include::modules/op-about-tasks.adoc[leveloffset=+2]
+//About when expression
+include::modules/op-about-whenexpression.adoc[leveloffset=+2]
+//About final tasks
+include::modules/op-about-finally_tasks.adoc[leveloffset=+2]
+//About task run
+include::modules/op-about-taskrun.adoc[leveloffset=+2]
+//About pipelines
+include::modules/op-about-pipelines.adoc[leveloffset=+2]
+//About pipeline run
+include::modules/op-about-pipelinerun.adoc[leveloffset=+2]
+//About workspace
+include::modules/op-about-workspace.adoc[leveloffset=+2]
+//About triggers
+include::modules/op-about-triggers.adoc[leveloffset=+2]
+
+
+== Additional resources
+
+* For information on installing pipelines, see xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[Installing OpenShift Pipelines].
+* For more details on creating custom CI/CD solutions, see xref:../../cicd/pipelines/creating-applications-with-cicd-pipelines.adoc#creating-applications-with-cicd-pipelines[Creating applications with CI/CD Pipelines].
+* For more details on re-encrypt TLS termination, see link:https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#re-encryption-termination[Re-encryption Termination].
+* For more details on secured routes, see the xref:../../networking/routes/secured-routes.adoc#secured-routes[Secured routes] section.
diff --git a/cicd/pipelines/uninstalling-pipelines.adoc b/cicd/pipelines/uninstalling-pipelines.adoc
new file mode 100644
index 000000000000..2139e8292335
--- /dev/null
+++ b/cicd/pipelines/uninstalling-pipelines.adoc
@@ -0,0 +1,22 @@
+[id="uninstalling-pipelines"]
+= Uninstalling OpenShift Pipelines
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: uninstalling-pipelines
+
+toc::[]
+
+Uninstalling the {pipelines-title} Operator is a two-step process:
+
+. Delete the Custom Resources (CRs) that were added by default when you installed the {pipelines-title} Operator.
+. Uninstall the {pipelines-title} Operator.
+
+Uninstalling only the Operator will not remove the {pipelines-title} components created by default when the Operator is installed.
+
+include::modules/op-deleting-the-pipelines-component-and-custom-resources.adoc[leveloffset=+1]
+
+include::modules/op-uninstalling-the-pipelines-operator.adoc[leveloffset=+1]
+
+.Additional resources
+
+* You can learn more about uninstalling Operators on {product-title} in the xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[deleting Operators from a cluster] section.
diff --git a/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc b/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc
new file mode 100644
index 000000000000..15e67419f758
--- /dev/null
+++ b/cicd/pipelines/using-pods-in-a-privileged-security-context.adoc
@@ -0,0 +1,30 @@
+[id='using-pods-in-a-privileged-security-context']
+= Using pods in a privileged security context
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: using-pods-in-a-privileged-security-context
+
+toc::[]
+
+The default configuration of OpenShift Pipelines 1.3.x and later versions does not allow you to run pods with a privileged security context if the pods result from a pipeline run or a task run.
+For such pods, the default service account is `pipeline`, and the security context constraint (SCC) associated with the `pipeline` service account is `pipelines-scc`. The `pipelines-scc` SCC is similar to the `anyuid` SCC, with a minor difference as defined in the YAML file for the SCC of pipelines:
+
+.Example `SecurityContextConstraints` object
+[source,yaml,subs="attributes+"]
+----
+apiVersion: security.openshift.io/v1
+kind: SecurityContextConstraints
+...
+fsGroup:
+ type: MustRunAs
+...
+----
+
+In addition, the `Buildah` cluster task, shipped as part of {pipelines-title}, uses `vfs` as the default storage driver.
+
+include::modules/op-running-pipeline-and-task-run-pods-with-privileged-security-context.adoc[leveloffset=+1]
+
+
+== Additional resources
+
+* For information on managing SCCs, refer to xref:../../authentication/managing-security-context-constraints.adoc[Managing security context constraints].
diff --git a/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc b/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc
new file mode 100644
index 000000000000..10952f328f95
--- /dev/null
+++ b/cicd/pipelines/viewing-pipeline-logs-using-the-openshift-logging-operator.adoc
@@ -0,0 +1,31 @@
+[id="viewing-pipeline-logs-using-the-openshift-logging-operator"]
+= Viewing pipeline logs using the OpenShift Logging Operator
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: viewing-pipeline-logs-using-the-openshift-logging-operator
+
+toc::[]
+
+The logs generated by pipeline runs, task runs, and event listeners are stored in their respective pods. It is useful to review and analyze logs for troubleshooting and audits.
+
+However, retaining the pods indefinitely leads to unnecessary resource consumption and cluttered namespaces.
+
+To eliminate any dependency on the pods for viewing pipeline logs, you can use the OpenShift Elasticsearch Operator and the OpenShift Logging Operator. These Operators help you to view pipeline logs by using the link:https://www.elastic.co/guide/en/kibana/6.8/connect-to-elasticsearch.html[Elasticsearch Kibana] stack, even after you have deleted the pods that contained the logs.
+
+[id="prerequisites_viewing-pipeline-logs-using-the-openshift-logging-operator"]
+== Prerequisites
+
+Before trying to view pipeline logs in a Kibana dashboard, ensure the following:
+
+* You have cluster administrator permissions.
+* Logs for pipeline runs and task runs are available.
+* The OpenShift Elasticsearch Operator and the OpenShift Logging Operator are installed.
+
+include::modules/op-viewing-pipeline-logs-in-kibana.adoc[leveloffset=+1]
+
+[id="additional-resources_viewing-pipeline-logs-using-the-openshift-logging-operator"]
+== Additional resources
+
+* xref:../../logging/cluster-logging-deploying.adoc[Installing OpenShift Logging]
+* xref:../../logging/viewing-resource-logs.adoc[Viewing logs for a resource]
+* xref:../../logging/cluster-logging-visualizer.adoc[Viewing cluster logs by using Kibana]
diff --git a/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc b/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc
new file mode 100644
index 000000000000..020446b36402
--- /dev/null
+++ b/cicd/pipelines/working-with-pipelines-using-the-developer-perspective.adoc
@@ -0,0 +1,44 @@
+[id='working-with-pipelines-using-the-developer-perspective']
+= Working with {pipelines-title} using the Developer perspective
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+
+
+:context: working-with-pipelines-using-the-developer-perspective
+
+toc::[]
+
+You can use the *Developer* perspective of the {product-title} web console to create CI/CD pipelines for your software delivery process.
+
+
+In the *Developer* perspective:
+
+* Use the *Add* -> *Pipeline* -> *Pipeline Builder* option to create customized pipelines for your application.
+* Use the *Add* -> *From Git* option to create pipelines using operator-installed pipeline templates and resources while creating an application on {product-title}.
+
+After you create the pipelines for your application, you can view and visually interact with the deployed pipelines in the *Pipelines* view. You can also use the *Topology* view to interact with the pipelines created using the *From Git* option. You need to apply custom labels to a pipeline created using the *Pipeline Builder* to see it in the *Topology* view.
+
+[discrete]
+== Prerequisites
+
+* You have access to an {product-title} cluster and have switched to the xref:../../web_console/odc-about-developer-perspective.adoc[*Developer* perspective] in the web console.
+* You have the xref:../../cicd/pipelines/installing-pipelines.adoc#installing-pipelines[OpenShift Pipelines Operator installed] in your cluster.
+* You are a cluster administrator or a user with create and edit permissions.
+* You have created a project.
+
+
+include::modules/op-constructing-pipelines-using-pipeline-builder.adoc[leveloffset=+1]
+
+== Creating applications with OpenShift Pipelines
+
+To create pipelines along with applications, use the *From Git* option in the *Add* view of the *Developer* perspective. For more information, see xref:../../applications/creating_applications/odc-creating-applications-using-developer-perspective.adoc#odc-importing-codebase-from-git-to-create-application_odc-creating-applications-using-developer-perspective[Creating applications using the Developer perspective].
+
+include::modules/op-interacting-with-pipelines-using-the-developer-perspective.adoc[leveloffset=+1]
+
+include::modules/op-using-custom-pipeline-template-for-git-import.adoc[leveloffset=+1]
+
+include::modules/op-starting-pipelines.adoc[leveloffset=+1]
+
+include::modules/op-editing-pipelines.adoc[leveloffset=+1]
+
+include::modules/op-deleting-pipelines.adoc[leveloffset=+1]
diff --git a/cli_reference/administrator-cli-commands.adoc b/cli_reference/administrator-cli-commands.adoc
deleted file mode 100644
index f934e2037049..000000000000
--- a/cli_reference/administrator-cli-commands.adoc
+++ /dev/null
@@ -1,24 +0,0 @@
-[id="cli-administrator-commands"]
-= Administrator CLI commands
-include::modules/common-attributes.adoc[]
-:context: cli-administrator-commands
-
-toc::[]
-
-// Cluster management CLI commands
-include::modules/cli-administrator-cluster-management.adoc[leveloffset=+1]
-
-// Node management CLI commands
-include::modules/cli-administrator-node-management.adoc[leveloffset=+1]
-
-// Security and policy CLI commands
-include::modules/cli-administrator-security-policy.adoc[leveloffset=+1]
-
-// Maintenance CLI commands
-include::modules/cli-administrator-maintenance.adoc[leveloffset=+1]
-
-// Configuration CLI commands
-include::modules/cli-administrator-configuration.adoc[leveloffset=+1]
-
-// Other administrator CLI commands
-include::modules/cli-administrator-other.adoc[leveloffset=+1]
diff --git a/cli_reference/developer-cli-commands.adoc b/cli_reference/developer-cli-commands.adoc
deleted file mode 100644
index 1fc31758e5a7..000000000000
--- a/cli_reference/developer-cli-commands.adoc
+++ /dev/null
@@ -1,27 +0,0 @@
-[id="cli-developer-commands"]
-= Developer CLI commands
-include::modules/common-attributes.adoc[]
-:context: cli-developer-commands
-
-toc::[]
-
-// Basic CLI commands
-include::modules/cli-developer-basic.adoc[leveloffset=+1]
-
-// Build and deploy CLI commands
-include::modules/cli-developer-build-deploy.adoc[leveloffset=+1]
-
-// Application management CLI commands
-include::modules/cli-developer-application-management.adoc[leveloffset=+1]
-
-// Troubleshooting and debugging CLI commands
-include::modules/cli-developer-troubleshooting.adoc[leveloffset=+1]
-
-// Advanced developer CLI commands
-include::modules/cli-developer-advanced.adoc[leveloffset=+1]
-
-// Settings CLI commands
-include::modules/cli-developer-settings.adoc[leveloffset=+1]
-
-// Other developer CLI commands
-include::modules/cli-developer-other.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc b/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc
new file mode 100644
index 000000000000..75e50434fa96
--- /dev/null
+++ b/cli_reference/developer_cli_odo/configuring-the-odo-cli.adoc
@@ -0,0 +1,11 @@
+[id='configuring-the-odo-cli']
+= Configuring the odo CLI
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: configuring-the-odo-cli
+
+toc::[]
+
+include::modules/developer-cli-odo-using-command-completion.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-ignoring-files-or-patterns.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc b/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc
new file mode 100644
index 000000000000..2294e46245e7
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc
@@ -0,0 +1,22 @@
+[id=creating-instances-of-services-managed-by-operators]
+= Creating instances of services managed by Operators
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-instances-of-services-managed-by-operators
+
+toc::[]
+
+Operators are a method of packaging, deploying, and managing Kubernetes services. With `{odo-title}`, you can create instances of services from the custom resource definitions (CRDs) provided by the Operators. You can then use these instances in your projects and link them to your components.
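+
+For example, assuming that a suitable Operator is already installed on the cluster, the flow looks like the following sketch; the Operator version (`etcdoperator.v0.9.4`) and instance name (`my-etcd`) are hypothetical:
+
+[source,terminal]
+----
+$ odo catalog list services
+$ odo service create etcdoperator.v0.9.4/EtcdCluster my-etcd
+----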
+
+To create services from an Operator, you must ensure that the Operator has valid values defined in its `metadata` to start the requested service. `{odo-title}` uses the YAML definition in the `metadata.annotations.alm-examples` annotation of an Operator to start
+the service. If this YAML has placeholder values or sample values, a service cannot start. You can modify the YAML file and start the service with the modified values. To learn how to modify YAML files and start services from them, see xref:../../cli_reference/developer_cli_odo/creating-instances-of-services-managed-by-operators.adoc#creating-services-from-yaml-files_creating-instances-of-services-managed-by-operators[Creating services from YAML files].
+
+== Prerequisites
+* Install the `oc` CLI and log in to the cluster.
+** Note that the configuration of the cluster determines the services available to you. To access the Operator services, a cluster administrator must install the respective Operator on the cluster first. To learn more, see xref:../../operators/admin/olm-adding-operators-to-cluster.adoc#olm-installing-operators-from-operatorhub_olm-adding-operators-to-a-cluster[Adding Operators to the cluster].
+* Install the `{odo-title}` CLI.
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-listing-available-services-from-the-operators-installed-on-the-cluster.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-creating-a-service-from-an-operator.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-creating-services-from-yaml-files.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc
new file mode 100644
index 000000000000..76ce3f513d2c
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-java-application-with-a-database.adoc
@@ -0,0 +1,23 @@
+[id=creating-a-java-application-with-a-database]
+= Creating a Java application with a database
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-a-java-application-with-a-database
+toc::[]
+
+This example describes how to deploy a Java application by using a devfile and how to connect it to a database service.
+
+.Prerequisites
+
+* A running cluster.
+* `{odo-title}` is installed.
+* The Service Binding Operator is installed in your cluster. To learn how to install Operators, contact your cluster administrator or see xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-from-operatorhub_olm-installing-operators-in-namespace[Installing Operators from OperatorHub].
+* The Dev4Devs PostgreSQL Operator is installed in your cluster. To learn how to install Operators, contact your cluster administrator or see xref:../../../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-from-operatorhub_olm-installing-operators-in-namespace[Installing Operators from OperatorHub].
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-creating-a-java-microservice-jpa-application.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-creating-a-database-with-odo.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-connecting-a-java-application-to-mysql-database.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc
new file mode 100644
index 000000000000..f478042a53e1
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-multicomponent-application-with-odo.adoc
@@ -0,0 +1,31 @@
+[id='creating-a-multicomponent-application-with-odo']
+= Creating a multicomponent application with `{odo-title}`
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-a-multicomponent-application-with-odo
+
+toc::[]
+
+`{odo-title}` allows you to create a multicomponent application, modify it, and link its components in an easy and automated way.
+
+This example describes how to deploy a multicomponent application: a shooter game. The application consists of a front-end Node.js component and a back-end Java component.
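+
+At a high level, the flow for the two components looks like the following sketch; the component names are placeholders, and `odo link` is run from the front-end component directory:
+
+[source,terminal]
+----
+$ odo create java backend        # create the back-end component
+$ odo push
+$ odo create nodejs frontend     # create the front-end component
+$ odo push
+$ odo link backend --port 8080   # link the front end to the back end
+----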
+
+.Prerequisites
+
+* `{odo-title}` is installed.
+* You have a running cluster. Developers can use link:https://access.redhat.com/documentation/en-us/red_hat_codeready_containers/[CodeReady Containers (CRC)] to deploy a local cluster quickly.
+* Maven is installed.
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deploying-the-back-end-component.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deploying-the-front-end-component.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-linking-both-components.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-exposing-the-components-to-the-public.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-modifying-the-running-application.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc
new file mode 100644
index 000000000000..d722fbd8db98
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-a-single-component-application-with-odo.adoc
@@ -0,0 +1,28 @@
+[id='creating-a-single-component-application-with-odo']
+= Creating a single-component application with {odo-title}
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-a-single-component-application-with-odo
+
+toc::[]
+
+With `{odo-title}`, you can create and deploy applications on clusters.
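+
+At a glance, the workflow covered in this example looks like the following sketch; the component and URL names are placeholders:
+
+[source,terminal]
+----
+$ odo create nodejs myapp            # create a Node.js component
+$ odo push                           # deploy the component to the cluster
+$ odo url create myurl --port 8080   # expose the component over HTTP
+----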
+
+.Prerequisites
+
+* `{odo-title}` is installed.
+* You have a running cluster. You can use link:https://access.redhat.com/documentation/en-us/red_hat_codeready_containers/[CodeReady Containers (CRC)] to deploy a local cluster quickly.
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-modifying-your-application-code.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-adding-storage-to-the-application-components.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-adding-a-custom-builder-to-specify-a-build-image.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-connecting-your-application-to-multiple-services-using-openshift-service-catalog.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc
new file mode 100644
index 000000000000..9ae14e7e0cd6
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/creating-an-application-with-a-database.adoc
@@ -0,0 +1,31 @@
+[id=creating-an-application-with-a-database]
+= Creating an application with a database
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-an-application-with-a-database
+
+toc::[]
+
+This example describes how to deploy and connect a database to a front-end application.
+
+.Prerequisites
+
+* `{odo-title}` is installed.
+* `oc` client is installed.
+* You have a running cluster. Developers can use link:https://access.redhat.com/documentation/en-us/red_hat_codeready_containers/[CodeReady Containers (CRC)] to deploy a local cluster quickly.
+* The Service Catalog is installed and enabled on your cluster.
++
+[NOTE]
+====
+Service Catalog is deprecated in {product-title} 4 and later.
+====
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deploying-the-front-end-component.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deploying-a-database-in-interactive-mode.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-deploying-a-database-manually.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-connecting-the-database.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc
new file mode 100644
index 000000000000..f56d8c1dceb7
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/debugging-applications-in-odo.adoc
@@ -0,0 +1,14 @@
+[id='debugging-applications-in-odo']
+= Debugging applications in `{odo-title}`
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: debugging-applications-in-odo
+
+toc::[]
+
+With `{odo-title}`, you can attach a debugger to remotely debug your application. This feature is only supported for Node.js and Java components.
+
+Components created with `{odo-title}` run in debug mode by default. A debugger agent runs on the component, on a specific port. To start debugging your application, you must start port forwarding and attach the local debugger bundled in your integrated development environment (IDE).
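+
+For example, a debugging session typically starts with port forwarding; the local port below is a placeholder:
+
+[source,terminal]
+----
+$ odo debug port-forward --local-port 5858
+----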
+
+include::modules/developer-cli-odo-debugging-an-application.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-configuring-debugging-parameters.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc
new file mode 100644
index 000000000000..ac0d5bbbc737
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/deleting-applications.adoc
@@ -0,0 +1,11 @@
+[id='deleting-applications']
+= Deleting applications
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: deleting-applications
+
+toc::[]
+
+You can delete applications and all components associated with the application in your project.
+
+include::modules/developer-cli-odo-deleting-an-application.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images
new file mode 120000
index 000000000000..4399cbb3c0f3
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/images
@@ -0,0 +1 @@
+../../../images/
\ No newline at end of file
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules
new file mode 120000
index 000000000000..5be29a99c161
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/modules
@@ -0,0 +1 @@
+../../../modules
\ No newline at end of file
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc
new file mode 100644
index 000000000000..4987804f12d3
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/sample-applications.adoc
@@ -0,0 +1,36 @@
+[id="sample-applications"]
+= Sample applications
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: using-sample-applications
+
+toc::[]
+
+`{odo-title}` offers partial compatibility with any language or runtime listed within the {product-title} catalog of component types. For example:
+
+[source,terminal]
+----
+NAME PROJECT TAGS
+dotnet openshift 3.1,latest
+httpd openshift 2.4,latest
+java openshift 8,latest
+nginx openshift 1.10,1.12,1.8,latest
+nodejs openshift 0.10,4,6,8,latest
+perl openshift 5.16,5.20,5.24,latest
+php openshift 5.5,5.6,7.0,7.1,latest
+python openshift 2.7,3.3,3.4,3.5,3.6,latest
+ruby openshift 2.0,2.2,2.3,2.4,latest
+wildfly openshift 10.0,10.1,8.1,9.0,latest
+----
+
+[NOTE]
+====
+For `{odo-title}`, Java and Node.js are the officially supported component types.
+Run `odo catalog list components` to verify the officially supported component types.
+====
+
+To access the component over the web, create a URL using `odo url create`.
+
+
+
+include::modules/developer-cli-odo-sample-applications.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc
new file mode 100644
index 000000000000..84540dcc2e9e
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/using-devfiles-in-odo.adoc
@@ -0,0 +1,29 @@
+[id="using-devfiles-in-odo"]
+= Using devfiles in {odo-title}
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-applications-by-using-devfiles
+
+toc::[]
+
+include::modules/developer-cli-odo-about-devfiles-in-odo.adoc[leveloffset=+1]
+
+== Creating a Java application by using a devfile
+
+.Prerequisites
+
+* You have installed `{odo-title}`.
+* You must know your cluster's ingress domain name. Contact your cluster administrator if you do not know it. For example, `apps-crc.testing` is the ingress domain name for https://access.redhat.com/documentation/en-us/red_hat_codeready_containers/[Red Hat CodeReady Containers].
+
+[NOTE]
+====
+Currently, `{odo-title}` does not support creating devfile components with the `--git` or `--binary` flags. You can only create S2I components when using these flags.
+====
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+2]
+
+include::modules/developer-cli-odo-listing-available-devfile-components.adoc[leveloffset=+2]
+
+include::modules/developer-cli-odo-deploying-a-java-application-using-a-devfile.adoc[leveloffset=+2]
+
+include::modules/developer-cli-odo-converting-an-s2i-component-into-a-devfile-component.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc
new file mode 100644
index 000000000000..68dbbee3a2ed
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-projects.adoc
@@ -0,0 +1,11 @@
+[id="working-with-projects"]
+= Working with projects
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: working-with-projects
+
+toc::[]
+
+A project keeps your source code, tests, and libraries organized in a single separate unit.
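+
+For example, creating and switching to a new project is a single command; the project name is a placeholder:
+
+[source,terminal]
+----
+$ odo project create myproject
+----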
+
+include::modules/developer-cli-odo-creating-a-project.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc
new file mode 100644
index 000000000000..d6c41260c710
--- /dev/null
+++ b/cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc
@@ -0,0 +1,20 @@
+[id='working-with-storage']
+= Working with storage
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: working-with-storage
+
+toc::[]
+
+Persistent storage keeps data available between restarts of `{odo-title}`.
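+
+For example, the following command attaches a 1 Gi volume to a component at a given path; the storage name, path, and size are placeholders:
+
+[source,terminal]
+----
+$ odo storage create mystorage --path=/opt/app-root/src/storage/ --size=1Gi
+----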
+
+include::modules/developer-cli-odo-adding-storage-to-the-application-components.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-adding-storage-to-a-specific-container.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-switching-between-ephemeral-and-persistent-storage.adoc[leveloffset=+1]
+
+.Additional resources
+
+* xref:../../../storage/understanding-ephemeral-storage.adoc#storage-ephemeral-storage-overview_understanding-ephemeral-storage[Understanding ephemeral storage].
+* xref:../../../storage/understanding-persistent-storage.adoc#persistent-storage-overview_understanding-persistent-storage[Understanding persistent storage]
diff --git a/service_mesh/service_mesh_install/images b/cli_reference/developer_cli_odo/images
similarity index 100%
rename from service_mesh/service_mesh_install/images
rename to cli_reference/developer_cli_odo/images
diff --git a/cli_reference/developer_cli_odo/installing-odo.adoc b/cli_reference/developer_cli_odo/installing-odo.adoc
new file mode 100644
index 000000000000..35cd184ba09a
--- /dev/null
+++ b/cli_reference/developer_cli_odo/installing-odo.adoc
@@ -0,0 +1,28 @@
+[id='installing-odo']
+= Installing odo
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: installing-odo
+
+toc::[]
+
+The following section describes how to install `{odo-title}` on different platforms using the CLI or the Visual Studio Code (VS Code) IDE.
+
+[NOTE]
+====
+Currently, `{odo-title}` does not support installation in a restricted network environment.
+====
+
+You can also find the URL to the latest binaries in the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*.
+
+include::modules/developer-cli-odo-installing-odo-on-linux.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-installing-odo-on-linux-on-ibm-power.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-installing-odo-on-linux-on-ibm-z.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-installing-odo-on-windows.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-installing-odo-on-macos.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-installing-odo-on-vs-code.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc b/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc
new file mode 100644
index 000000000000..8c387876a73b
--- /dev/null
+++ b/cli_reference/developer_cli_odo/managing-environment-variables-in-odo.adoc
@@ -0,0 +1,11 @@
+[id='managing-environment-variables']
+= Managing environment variables
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: managing-environment-variables
+
+toc::[]
+
+`{odo-title}` stores component-specific configurations and environment variables in the `config` file. You can use the `odo config` command to set, unset, and list environment variables for components without the need to modify the `config` file.
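+
+For example, the following commands set, list, and unset an environment variable; the variable name and value are placeholders:
+
+[source,terminal]
+----
+$ odo config set --env DB_HOST=db.example.com
+$ odo config view
+$ odo config unset --env DB_HOST
+----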
+
+include::modules/developer-cli-odo-setting-and-unsetting-environment-variables.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/modules b/cli_reference/developer_cli_odo/modules
new file mode 120000
index 000000000000..8b0e8540076d
--- /dev/null
+++ b/cli_reference/developer_cli_odo/modules
@@ -0,0 +1 @@
+../../modules
\ No newline at end of file
diff --git a/cli_reference/developer_cli_odo/odo-architecture.adoc b/cli_reference/developer_cli_odo/odo-architecture.adoc
new file mode 100644
index 000000000000..abe36a126cb2
--- /dev/null
+++ b/cli_reference/developer_cli_odo/odo-architecture.adoc
@@ -0,0 +1,16 @@
+[id="odo-architecture"]
+= odo architecture
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: odo-architecture
+
+toc::[]
+
+This section describes `{odo-title}` architecture and how `{odo-title}` manages resources on a cluster.
+
+include::modules/developer-cli-odo-developer-setup.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-openshift-source-to-image.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-openshift-cluster-objects.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-push-workflow.adoc[leveloffset=+1]
+
+// == Additional resources
diff --git a/cli_reference/developer_cli_odo/odo-cli-reference.adoc b/cli_reference/developer_cli_odo/odo-cli-reference.adoc
new file mode 100644
index 000000000000..a155f753f15a
--- /dev/null
+++ b/cli_reference/developer_cli_odo/odo-cli-reference.adoc
@@ -0,0 +1,9 @@
+[id="odo-cli-reference"]
+= odo CLI reference
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: odo-cli-reference
+
+toc::[]
+
+include::modules/developer-cli-odo-basic-odo-cli-commands.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/odo-release-notes.adoc b/cli_reference/developer_cli_odo/odo-release-notes.adoc
new file mode 100644
index 000000000000..3ef9321d2fd7
--- /dev/null
+++ b/cli_reference/developer_cli_odo/odo-release-notes.adoc
@@ -0,0 +1,84 @@
+[id="odo-release-notes"]
+= `{odo-title}` release notes
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: odo-release-notes
+
+toc::[]
+
+[id="odo-notable-improvements_{context}"]
+== Notable changes and improvements in `{odo-title}`
+
+* `{odo-title}` now supports Devfile v2.
+
+* `odo create --s2i` now converts an S2I component into a devfile component. When you run `odo create --s2i`, `{odo-title}` creates a devfile component that is converted from the S2I images of the specified component type.
++
+Note that this feature introduces many breaking changes. See xref:../../cli_reference/developer_cli_odo/odo-release-notes.adoc#odo-known-issues_odo-release-notes[Known issues] for more information.
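++
+For example, a converted devfile component can be created from the Node.js S2I images (the component type and name are illustrative):
++
+[source,terminal]
+----
+$ odo create --s2i nodejs mynodejs
+----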
+
+* An Operator-based service is now created on the cluster only after you run `odo push`, not immediately after `odo service create`.
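++
+For example, with an Operator-backed service, the resource appears on the cluster only after the final push (the Operator and custom resource names are illustrative):
++
+[source,terminal]
+----
+$ odo service create etcdoperator.v0.9.4/EtcdCluster
+$ odo push
+----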
+
+* You can now use the `--container` flag to specify the container that you want to attach storage to when running the `odo storage create` command. See xref:../../cli_reference/developer_cli_odo/creating_and_deploying_applications_with_odo/working-with-storage.adoc#adding-storage-to-a-specific-container_working-with-storage[Adding storage to a specific container] for details.
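++
+For example, a sketch of attaching storage to a named container (the storage name, path, size, and container name are illustrative):
++
+[source,terminal]
+----
+$ odo storage create mystorage --path=/data --size=1Gi --container=runtime
+----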
+
+* `odo catalog component describe` now returns correct JSON if the same name is used for a component in multiple registries.
+
+* Commands that implement changes directly on a cluster now display a message informing you that `odo push` is not required.
+
+* When creating a component from a devfile, `odo create` now uses a default component name if the name is not specified.
+
+* `{odo-title}` now collects telemetry data. See the xref:../../cli_reference/developer_cli_odo/understanding-odo.adoc#telemetry-in-odo[Telemetry in odo] section to learn how to modify your telemetry consent preferences.
+
+* With `odo service`, you can now add or remove custom resource definitions and `ServiceInstance` information in your devfile.
+
+
+[id="odo-getting-support_{context}"]
+== Getting support
+
+.For documentation
+
+If you find an error or have suggestions for improving the documentation, file an issue in link:http://bugzilla.redhat.com[Bugzilla]. Choose the *{product-title}* product type and the *Documentation* component type.
+
+.For the product
+
+If you find an error, encounter a bug, or have suggestions for improving the functionality of `{odo-title}`, file an issue in link:http://bugzilla.redhat.com[Bugzilla]. Choose *OpenShift Developer Tools and Services* as the product and *odo* as the component.
+
+Provide as many details in the issue description as possible.
+
+////
+[id="odo-fixed-issues_{context}"]
+== Fixed issues
+////
+
+[id="odo-known-issues_{context}"]
+== Known issues
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1760574[Bug 1760574] A deleted namespace is listed in the `odo project get` command.
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1760586[Bug 1760586] The `odo delete` command starts an infinite loop after a project is deleted and a component name is set.
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1760588[Bug 1760588] The `odo service create` command crashes when run in Cygwin.
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1760590[Bug 1760590] In Git BASH for Windows, the `odo login -u developer` command does not hide a typed password when requested.
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1783188[Bug 1783188] In a disconnected cluster, the `odo component create` command throws an error `...tag not found...` despite the component being listed in the catalog.
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1761440[Bug 1761440] It is not possible to create two Services of the same type in one project.
+
+* link:https://bugzilla.redhat.com/show_bug.cgi?id=1821643[Bug 1821643] `odo push` does not work on the .NET component tag 2.1+.
++
+Workaround: specify your .NET project file by running the following command, where `<project_file>` is the path to the project file:
++
+[source,terminal]
+----
+$ odo config set --env DOTNET_STARTUP_PROJECT=<project_file>
+----
+
+* Running `odo url create` after `odo create --s2i` fails, because `{odo-title}` now creates the URL directly without prompting.
+
+* WildFly and dotnet S2I components cannot be created with `odo create`.
+
+* `odo env set DebugPort` does not work with converted devfile components. Workaround: use `odo config set --env DEBUG_PORT`.
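++
+For example, a sketch of the workaround with an illustrative port value:
++
+[source,terminal]
+----
+$ odo config set --env DEBUG_PORT=5858
+----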
+
+* `odo delete --wait` does not wait for the resources to be terminated for devfile components.
+
+//[id="odo-technology-preview_{context}"]
+//== Technology Preview features `{odo-title}`
diff --git a/cli_reference/developer_cli_odo/understanding-odo.adoc b/cli_reference/developer_cli_odo/understanding-odo.adoc
new file mode 100644
index 000000000000..ae549723a907
--- /dev/null
+++ b/cli_reference/developer_cli_odo/understanding-odo.adoc
@@ -0,0 +1,136 @@
+[id="understanding-odo"]
+= Understanding odo
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: understanding-odo
+
+toc::[]
+
+`{odo-title}` is a CLI tool for creating applications on {product-title} and Kubernetes. With `{odo-title}`, you can write, build, and debug applications on a cluster without the need to administer the cluster itself.
+`{odo-title}` automates the creation of deployment configurations, build configurations, service routes, and other {product-title} or Kubernetes elements.
+
+Existing tools such as xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-about-cli_cli-developer-commands[`oc`] are operations-focused and require a deep understanding of Kubernetes and {product-title} concepts. `{odo-title}` abstracts away complex Kubernetes and {product-title} concepts, allowing developers to focus on what is most important to them: code.
+
+[id="odo-key-features"]
+== Key features
+
+`{odo-title}` is designed to be simple and concise with the following key features:
+
+* Simple syntax and design centered around concepts familiar to developers, such as projects, applications, and components.
+* Completely client-based. No additional server other than {product-title} is required for deployment.
+* Official support for Node.js and Java components.
+* Partial compatibility with languages and frameworks such as Ruby, Perl, PHP, and Python.
+* Detects changes to local code and deploys them to the cluster automatically, giving instant feedback to validate changes in real time.
+* Lists all the available components and services from the cluster.
+
+[id="odo-core-concepts"]
+== Core concepts
+
+Project::
+A project is your source code, tests, and libraries organized into a single unit.
+Application::
+An application is a program designed for end users. An application consists of multiple microservices or components that work individually to build the entire application.
+Examples of applications: a video game, a media player, a web browser.
+Component::
+A component is a set of Kubernetes resources that host code or data. Each component can be run and deployed separately.
+Examples of components: Node.js, Perl, PHP, Python, Ruby.
+Service::
+A service is software that your component links to or depends on.
+Examples of services: MariaDB, Jenkins, MySQL.
+In `{odo-title}`, services are provisioned from the OpenShift Service Catalog and must be enabled within your cluster.
+
+[id="odo-supported-languages-and-images"]
+=== Officially supported languages and corresponding container images
+
+.Supported languages, container images, package managers, and platforms
+[options="header"]
+|===
+|Language |Container image |Package manager |Platform
+|*Node.js*
+|https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/nodejs-10-rhel7[rhscl/nodejs-10-rhel7]
+|NPM
+|amd64, s390x, ppc64le
+
+|
+|https://access.redhat.com/containers/#/registry.access.redhat.com/rhscl/nodejs-12-rhel7[rhscl/nodejs-12-rhel7]
+|NPM
+|amd64, s390x, ppc64le
+
+|*Java*
+|https://access.redhat.com/containers/#/registry.access.redhat.com/redhat-openjdk-18/openjdk18-openshift[redhat-openjdk-18/openjdk18-openshift]
+|Maven, Gradle
+|amd64, s390x, ppc64le
+
+|
+|https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel8[openjdk/openjdk-11-rhel8]
+|Maven, Gradle
+|amd64, s390x, ppc64le
+
+|
+|https://access.redhat.com/containers/#/registry.access.redhat.com/openjdk/openjdk-11-rhel7[openjdk/openjdk-11-rhel7]
+|Maven, Gradle
+|amd64, s390x, ppc64le
+|===
+
+[id="odo-listing-available-images"]
+==== Listing available container images
+
+[NOTE]
+====
+The list of available container images is sourced from the cluster's internal container registry and external registries associated with the cluster.
+====
+
+To list the available components and associated container images for your cluster:
+
+. Log in to the cluster with `{odo-title}`:
++
+[source,terminal]
+----
+$ odo login -u developer -p developer
+----
+
+. List the supported and unsupported components available to `{odo-title}`, and the corresponding container images:
++
+[source,terminal]
+----
+$ odo catalog list components
+----
++
+.Example output
+[source,terminal]
+----
+Odo Devfile Components:
+NAME              DESCRIPTION                          REGISTRY
+java-maven        Upstream Maven and OpenJDK 11        DefaultDevfileRegistry
+java-openliberty  Open Liberty microservice in Java    DefaultDevfileRegistry
+java-quarkus      Upstream Quarkus with Java+GraalVM   DefaultDevfileRegistry
+java-springboot   Spring Boot® using Java              DefaultDevfileRegistry
+nodejs            Stack with NodeJS 12                 DefaultDevfileRegistry
+
+Odo OpenShift Components:
+NAME     PROJECT    TAGS                                                             SUPPORTED
+java     openshift  11,8,latest                                                      YES
+dotnet   openshift  2.1,3.1,latest                                                   NO
+golang   openshift  1.13.4-ubi7,1.13.4-ubi8,latest                                   NO
+httpd    openshift  2.4-el7,2.4-el8,latest                                           NO
+nginx    openshift  1.14-el7,1.14-el8,1.16-el7,1.16-el8,latest                       NO
+nodejs   openshift  10-ubi7,10-ubi8,12-ubi7,12-ubi8,latest                           NO
+perl     openshift  5.26-el7,5.26-ubi8,5.30-el7,latest                               NO
+php      openshift  7.2-ubi7,7.2-ubi8,7.3-ubi7,7.3-ubi8,latest                       NO
+python   openshift  2.7-ubi7,2.7-ubi8,3.6-ubi7,3.6-ubi8,3.8-ubi7,3.8-ubi8,latest     NO
+ruby     openshift  2.5-ubi7,2.5-ubi8,2.6-ubi7,2.6-ubi8,2.7-ubi7,latest              NO
+wildfly  openshift  10.0,10.1,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,8.1,9.0,latest  NO
+----
++
+The `TAGS` column represents the available image versions, for example, `10` represents the `rhoar-nodejs/nodejs-10` container image.
+For more information about CLI commands, see xref:../../cli_reference/developer_cli_odo/odo-cli-reference.adoc#basic-odo-cli-commands_odo-cli-reference[odo CLI reference].
+
+[id="telemetry-in-odo"]
+=== Telemetry in odo
+
+`{odo-title}` collects information about how it is used, including the operating system, RAM, CPU, number of cores, the `{odo-title}` version, errors, the success or failure of commands, and the time commands take to complete.
+
+You can modify your Telemetry consent by using `odo preference`:
+
+* `odo preference set ConsentTelemetry true` to consent to Telemetry.
+* `odo preference unset ConsentTelemetry` to disable Telemetry.
+* `odo preference view` to verify the current preferences.
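+
+For example, to opt in to telemetry collection:
+
+[source,terminal]
+----
+$ odo preference set ConsentTelemetry true
+----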
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc
new file mode 100644
index 000000000000..c49d2155239c
--- /dev/null
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/about-odo-in-a-restricted-environment.adoc
@@ -0,0 +1,22 @@
+[id="about-odo-in-a-restricted-environment"]
+= About {odo-title} in a restricted environment
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: about-odo-in-a-restricted-environment
+
+toc::[]
+
+
+To run `{odo-title}` in a disconnected cluster or a cluster provisioned in a restricted environment, you must ensure that a cluster administrator has created a cluster with a mirrored registry.
+
+
+To start working in a disconnected cluster, you must first xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc#pushing-the-odo-init-image-to-a-mirror-registry_pushing-the-odo-init-image-to-the-restricted-cluster-registry[push the `odo` init image to the registry of the cluster] and then overwrite the `odo` init image path using the `ODO_BOOTSTRAPPER_IMAGE` environment variable.
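+
+A minimal sketch of overriding the init image path, assuming a mirror registry host of `registry.example.com` (the registry path and tag are illustrative):
+
+[source,terminal]
+----
+$ export ODO_BOOTSTRAPPER_IMAGE=registry.example.com/openshiftdo/odo-init-image-rhel7:1.0
+----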
+
+
+After you push the `odo` init image, you must xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#mirroring-a-supported-builder-image_creating-and-deploying-a-component-to-the-disconnected-cluster[mirror a supported builder image] from the registry, xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#overwriting-the-mirror-registry_creating-and-deploying-a-component-to-the-disconnected-cluster[overwrite a mirror registry], and then xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc#creating-a-nodejs-application-with-odo_creating-and-deploying-a-component-to-the-disconnected-cluster[create your application]. A builder image is necessary to configure a runtime environment for your application and also contains the build tool needed to build your application, for example, npm for Node.js or Maven for Java. A mirror registry contains all the necessary dependencies for your application.
+
+.Additional resources
+ifdef::openshift-enterprise,openshift-webscale[]
+* xref:../../../installing/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[Mirroring images for a disconnected installation]
+endif::[]
+* xref:../../../registry/accessing-the-registry.adoc#registry-accessing-directly_accessing-the-registry[Accessing the registry]
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc
new file mode 100644
index 000000000000..e9fd7242c6e6
--- /dev/null
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-a-component-to-the-disconnected-cluster.adoc
@@ -0,0 +1,20 @@
+[id="creating-and-deploying-a-component-to-the-disconnected-cluster"]
+= Creating and deploying a component to the disconnected cluster
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-and-deploying-a-component-to-the-disconnected-cluster
+
+toc::[]
+
+After you push the `init` image to a cluster with a mirrored registry, you must mirror a supported builder image for your application with the `oc` tool, overwrite the mirror registry using the environment variable, and then create your component.
+
+== Prerequisites
+
+* Install `oc` on the client operating system.
+* xref:../../../cli_reference/developer_cli_odo/installing-odo.adoc#installing-odo-on-linux_installing-odo[Install `{odo-title}`] on the client operating system.
+* Access to a restricted cluster with a configured internal registry or a mirror registry.
+* xref:../../../cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc#pushing-the-odo-init-image-to-a-mirror-registry_pushing-the-odo-init-image-to-the-restricted-cluster-registry[Push the `odo` init image to your cluster registry].
+
+include::modules/developer-cli-odo-mirroring-a-supported-builder-image.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-overwriting-a-mirror-registry.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-creating-and-deploying-a-nodejs-application-with-odo.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc
new file mode 100644
index 000000000000..d6067515b5a8
--- /dev/null
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/creating-and-deploying-devfile-components-to-the-disconnected-cluster.adoc
@@ -0,0 +1,11 @@
+[id="creating-and-deploying-devfile-components-to-the-disconnected-cluster"]
+= Creating and deploying devfile components to the disconnected cluster
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: creating-and-deploying-a-component-to-the-disconnected-cluster
+
+toc::[]
+
+include::modules/developer-cli-odo-creating-a-nodejs-application-by-using-a-devfile-in-a-disconnected-cluster.adoc[leveloffset=+1]
+
+include::modules/developer-cli-odo-creating-a-java-application-by-using-a-devfile-in-a-disconnected-cluster.adoc[leveloffset=+1]
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images
new file mode 120000
index 000000000000..4399cbb3c0f3
--- /dev/null
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/images
@@ -0,0 +1 @@
+../../../images/
\ No newline at end of file
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules
new file mode 120000
index 000000000000..7e8b50bee77a
--- /dev/null
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/modules
@@ -0,0 +1 @@
+../../../modules/
\ No newline at end of file
diff --git a/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc
new file mode 100644
index 000000000000..d35c93c54eca
--- /dev/null
+++ b/cli_reference/developer_cli_odo/using_odo_in_a_restricted_environment/pushing-the-odo-init-image-to-the-restricted-cluster-registry.adoc
@@ -0,0 +1,18 @@
+[id="pushing-the-odo-init-image-to-the-restricted-cluster-registry"]
+= Pushing the {odo-title} init image to the restricted cluster registry
+include::modules/developer-cli-odo-attributes.adoc[]
+include::modules/common-attributes.adoc[]
+:context: pushing-the-odo-init-image-to-the-restricted-cluster-registry
+
+toc::[]
+
+Depending on the configuration of your cluster and your operating system, you can either push the `odo` init image to a mirror registry or directly to an internal registry.
+
+== Prerequisites
+
+* Install `oc` on the client operating system.
+* xref:../../../cli_reference/developer_cli_odo/installing-odo.adoc#installing-odo-on-linux_installing-odo[Install `{odo-title}`] on the client operating system.
+* Access to a restricted cluster with a configured internal registry or a mirror registry.
+
+include::modules/developer-cli-odo-pushing-the-odo-init-image-to-a-mirror-registry.adoc[leveloffset=+1]
+include::modules/developer-cli-odo-pushing-the-odo-init-image-to-an-internal-registry-directly.adoc[leveloffset=+1]
diff --git a/cli_reference/getting-started-cli.adoc b/cli_reference/getting-started-cli.adoc
deleted file mode 100644
index 178d045cb537..000000000000
--- a/cli_reference/getting-started-cli.adoc
+++ /dev/null
@@ -1,24 +0,0 @@
-[id="cli-getting-started"]
-= Getting started with the CLI
-include::modules/common-attributes.adoc[]
-:context: cli-developer-commands
-
-toc::[]
-
-// About the CLI
-include::modules/cli-about-cli.adoc[leveloffset=+1]
-
-// Installing the CLI
-include::modules/cli-installing-cli.adoc[leveloffset=+1]
-
-// Logging in to the CLI
-include::modules/cli-logging-in.adoc[leveloffset=+1]
-
-// Using the CLI
-include::modules/cli-using-cli.adoc[leveloffset=+1]
-
-// Getting help
-include::modules/cli-getting-help.adoc[leveloffset=+1]
-
-// Logging out of the CLI
-include::modules/cli-logging-out.adoc[leveloffset=+1]
diff --git a/cli_reference/index.adoc b/cli_reference/index.adoc
new file mode 100644
index 000000000000..b1397f257335
--- /dev/null
+++ b/cli_reference/index.adoc
@@ -0,0 +1,32 @@
+[id="cli-tools-overview"]
+= {product-title} CLI tools overview
+include::modules/common-attributes.adoc[]
+:context: cli-tools-overview
+
+toc::[]
+
+A user performs a range of operations while working on {product-title}, such as the following:
+
+* Managing clusters
+* Building, deploying, and managing applications
+* Managing deployment processes
+* Developing Operators
+* Creating and maintaining Operator catalogs
+
+{product-title} offers a set of command-line interface (CLI) tools that simplify these tasks by enabling users to perform various administration and development operations from the terminal.
+These tools expose simple commands to manage applications and to interact with each component of the system.
+
+[id="cli-tools-list"]
+== List of CLI tools
+
+The following CLI tools are available in {product-title}:
+
+* xref:../cli_reference/openshift_cli/getting-started-cli.adoc#cli-getting-started[OpenShift CLI (oc)]: This is the CLI tool most commonly used by {product-title} users. It helps both cluster administrators and developers to perform end-to-end operations across {product-title} using the terminal. Unlike the web console, it allows the user to work directly with the project source code using command scripts.
+
+* xref:../cli_reference/kn-cli-tools.adoc#kn-cli-tools[Knative CLI (kn)]: The `kn` CLI tool provides simple and intuitive terminal commands that can be used to interact with OpenShift Serverless components, such as Knative Serving and Eventing.
+
+* xref:../cli_reference/tkn_cli/installing-tkn.adoc#installing-tkn[Pipelines CLI (tkn)]: OpenShift Pipelines is a continuous integration and continuous delivery (CI/CD) solution in {product-title}, which internally uses Tekton. The `tkn` CLI tool provides simple and intuitive commands to interact with OpenShift Pipelines using the terminal.
+
+* xref:../cli_reference/opm/cli-opm-install.adoc#cli-opm-install[opm CLI]: The `opm` CLI tool helps the Operator developers and cluster administrators to create and maintain the catalogs of Operators from the terminal.
+
+* xref:../cli_reference/osdk/cli-osdk-install.adoc#cli-osdk-install[Operator SDK]: The Operator SDK, a component of the Operator Framework, provides a CLI tool that Operator developers can use to build, test, and deploy an Operator from the terminal. It simplifies the process of building Kubernetes-native applications, which can require deep, application-specific operational knowledge.
diff --git a/cli_reference/kn-cli-tools.adoc b/cli_reference/kn-cli-tools.adoc
new file mode 100644
index 000000000000..7690a2d38aa3
--- /dev/null
+++ b/cli_reference/kn-cli-tools.adoc
@@ -0,0 +1,35 @@
+include::modules/serverless-document-attributes.adoc[]
+[id="kn-cli-tools"]
+= Knative CLI (kn) for use with OpenShift Serverless
+:context: kn-cli-tools
+include::modules/common-attributes.adoc[]
+
+toc::[]
+
+The Knative `kn` CLI enables simple interaction with Knative components on {product-title}.
+
+You can enable Knative on {product-title} by installing {ServerlessProductName}. For more information, see xref:../serverless/serverless-getting-started.adoc#serverless-getting-started[Getting started with {ServerlessProductName}].
+
+[NOTE]
+====
+{ServerlessProductName} cannot be installed using the `kn` CLI. A cluster administrator must install the {ServerlessOperatorName} and set up the Knative components, as described in the xref:../serverless/admin_guide/install-serverless-operator.adoc#install-serverless-operator[Serverless applications] documentation for {product-title}.
+====
+
+[id="kn-cli-tools-key-features"]
+== Key features
+
+The `kn` CLI is designed to make serverless computing tasks simple and concise.
+Key features of the `kn` CLI include:
+
+* Deploy serverless applications from the command line.
+* Manage features of Knative Serving, such as services, revisions, and traffic-splitting.
+* Create and manage Knative Eventing components, such as event sources and triggers.
+* Create sink bindings to connect existing Kubernetes applications and Knative services.
+* Extend the `kn` CLI with flexible plug-in architecture, similar to the `kubectl` CLI.
+* Configure autoscaling parameters for Knative services.
+* Use `kn` in scripts, for example, to wait for the results of an operation or to deploy custom rollout and rollback strategies.
+
+[id="kn-cli-tools-installing-kn"]
+== Installing the Knative CLI
+
+See xref:../serverless/cli_tools/installing-kn.adoc#installing-kn[Installing the Knative CLI].
diff --git a/cli_reference/openshift_cli/administrator-cli-commands.adoc b/cli_reference/openshift_cli/administrator-cli-commands.adoc
new file mode 100644
index 000000000000..7c2535386f61
--- /dev/null
+++ b/cli_reference/openshift_cli/administrator-cli-commands.adoc
@@ -0,0 +1,19 @@
+[id="cli-administrator-commands"]
+= OpenShift CLI administrator command reference
+include::modules/common-attributes.adoc[]
+:context: cli-administrator-commands
+
+toc::[]
+
+This reference provides descriptions and example commands for OpenShift CLI (`oc`) administrator commands. For developer commands, see the xref:../../cli_reference/openshift_cli/developer-cli-commands.adoc#cli-developer-commands[OpenShift CLI developer command reference].
+
+Run `oc adm -h` to list all administrator commands or run `oc adm <command> --help` to get additional details for a specific command.
+
+// The following file is auto-generated from the openshift/oc repository
+// OpenShift CLI (oc) administrator commands
+include::modules/oc-adm-by-example-content.adoc[leveloffset=+1]
+
+[id="additional-resources_cli-administrator-commands"]
+== Additional resources
+
+* xref:../../cli_reference/openshift_cli/developer-cli-commands.adoc#cli-developer-commands[OpenShift CLI developer command reference]
diff --git a/cli_reference/configuring-cli.adoc b/cli_reference/openshift_cli/configuring-cli.adoc
similarity index 86%
rename from cli_reference/configuring-cli.adoc
rename to cli_reference/openshift_cli/configuring-cli.adoc
index 28ae11769bd7..e58efba752c3 100644
--- a/cli_reference/configuring-cli.adoc
+++ b/cli_reference/openshift_cli/configuring-cli.adoc
@@ -1,5 +1,5 @@
[id="cli-configuring-cli"]
-= Configuring the CLI
+= Configuring the OpenShift CLI
include::modules/common-attributes.adoc[]
:context: cli-configuring-cli
diff --git a/cli_reference/openshift_cli/developer-cli-commands.adoc b/cli_reference/openshift_cli/developer-cli-commands.adoc
new file mode 100644
index 000000000000..aca75d412d4d
--- /dev/null
+++ b/cli_reference/openshift_cli/developer-cli-commands.adoc
@@ -0,0 +1,26 @@
+[id="cli-developer-commands"]
+= OpenShift CLI developer command reference
+include::modules/common-attributes.adoc[]
+:context: cli-developer-commands
+
+toc::[]
+
+This reference provides descriptions and example commands for OpenShift CLI (`oc`) developer commands.
+ifdef::openshift-enterprise,openshift-origin[]
+For administrator commands, see the xref:../../cli_reference/openshift_cli/administrator-cli-commands.adoc#cli-administrator-commands[OpenShift CLI administrator command reference].
+endif::openshift-enterprise,openshift-origin[]
+
+Run `oc help` to list all commands or run `oc <command> --help` to get additional details for a specific command.
+
+// The following file is auto-generated from the openshift/oc repository
+// OpenShift CLI (oc) developer commands
+include::modules/oc-by-example-content.adoc[leveloffset=+1]
+
+ifdef::openshift-enterprise,openshift-origin[]
+
+[id="additional-resources_cli-developer-commands"]
+== Additional resources
+
+* xref:../../cli_reference/openshift_cli/administrator-cli-commands.adoc#cli-administrator-commands[OpenShift CLI administrator command reference]
+
+endif::openshift-enterprise,openshift-origin[]
diff --git a/cli_reference/extending-cli-plugins.adoc b/cli_reference/openshift_cli/extending-cli-plugins.adoc
similarity index 91%
rename from cli_reference/extending-cli-plugins.adoc
rename to cli_reference/openshift_cli/extending-cli-plugins.adoc
index 10195d8489a5..44f5a3a7f195 100644
--- a/cli_reference/extending-cli-plugins.adoc
+++ b/cli_reference/openshift_cli/extending-cli-plugins.adoc
@@ -1,5 +1,5 @@
[id="cli-extend-plugins"]
-= Extending the CLI with plug-ins
+= Extending the OpenShift CLI with plug-ins
include::modules/common-attributes.adoc[]
:context: cli-extend-plugins
diff --git a/cli_reference/openshift_cli/getting-started-cli.adoc b/cli_reference/openshift_cli/getting-started-cli.adoc
new file mode 100644
index 000000000000..db886c76d393
--- /dev/null
+++ b/cli_reference/openshift_cli/getting-started-cli.adoc
@@ -0,0 +1,34 @@
+[id="cli-getting-started"]
+= Getting started with the OpenShift CLI
+include::modules/common-attributes.adoc[]
+:context: cli-developer-commands
+
+toc::[]
+
+// About the CLI
+include::modules/cli-about-cli.adoc[leveloffset=+1]
+
+[id="installing-openshift-cli"]
+== Installing the OpenShift CLI
+
+You can install the OpenShift CLI (`oc`) either by downloading the binary or by using an RPM.
+
+// Installing the CLI by downloading the binary
+include::modules/cli-installing-cli.adoc[leveloffset=+2]
+
+ifndef::openshift-origin[]
+// Installing the CLI by using an RPM
+include::modules/cli-installing-cli-rpm.adoc[leveloffset=+2]
+endif::[]
+
+// Logging in to the CLI
+include::modules/cli-logging-in.adoc[leveloffset=+1]
+
+// Using the CLI
+include::modules/cli-using-cli.adoc[leveloffset=+1]
+
+// Getting help
+include::modules/cli-getting-help.adoc[leveloffset=+1]
+
+// Logging out of the CLI
+include::modules/cli-logging-out.adoc[leveloffset=+1]
diff --git a/service_mesh/service_mesh_release_notes/images b/cli_reference/openshift_cli/images
similarity index 100%
rename from service_mesh/service_mesh_release_notes/images
rename to cli_reference/openshift_cli/images
diff --git a/cnv/cnv_users_guide/modules b/cli_reference/openshift_cli/modules
similarity index 100%
rename from cnv/cnv_users_guide/modules
rename to cli_reference/openshift_cli/modules
diff --git a/cli_reference/openshift_cli/usage-oc-kubectl.adoc b/cli_reference/openshift_cli/usage-oc-kubectl.adoc
new file mode 100644
index 000000000000..c0c0bc751f31
--- /dev/null
+++ b/cli_reference/openshift_cli/usage-oc-kubectl.adoc
@@ -0,0 +1,30 @@
+[id="usage-oc-kubectl"]
+= Usage of oc and kubectl commands
+include::modules/common-attributes.adoc[]
+:context: usage-oc-kubectl
+
+toc::[]
+
+The Kubernetes command-line interface (CLI), `kubectl`, can be used to run commands against a Kubernetes cluster. Because {product-title} is a certified Kubernetes distribution, you can use the supported `kubectl` binaries that ship with {product-title}, or you can gain extended functionality by using the `oc` binary.
+
+== The oc binary
+
+The `oc` binary offers the same capabilities as the `kubectl` binary, but it also natively supports additional {product-title} features, including:
+
+* **Full support for {product-title} resources**
++
+Resources such as `DeploymentConfig`, `BuildConfig`, `Route`, `ImageStream`, and `ImageStreamTag` objects are specific to {product-title} distributions, and build upon standard Kubernetes primitives.
++
+* **Authentication**
++
+The `oc` binary offers a built-in `login` command that allows authentication and enables you to work with {product-title} projects, which map Kubernetes namespaces to authenticated users. See xref:../../authentication/understanding-authentication.adoc#understanding-authentication[Understanding authentication] for more information.
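++
+For example, a minimal login against a cluster API endpoint (the server URL and credentials are illustrative):
++
+[source,terminal]
+----
+$ oc login https://api.example.com:6443 -u developer -p mypassword
+----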
++
+* **Additional commands**
++
+The additional command `oc new-app`, for example, makes it easier to get new applications started using existing source code or pre-built images. Similarly, the additional command `oc new-project` makes it easier to start a project that you can switch to as your default.
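++
+For example, a sketch of starting a project and an application from existing source code (the project name and repository URL are illustrative):
++
+[source,terminal]
+----
+$ oc new-project my-project
+$ oc new-app https://github.com/sclorg/nodejs-ex
+----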
+
+== The kubectl binary
+
+The `kubectl` binary is provided as a means to support existing workflows and scripts for new {product-title} users coming from a standard Kubernetes environment, or for those who prefer to use the `kubectl` CLI. Existing users of `kubectl` can continue to use the binary to interact with Kubernetes primitives, with no changes required to the {product-title} cluster.
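+
+For example, existing `kubectl` workflows continue to run unchanged against an {product-title} cluster:
+
+[source,terminal]
+----
+$ kubectl get nodes
+----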
+
+You can install the supported `kubectl` binary by following the steps to xref:../../cli_reference/openshift_cli/getting-started-cli.adoc#cli-installing-cli_cli-developer-commands[Install the OpenShift CLI]. The `kubectl` binary is included in the archive if you download the binary, or is installed when you install the CLI by using an RPM.
+
+For more information, see the link:https://kubernetes.io/docs/reference/kubectl/overview/[kubectl documentation].
diff --git a/cli_reference/opm/cli-opm-install.adoc b/cli_reference/opm/cli-opm-install.adoc
new file mode 100644
index 000000000000..a325c68e308c
--- /dev/null
+++ b/cli_reference/opm/cli-opm-install.adoc
@@ -0,0 +1,20 @@
+[id="cli-opm-install"]
+= Installing the opm CLI
+include::modules/common-attributes.adoc[]
+:context: cli-opm-install
+
+toc::[]
+
+include::modules/olm-about-opm.adoc[leveloffset=+1]
+.Additional resources
+
+* See xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Operator Framework packaging format] for more information about the bundle format.
+* To create a bundle image using the Operator SDK, see
+xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-working-bundle-images[Working with bundle images].
+
+include::modules/olm-installing-opm.adoc[leveloffset=+1]
+
+[id="opm-addtl-resources"]
+== Additional resources
+
+* See xref:../../operators/admin/olm-managing-custom-catalogs.adoc#olm-managing-custom-catalogs[Managing custom catalogs] for `opm` procedures including creating, updating, and pruning catalogs.
diff --git a/cli_reference/opm/cli-opm-ref.adoc b/cli_reference/opm/cli-opm-ref.adoc
new file mode 100644
index 000000000000..b314d47f9f8b
--- /dev/null
+++ b/cli_reference/opm/cli-opm-ref.adoc
@@ -0,0 +1,33 @@
+[id="cli-opm-ref"]
+= opm CLI reference
+include::modules/common-attributes.adoc[]
+:context: cli-opm-ref
+
+toc::[]
+
+The `opm` command-line interface (CLI) is a tool for creating and maintaining Operator catalogs.
+
+.`opm` CLI syntax
+[source,terminal]
+----
+$ opm <command> [<subcommand>] [<flags>]
+----
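+
+For example, a typical instantiation of this syntax that adds a bundle to an index image (the image references are illustrative):
+
+[source,terminal]
+----
+$ opm index add \
+    --bundles quay.io/example/my-bundle:v0.1.0 \
+    --tag quay.io/example/my-index:v0.1.0
+----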
+
+.Global flags
+[options="header",cols="1,3"]
+|===
+|Flag |Description
+
+|`--skip-tls`
+|Skip TLS certificate verification for container image registries while pulling bundles or indexes.
+
+|===
+
+:FeatureName: The SQLite-based catalog format, including the related CLI commands,
+include::modules/deprecated-feature.adoc[]
+
+include::modules/opm-cli-ref-index.adoc[leveloffset=+1]
+include::modules/opm-cli-ref-init.adoc[leveloffset=+1]
+include::modules/opm-cli-ref-render.adoc[leveloffset=+1]
+include::modules/opm-cli-ref-validate.adoc[leveloffset=+1]
+include::modules/opm-cli-ref-serve.adoc[leveloffset=+1]
diff --git a/cli_reference/osdk/cli-osdk-install.adoc b/cli_reference/osdk/cli-osdk-install.adoc
new file mode 100644
index 000000000000..00c4d166d440
--- /dev/null
+++ b/cli_reference/osdk/cli-osdk-install.adoc
@@ -0,0 +1,19 @@
+[id="cli-osdk-install"]
+= Installing the Operator SDK CLI
+include::modules/common-attributes.adoc[]
+:context: cli-osdk-install
+
+toc::[]
+
+The Operator SDK provides a command-line interface (CLI) tool that Operator developers can use to build, test, and deploy an Operator. You can install the Operator SDK CLI on your workstation so that you are prepared to start authoring your own Operators.
+
+Operator authors with cluster administrator access to a Kubernetes-based cluster, such as {product-title}, can use the Operator SDK CLI to develop their own Operators based on Go, Ansible, or Helm. link:https://kubebuilder.io/[Kubebuilder] is embedded into the Operator SDK as the scaffolding solution for Go-based Operators, which means existing Kubebuilder projects can be used as is with the Operator SDK and continue to work.
+
+See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK.
+
+[NOTE]
+====
+{product-title} 4.9 and later supports Operator SDK v1.10.1.
+====
+
+include::modules/osdk-installing-cli-linux-macos.adoc[leveloffset=+1]
diff --git a/cli_reference/osdk/cli-osdk-ref.adoc b/cli_reference/osdk/cli-osdk-ref.adoc
new file mode 100644
index 000000000000..46d5076e363d
--- /dev/null
+++ b/cli_reference/osdk/cli-osdk-ref.adoc
@@ -0,0 +1,41 @@
+[id="cli-osdk-ref"]
+= Operator SDK CLI reference
+include::modules/common-attributes.adoc[]
+:context: cli-osdk-ref
+
+toc::[]
+
+The Operator SDK command-line interface (CLI) is a development kit designed to make writing Operators easier.
+
+.Operator SDK CLI syntax
+[source,terminal]
+----
+$ operator-sdk <command> [<subcommand>] [<flags>]
+----
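+
+For example, a typical instantiation of this syntax that scaffolds a new Go-based Operator project (the domain and repository are illustrative):
+
+[source,terminal]
+----
+$ operator-sdk init \
+    --domain=example.com \
+    --repo=github.com/example-inc/memcached-operator
+----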
+
+See xref:../../operators/operator_sdk/osdk-about.adoc#osdk-about[Developing Operators] for full documentation on the Operator SDK.
+
+include::modules/osdk-cli-ref-bundle.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-cleanup.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-completion.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-create.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-generate.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-generate-bundle.adoc[leveloffset=+2]
+.Additional resources
+
+* See xref:../../operators/operator_sdk/osdk-working-bundle-images.adoc#osdk-bundle-deploy-olm_osdk-working-bundle-images[Bundling an Operator and deploying with Operator Lifecycle Manager] for a full procedure that includes using the `make bundle` command to call the `generate bundle` subcommand.
+
+include::modules/osdk-cli-ref-generate-kustomize.adoc[leveloffset=+2]
+
+include::modules/osdk-cli-ref-init.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-run.adoc[leveloffset=+1]
+include::modules/osdk-cli-ref-run-bundle.adoc[leveloffset=+2]
+.Additional resources
+
+* See xref:../../operators/understanding/olm/olm-understanding-operatorgroups.adoc#olm-operatorgroups-membership_olm-understanding-operatorgroups[Operator group membership] for details on possible install modes.
+
+include::modules/osdk-cli-ref-run-bundle-upgrade.adoc[leveloffset=+2]
+include::modules/osdk-cli-ref-scorecard.adoc[leveloffset=+1]
+.Additional resources
+
+* See xref:../../operators/operator_sdk/osdk-scorecard.adoc#osdk-scorecard[Validating Operators using the scorecard tool] for details about running the scorecard tool.
diff --git a/cli_reference/osdk/images b/cli_reference/osdk/images
new file mode 120000
index 000000000000..847b03ed0541
--- /dev/null
+++ b/cli_reference/osdk/images
@@ -0,0 +1 @@
+../../images/
\ No newline at end of file
diff --git a/installing/installing_aws_user_infra/modules b/cli_reference/osdk/modules
similarity index 100%
rename from installing/installing_aws_user_infra/modules
rename to cli_reference/osdk/modules
diff --git a/applications/operators/images b/cli_reference/tkn_cli/images
similarity index 100%
rename from applications/operators/images
rename to cli_reference/tkn_cli/images
diff --git a/cli_reference/tkn_cli/installing-tkn.adoc b/cli_reference/tkn_cli/installing-tkn.adoc
new file mode 100644
index 000000000000..aad4ace6305e
--- /dev/null
+++ b/cli_reference/tkn_cli/installing-tkn.adoc
@@ -0,0 +1,23 @@
+[id="installing-tkn"]
+= Installing tkn
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: installing-tkn
+
+toc::[]
+
+Use the `tkn` CLI to manage {pipelines-title} from a terminal. The following section describes how to install `tkn` on different platforms.
+
+You can also find the URL to the latest binaries from the {product-title} web console by clicking the *?* icon in the upper-right corner and selecting *Command Line Tools*.
+
+// Install tkn on Linux
+include::modules/op-installing-tkn-on-linux.adoc[leveloffset=+1]
+
+// Install tkn on Linux using RPM
+include::modules/op-installing-tkn-on-linux-using-rpm.adoc[leveloffset=+1]
+
+//Install tkn on Windows
+include::modules/op-installing-tkn-on-windows.adoc[leveloffset=+1]
+
+//Install tkn on macOS
+include::modules/op-installing-tkn-on-macos.adoc[leveloffset=+1]
diff --git a/applications/operators/modules b/cli_reference/tkn_cli/modules
similarity index 100%
rename from applications/operators/modules
rename to cli_reference/tkn_cli/modules
diff --git a/cli_reference/tkn_cli/op-configuring-tkn.adoc b/cli_reference/tkn_cli/op-configuring-tkn.adoc
new file mode 100644
index 000000000000..d4c9a4ca3cb3
--- /dev/null
+++ b/cli_reference/tkn_cli/op-configuring-tkn.adoc
@@ -0,0 +1,12 @@
+[id="op-configuring-tkn"]
+= Configuring the OpenShift Pipelines tkn CLI
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: configuring-tkn
+
+toc::[]
+
+Configure the {pipelines-title} `tkn` CLI to enable tab completion.
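+
+For example, a minimal sketch of enabling tab completion in the current session, assuming a Bash shell and that the `tkn` binary is on your `PATH`:
+
+[source,terminal]
+----
+$ source <(tkn completion bash)
+----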
+
+// Enabling tab completion
+include::modules/op-tkn-enabling-tab-completion.adoc[leveloffset=+1]
diff --git a/cli_reference/tkn_cli/op-tkn-reference.adoc b/cli_reference/tkn_cli/op-tkn-reference.adoc
new file mode 100644
index 000000000000..38926d3f66f9
--- /dev/null
+++ b/cli_reference/tkn_cli/op-tkn-reference.adoc
@@ -0,0 +1,46 @@
+[id="op-tkn-reference"]
+= OpenShift Pipelines tkn reference
+include::modules/common-attributes.adoc[]
+include::modules/pipelines-document-attributes.adoc[]
+:context: op-tkn-reference
+
+toc::[]
+
+
+This section lists the basic `tkn` CLI commands.
+
+== Basic syntax
+`tkn [command or options] [arguments...]`
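+
+For example, assuming a pipeline named `build-and-deploy` exists in the current namespace, a typical invocation looks like this:
+
+[source,terminal]
+----
+$ tkn pipeline start build-and-deploy
+----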
+
+== Global options
+`--help, -h`
+
+// Utility commands
+include::modules/op-tkn-utility-commands.adoc[leveloffset=+1]
+
+// Pipeline management commands
+include::modules/op-tkn-pipeline-management.adoc[leveloffset=+1]
+
+// Pipeline run commands
+include::modules/op-tkn-pipeline-run.adoc[leveloffset=+1]
+
+// Task management commands
+include::modules/op-tkn-task-management.adoc[leveloffset=+1]
+
+// Task run commands
+include::modules/op-tkn-task-run.adoc[leveloffset=+1]
+
+// Condition management commands
+include::modules/op-tkn-condition-management.adoc[leveloffset=+1]
+
+// Pipeline resources commands
+include::modules/op-tkn-pipeline-resource-management.adoc[leveloffset=+1]
+
+// ClusterTask management commands
+include::modules/op-tkn-clustertask-management.adoc[leveloffset=+1]
+
+// Trigger management commands
+include::modules/op-tkn-trigger-management.adoc[leveloffset=+1]
+
+// Hub interaction commands
+include::modules/op-tkn-hub-interaction.adoc[leveloffset=+1]
diff --git a/cloud_infrastructure_access/dedicated-aws-access.adoc b/cloud_infrastructure_access/dedicated-aws-access.adoc
new file mode 100644
index 000000000000..cd73316ff573
--- /dev/null
+++ b/cloud_infrastructure_access/dedicated-aws-access.adoc
@@ -0,0 +1,75 @@
+[id="dedicated-aws-access"]
+= Accessing AWS infrastructure
+include::modules/common-attributes.adoc[]
+:context: dedicated-aws-access
+
+toc::[]
+
+Amazon Web Services (AWS) infrastructure access allows
+link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators]
+and cluster owners to enable AWS Identity and Access Management (IAM) users to
+have federated access to the AWS Management Console for their {product-title}
+cluster. Administrators can select between Network Management or Read-only
+access options.
+
+[id="dedicated-configuring-aws-access"]
+== Configuring AWS infrastructure access
+
+.Prerequisites
+* An AWS account with IAM permissions.
+
+[id="dedicated-aws-account-creation"]
+=== Creating an AWS account with IAM permissions
+
+Before you can configure access to AWS infrastructure, you must set up IAM permissions in your AWS account.
+
+.Procedure
+
+. Log in to your AWS account. If necessary, you can create a new AWS account by following link:https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/[AWS documentation].
+. Create an IAM user with `sts:AssumeRole` permissions within the AWS account.
+.. Open the IAM dashboard of the AWS Management Console.
+.. In the *Policies* section, click *Create Policy*.
+.. Select the `JSON` tab and replace the existing text with the following:
++
+[source,json]
+----
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "sts:AssumeRole",
+ "Resource": "*"
+ }
+ ]
+ }
+----
+.. Click *Review Policy*.
+.. Provide an appropriate name and description, then click *Create Policy*.
+.. In the *Users* section, click *Add user*.
+.. Provide an appropriate user name.
+.. Select *AWS Management Console access* and other roles as needed.
+.. Adjust the password requirements as necessary for your organization, then click *Next: Permissions*.
+.. Click the *Attach existing policies directly* option.
+.. Search for and check the policy created in previous steps.
++
+[NOTE]
+====
+It is not recommended to set a permissions boundary.
+====
+.. Click *Next: Tags*, then click *Next: Review*. Confirm the configuration is correct.
+.. Click *Create user*, then click *Close* on the success page.
+. Gather the IAM user's Amazon Resource Name (ARN). The ARN has the following format: `arn:aws:iam::000111222333:user/username`.
+
+[id="dedicated-aws-ocm-iam-role"]
+=== Granting the IAM role from the OpenShift Cluster Manager
+
+.Procedure
+
+. Open the {product-title} Cluster Manager in your browser and select the cluster that you want to allow AWS infrastructure access to.
+. Select the *Access control* tab, and scroll to the *AWS Infrastructure Access* section.
+. Paste the AWS IAM ARN and select `Network Management` or `Read-only` permissions, then click *Grant role*.
+. Copy the AWS OSD Console URL to your clipboard.
+. Sign in to your AWS account with your Account ID or alias, IAM user name, and password.
+. In a new browser tab, paste the AWS OSD Console URL. It routes you to the AWS Switch Role page.
+. Your account number and role are already filled in. Choose a display name if necessary, then click *Switch Role*. *VPC* now appears under *Recently visited services*.
diff --git a/cloud_infrastructure_access/dedicated-aws-dc.adoc b/cloud_infrastructure_access/dedicated-aws-dc.adoc
new file mode 100644
index 000000000000..c2d832df0d7a
--- /dev/null
+++ b/cloud_infrastructure_access/dedicated-aws-dc.adoc
@@ -0,0 +1,23 @@
+[id="dedicated-aws-dc"]
+= Configuring AWS Direct Connect
+include::modules/common-attributes.adoc[]
+:context: dedicated-aws-direct-connect
+
+toc::[]
+
+This process describes accepting an AWS Direct Connect virtual interface with
+{product-title}. For more information about AWS Direct Connect types and
+configuration, see the
+link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/Welcome.html#overview-components[AWS Direct Connect components]
+documentation.
+
+include::modules/dedicated-aws-dc-methods.adoc[leveloffset=+1]
+include::modules/dedicated-aws-dc-hvif.adoc[leveloffset=+1]
+include::modules/dedicated-aws-dc-existing.adoc[leveloffset=+1]
+
+[id="dedicated-aws-dc-tshooting"]
+== Troubleshooting Direct Connect
+
+Further troubleshooting can be found in the
+link:https://docs.aws.amazon.com/directconnect/latest/UserGuide/Troubleshooting.html[Troubleshooting AWS Direct Connect]
+documentation.
diff --git a/cloud_infrastructure_access/dedicated-aws-peering.adoc b/cloud_infrastructure_access/dedicated-aws-peering.adoc
new file mode 100644
index 000000000000..4210a1654e9e
--- /dev/null
+++ b/cloud_infrastructure_access/dedicated-aws-peering.adoc
@@ -0,0 +1,23 @@
+[id="dedicated-aws-peering"]
+= Configuring AWS VPC peering
+include::modules/common-attributes.adoc[]
+:context: dedicated-aws-peering
+
+toc::[]
+
+This sample process configures an Amazon Web Services (AWS) VPC containing an
+{product-title} cluster to peer with another AWS VPC network. For more
+information about creating an AWS VPC Peering connection or for other possible
+configurations, see the
+link:http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/Welcome.html[AWS VPC Peering]
+guide.
+
+include::modules/dedicated-aws-vpc-peering-terms.adoc[leveloffset=+1]
+include::modules/dedicated-aws-vpc-initiating-peering.adoc[leveloffset=+1]
+.Additional resources
+
+* xref:../cloud_infrastructure_access/dedicated-aws-access.adoc#dedicated-aws-ocm-iam-role[Logging into the Web Console for the OSD AWS Account]
+
+include::modules/dedicated-aws-vpc-accepting-peering.adoc[leveloffset=+1]
+include::modules/dedicated-aws-vpc-configuring-routing-tables.adoc[leveloffset=+1]
+include::modules/dedicated-aws-vpc-verifying-troubleshooting.adoc[leveloffset=+1]
diff --git a/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc b/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc
new file mode 100644
index 000000000000..b32b5dca66a7
--- /dev/null
+++ b/cloud_infrastructure_access/dedicated-aws-private-cluster.adoc
@@ -0,0 +1,21 @@
+[id="dedicated-aws-private-cluster"]
+= Configuring a private cluster
+include::modules/common-attributes.adoc[]
+:context: dedicated-private-cluster
+
+toc::[]
+
+An {product-title} cluster can be made private so that internal applications can be hosted inside a corporate network. In addition, private clusters can be configured to have only internal API endpoints for increased security.
+
+{product-title} administrators can choose between public and private cluster configuration from within the *OpenShift Cluster Manager* (OCM). Privacy settings can be configured during cluster creation or after a cluster is established.
+
+include::modules/dedicated-enable-private-cluster-new.adoc[leveloffset=+1]
+
+include::modules/dedicated-enable-private-cluster-existing.adoc[leveloffset=+1]
+
+include::modules/dedicated-enable-public-cluster.adoc[leveloffset=+1]
+
+[NOTE]
+====
+Red Hat Site Reliability Engineers (SREs) can access a public or private cluster through the `cloud-ingress-operator` and the existing Elastic Load Balancer or Amazon S3 framework. SREs can access clusters through a secure endpoint to perform maintenance and service tasks.
+====
diff --git a/cloud_infrastructure_access/dedicated-aws-vpn.adoc b/cloud_infrastructure_access/dedicated-aws-vpn.adoc
new file mode 100644
index 000000000000..061d2d6371b0
--- /dev/null
+++ b/cloud_infrastructure_access/dedicated-aws-vpn.adoc
@@ -0,0 +1,31 @@
+[id="dedicated-aws-vpn"]
+= Configuring AWS VPN
+include::modules/common-attributes.adoc[]
+:context: dedicated-aws-vpn
+
+toc::[]
+
+This sample process configures an Amazon Web Services (AWS) {product-title}
+cluster to use a customer's on-site hardware VPN device.
+
+[NOTE]
+====
+AWS VPN does not currently provide a managed option to apply NAT to VPN traffic.
+See the
+link:https://aws.amazon.com/premiumsupport/knowledge-center/configure-nat-for-vpn-traffic/[AWS Knowledge Center]
+for more details.
+====
+
+[NOTE]
+====
+Routing all traffic, for example `0.0.0.0/0`, through a private connection is not supported. This requires deleting the internet gateway, which disables SRE management traffic.
+====
+
+For more information about connecting an AWS VPC to remote networks using a
+hardware VPN device, see the Amazon VPC
+link:https://docs.aws.amazon.com/vpc/latest/userguide/vpn-connections.html[VPN Connections]
+documentation.
+
+include::modules/dedicated-aws-vpn-creating.adoc[leveloffset=+1]
+include::modules/dedicated-aws-vpn-verifying.adoc[leveloffset=+1]
+include::modules/dedicated-aws-vpn-troubleshooting.adoc[leveloffset=+1]
diff --git a/cloud_infrastructure_access/dedicated-understanding-aws.adoc b/cloud_infrastructure_access/dedicated-understanding-aws.adoc
new file mode 100644
index 000000000000..b9e06363d049
--- /dev/null
+++ b/cloud_infrastructure_access/dedicated-understanding-aws.adoc
@@ -0,0 +1,26 @@
+:context: dedicated-understanding-aws
+[id="welcome-index"]
+= Understanding cloud infrastructure access
+include::modules/common-attributes.adoc[]
+
+Amazon Web Services (AWS) infrastructure access permits
+link:https://access.redhat.com/node/3610411[Customer Portal Organization Administrators]
+and cluster owners to enable AWS Identity and Access Management (IAM) users to
+have federated access to the AWS Management Console for their {product-title}
+cluster.
+
+[id="enabling-aws-access"]
+== Enabling AWS access
+AWS access can be granted for customer AWS users, and private cluster access can be implemented to suit the needs of your {product-title} environment.
+
+Get started with xref:../cloud_infrastructure_access/dedicated-aws-access.adoc#dedicated-aws-access[Accessing AWS infrastructure] for your {product-title} cluster by creating an AWS user and account and providing that user with access to the {product-title} AWS account.
+
+After you have access to the {product-title} AWS account, use one or more of the following methods to establish a private connection to your cluster:
+
+- xref:../cloud_infrastructure_access/dedicated-aws-peering.adoc#dedicated-aws-peering[Configuring AWS VPC peering]: Enable VPC peering to route network traffic between two private IP addresses.
+
+- xref:../cloud_infrastructure_access/dedicated-aws-vpn.adoc#dedicated-aws-vpn[Configuring AWS VPN]: Establish a Virtual Private Network to securely connect your private network to your Amazon Virtual Private Cloud.
+
+- xref:../cloud_infrastructure_access/dedicated-aws-dc.adoc#dedicated-aws-dc[Configuring AWS Direct Connect]: Configure AWS Direct Connect to establish a dedicated network connection between your private network and an AWS Direct Connect location.
+
+After configuring your cloud infrastructure access, learn more about xref:../cloud_infrastructure_access/dedicated-aws-private-cluster.adoc#dedicated-aws-private-cluster[Configuring a private cluster].
diff --git a/cloud_infrastructure_access/images b/cloud_infrastructure_access/images
new file mode 120000
index 000000000000..e4c5bd02a10a
--- /dev/null
+++ b/cloud_infrastructure_access/images
@@ -0,0 +1 @@
+../images/
\ No newline at end of file
diff --git a/telemetry/modules b/cloud_infrastructure_access/modules
similarity index 100%
rename from telemetry/modules
rename to cloud_infrastructure_access/modules
diff --git a/cnv/cnv_users_guide/cnv-attaching-vm-multiple-networks.adoc b/cnv/cnv_users_guide/cnv-attaching-vm-multiple-networks.adoc
deleted file mode 100644
index dfaf1d5e54b8..000000000000
--- a/cnv/cnv_users_guide/cnv-attaching-vm-multiple-networks.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-[id="attaching-to-multiple-networks"]
-= Attaching a virtual machine to multiple networks
-include::modules/cnv-document-attributes.adoc[]
-:context: cnv-attaching-multiple-networks
-toc::[]
-
-{ProductName} provides Layer-2 networking capabilities that allow you to connect
-virtual machines to multiple networks. You can import virtual machines with
-existing workloads that depend on access to multiple interfaces. You can also
-configure a PXE network so that you can boot machines over the network.
-
-To get started, a network administrator configures a NetworkAttachmentDefinition
-of type `cnv-bridge`. Then, users can attach Pods, VMIs, and VMs to the
-bridge network. From the {ProductName} web console, you can create a vNIC
-that refers to the bridge network.
-
-include::modules/cnv-networking-glossary.adoc[leveloffset=+1]
-
-include::modules/cnv-connecting-resource-bridge-network.adoc[leveloffset=+1]
-
-[NOTE]
-====
-When defining the vNIC in the next section, ensure that the *NETWORK* value is
-the bridge network name from the NetworkAttachmentDefinition you created
-in the previous section.
-====
-
-include::modules/cnv-vm-create-nic-web.adoc[leveloffset=+1]
-
-include::modules/cnv-networking-wizard-fields-web.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/cnv/cnv_users_guide/cnv-configuring-pxe-booting.adoc b/cnv/cnv_users_guide/cnv-configuring-pxe-booting.adoc
deleted file mode 100644
index f0d45ec176a6..000000000000
--- a/cnv/cnv_users_guide/cnv-configuring-pxe-booting.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-[id="configuring-pxe-booting"]
-= Configuring PXE booting for virtual machines
-include::modules/cnv-document-attributes.adoc[]
-:context: pxe-booting
-toc::[]
-
-PXE booting, or network booting, is available in {ProductName}.
-Network booting allows a computer to boot and load an
-operating system or other program without requiring a locally attached
-storage device. For example, you can use it to choose your desired OS
-image from a PXE server when deploying a new host.
-
-.Prerequisites
-
-* A Linux bridge must be xref:../../cnv/cnv_users_guide/cnv-attaching-vm-multiple-networks.adoc#connecting-resource-bridge-network-cnv-attaching-multiple-networks[connected].
-* The PXE server must be connected to the same VLAN as the bridge.
-
-include::modules/cnv-networking-glossary.adoc[leveloffset=+1]
-
-include::modules/cnv-pxe-booting-with-mac-address.adoc[leveloffset=+1]
-
-include::modules/cnv-template-vmi-pxe-config.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/contributing_to_docs/contributing.adoc b/contributing_to_docs/contributing.adoc
index 4013b8f9086b..a52bde126016 100644
--- a/contributing_to_docs/contributing.adoc
+++ b/contributing_to_docs/contributing.adoc
@@ -9,6 +9,7 @@
toc::[]
== Different ways to contribute
+
There are a few different ways you can contribute to OpenShift documentation:
// * Submit comments at the bottom of each topic (still awaiting implementation)
@@ -24,10 +25,10 @@ The
https://github.com/orgs/openshift/teams/team-documentation[documentation team]
reviews the PR and arranges further review by the development and quality
assurance teams, as required.
-If the PR requires changes, updates, or corrections required, we will let you know
-in the PR. We might request that you make the changes or let you know that we
+If the PR requires changes, updates, or corrections, we will let you know
+in the PR. We might request that you make the changes, or let you know that we
incorporated your content in a different PR. When the PR has been reviewed, all
-all updates are complete, and all commits are squashed, we'll merge your PR and
+updates are complete, and all commits are squashed, we'll merge your PR and
apply it to the valid versions.
== Repository organization
@@ -57,8 +58,8 @@ contain all the images and modules for the collection.
----
== Version management
-Most of the content applies to all four OpenShift products: OKD, OpenShift
-Online, OpenShift Dedicated, and OpenShift Container Platform. While a large
+Most of the content applies to all five OpenShift products: OKD, OpenShift
+Online, OpenShift Dedicated, Azure Red Hat OpenShift, and OpenShift Container Platform. While a large
amount of content is reused for all product collections, some information
applies to only specific collections. Content inclusion and exclusion is managed
on the assembly level by specifying distributions in the
@@ -88,9 +89,11 @@ are:
* _openshift-online_
* _openshift-enterprise_
* _openshift-dedicated_
+* _openshift-aro_
+* _openshift-webscale_
These attributes can be used by themselves or in conjunction to conditionalize
-text within a topic document.
+text within an assembly or module.
Here is an example of this concept in use:
@@ -99,69 +102,76 @@ This first line is unconditionalized, and will appear for all versions.
\ifdef::openshift-online[]
This line will only appear for OpenShift Online.
-endif::[]
+\endif::[]
-\ifdef::openshift-enterprise[]
+\ifdef::openshift-enterprise[]
This line will only appear for OpenShift Container Platform.
-endif::[]
+\endif::[]
-\ifdef::openshift-origin,openshift-enterprise[]
+\ifdef::openshift-origin,openshift-enterprise[]
This line will appear for OKD and OpenShift Container Platform, but not for OpenShift Online or OpenShift Dedicated.
-endif::[]
+\endif::[]
----
-Note that the following limitations exist when conditionalizing text:
+Note that the following limitation exists when conditionalizing text:
-1. While the `ifdef/endif` blocks have no size limit, do not use them to
+* While the `ifdef/endif` blocks have no size limit, do not use them
to conditionalize an entire file. If an entire file is specific to
only some OpenShift distributions, specify them in the `_topic_map.yml`
file.
-2. Avoid using `ifndef/endif`. As of writing, it's use is broken and buggy.
-
== Release branches
+
With the combination of conditionalizing content within files with
`ifdef/endif` and conditionalizing whole files in the `_topic_map.yml`
-file, the `master` branch of
+file, the `main` branch of
this repository always contains a complete set of documentation for all
OpenShift products. However, when and as new versions of an OpenShift product
-are released, the `master` branch is merged down to new or existing release
+are released, the `main` branch is merged down to new or existing release
branches. Here is the general naming scheme used in the branches:
-* `master` - OKD latest code; essentially, this is our *working*
-branch.
+* `main` - This is our *working* branch.
* `enterprise-N.N` - OpenShift Container Platform support releases. The docs
for OpenShift Online and OpenShift Dedicated are based on the appropriate
`enterprise-N.N` branch.
-On a 6 hourly basis, the documentation web sites are rebuilt for each of these
+Every 12 hours, the documentation websites are rebuilt for each of these
branches. This way the published content for each released version of an
OpenShift product will remain the same while development continues on the
-`master` branch. Additionally, any corrections or additions that are
+`main` branch. Additionally, any corrections or additions that are
"cherry-picked" into the release branches will show up in the published
-documentation after 6 hours.
+documentation after 12 hours.
[NOTE]
====
-All OpenShift content development occurs on the `master`, or *working* branch.
-Therefore, when submitting your work the PR must be created against the `master`
+All OpenShift content development for the 4.x stream occurs on the `main`, or
+*working* branch.
+Therefore, when submitting your work the PR must be created against the `main`
branch. After it is reviewed, a writer will apply the content to the relevant
release branches. If you know which branches a change applies to, be sure to
specify it in your PR.
+
+When adding or updating content for version 3.11, you should create a feature
+branch against enterprise-3.11 to submit your changes.
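+
+For example, a minimal sketch (the feature branch name is illustrative):
+
+----
+$ git fetch upstream
+$ git checkout -b my-3.11-update upstream/enterprise-3.11
+----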
====
== Adding files to the collection
-After you create files, you must add them to the `_topic_map.yml` so
+After you create assembly files, you must add them to the `_topic_map.yml` so
that the build system can render them. The documentation build system reads
-the `_distro_map.yml` from the master branch to determine
+the `_distro_map.yml` from the main branch to determine
which branches to build and then the `_topic_map.yml` file
for each of the branches
to construct the content from the source files and publish to the relevant
product site at https://docs.openshift.com. The build system _only_ reads this
-file to determine which topic files to include. Therefore, all new topics that
+file to determine which topic files to include. Therefore, all new assemblies that
are created must be included in the `_topic_map.yml` file in
order to be processed by the build system.
+[NOTE]
+====
+Module files are included in the appropriate assembly files. Modules are not added directly to the `_topic_map.yml` file.
+====
+
=== Topic map file format
The `_topic_map.yml` file uses the following format:
@@ -190,22 +200,22 @@ Topics:
<3> Directory name of topic group.
<4> Which OpenShift versions this topic group is part of.
* The *Distros* setting is optional for topic groups and topic items. By
-default, if the *Distros* setting is not used, it is process as if it was set
+default, if the *Distros* setting is not used, it is processed as if it was set
to *Distros: all* for that particular topic or topic group. This means that
-topic or topic group will appear in all three product documentation.
+the topic or topic group will appear in all product documentation versions.
* The *all* value for *Distros* is a synonym for
-_openshift-origin,openshift-enterprise,openshift-online,openshift-dedicated_.
+_openshift-origin,openshift-enterprise,openshift-online,openshift-dedicated,openshift-aro,openshift-webscale_.
* The *all* value overrides other values, so _openshift-online,all_ is processed
as *all*.
-<5> Topic name.
-<6> Topic file under the topic group dir without `.adoc`.
+<5> Assembly name.
+<6> Assembly file under the topic group dir without `.adoc`.
<7> This topic is actually a subtopic group. Instead of a `File` path it has a
`Dir` path and `Topics`, just like a top-level topic group.
-<8> Topics belonging to a subtopic group are listed just like regular topics
+<8> Assemblies belonging to a subtopic group are listed just like regular assemblies
with a `Name` and `File`.
== Next steps
-* First, you should link:tools_and_setup.adoc[Install and set up the tools and software]
+* First, you should link:tools_and_setup.adoc[install and set up the tools and software]
on your workstation so that you can contribute.
* Next, link:doc_guidelines.adoc[review the documentation guidelines] to
understand some basic guidelines to keep things consistent
diff --git a/contributing_to_docs/contributing_user_stories.adoc b/contributing_to_docs/contributing_user_stories.adoc
new file mode 100644
index 000000000000..3a3f9ae6878d
--- /dev/null
+++ b/contributing_to_docs/contributing_user_stories.adoc
@@ -0,0 +1,97 @@
+[[contributing-user-stories]]
+= Contribute user stories to OpenShift documentation
+:icons:
+:toc: macro
+:toc-title:
+:toclevels: 1
+:description: Basic information about how to create user stories for OpenShift GitHub repository
+
+toc::[]
+
+== Modularization backstory
+OpenShift docs are modularized, starting from OpenShift 4.1.
+All existing content has been replaced with content that is based on user stories and
+complies with the modularization guidelines. All future content must both
+support a user story and be modular.
+
+== How do I contribute modularized content?
+To contribute modularized content, you need to write a user story, create
+documentation modules to support the user story, and create an assembly for the
+story.
+
+== What if I don't want to write in modules?
+If you don't want to write the modules yourself but have a content change,
+write a user story, provide details to support the story, and reach out to the
+OpenShift docs team.
+
+== How do I write a user story? Is there a template?
+Instead of a template, we have a series of questions for you to answer to
+create the user story. Follow the same steps if you are writing the modules
+yourself or if you plan to work with the docs team.
+
+The basic format of a user story is:
+
+----
+As a , I want to because .
+----
+
+For example, "As a cluster administrator, I want to enable an Auto Scaling group to manage my OpenShift Enterprise
+cluster deployed on AWS because I want my node count to scale based on application demand."
+
+Use the following questions to guide you in providing the context for your user story and the necessary technical details to start a draft.
+You don't have to answer all of these questions, only the ones that make sense to your particular user story.
+
+=== Feature info
+* What is the feature being developed? What does it do?
+* How does it work?
+* Are there any configuration files/settings/parameters being added or modified? Are any new commands being added or modified?
+* What tools or software does the docs team need to test how this feature works? Does the docs team need to update any installed software?
+* Are there any existing blogs, Wiki posts, Kbase articles, or BZs involving this feature? Or any other existing information that may help to understand this feature?
+
+=== Customer impact
+* Who is the intended audience for this feature? If it's for Enterprise, does it apply to developers, admins, or both?
+* Why is it important for our users? Why would they want to use this feature? How does it benefit them?
+* How will the customer use it? Is there a use case?
+* How will the customer interact with this feature? Client tools? Web console? REST API?
+
+=== Product info
+* Is this feature being developed for Online? Enterprise? Dedicated? OKD? All?
+* Will this feature be rolled back to previous versions?
+* If it's for Online, what type of plan do users need to use this feature?
+* Is it user-facing, or more behind-the-scenes admin stuff?
+* What tools or software does the docs team need to test how this feature works?
+
+== How do I write in modules?
+The full guidelines for writing modules are in the Customer Content Services (CCS)
+link:https://redhat-documentation.github.io/modular-docs/[modularization guide].
+
+The main concepts of writing in modules are:
+
+* Each assembly contains the information required for a user to achieve a single
+goal.
+* Assemblies contain primarily `include` statements, which are references to
+smaller, targeted module files.
+* Modules can contain conceptual information, reference information, or steps,
+but not a combination of the types.
+
+For example, a simple assembly might contain the following three modules:
+
+* A concept module that contains background information about the feature
+that the user will configure
+* A reference module that contains an annotated sample YAML file that the user
+needs to modify
+* A procedure module that contains the prerequisites that the user needs to
+complete before they start configuring and steps that the user takes to
+complete the configuration.
+
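+A skeleton of such an assembly might look like the following sketch (the anchor ID and module file names are hypothetical):
+
+----
+[id="configuring-example-feature"]
+= Configuring the example feature
+\include::modules/common-attributes.adoc[]
+:context: configuring-example-feature
+
+toc::[]
+
+\include::modules/about-example-feature.adoc[leveloffset=+1]
+
+\include::modules/example-feature-reference.adoc[leveloffset=+1]
+
+\include::modules/configuring-example-feature.adoc[leveloffset=+1]
+----
+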
+The `enterprise-4.1` branch contains sample assemblies that explain how to
+get started with modular documentation for OpenShift and that serve as
+references for including modules in assemblies. The
+link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/mod_docs_guide/mod-docs-conventions-ocp.adoc[Modular Docs OpenShift conventions]
+assembly contains the
+link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/modules/mod-docs-ocp-conventions.adoc[Modular docs OpenShift conventions]
+reference module, and the
+link:https://github.com/openshift/openshift-docs/blob/enterprise-4.1/mod_docs_guide/getting-started-modular-docs-ocp.adoc[Getting started with modular docs on OpenShift]
+assembly contains the
+link:https://raw.githubusercontent.com/openshift/openshift-docs/enterprise-4.1/modules/creating-your-first-content.adoc[Creating your first content]
+procedure module.
diff --git a/contributing_to_docs/create_or_edit_content.adoc b/contributing_to_docs/create_or_edit_content.adoc
index 91a84966e378..9668a5daad16 100644
--- a/contributing_to_docs/create_or_edit_content.adoc
+++ b/contributing_to_docs/create_or_edit_content.adoc
@@ -26,17 +26,21 @@ with the remote repository.
[NOTE]
====
-Because most changes in this repository must be committed to the `master`
-branch, the following process always uses `master` as the name of the source
+Because most changes in this repository must be committed to the `main`
+branch (which is the working branch for the 4.x stream), the following process always
+uses `main` as the name of the source
branch. If you must use another branch as the source for your change, make
-sure that you consistently use that branch name instead of `master`
+sure that you consistently use that branch name instead of `main`.
+
+When adding or updating content for version 3.11, you should create a feature
+branch against enterprise-3.11 to submit your changes.
====
-1. From your local repository, make sure you have the `master` branch checked
+1. From your local repository, make sure you have the `main` branch checked
out:
+
----
-$ git checkout master
+$ git checkout main
----
2. Fetch the current state of the OpenShift documentation repository:
@@ -49,14 +53,14 @@ $ git fetch upstream
`openshift/openshift-docs`, into your local repository:
+
----
-$ git rebase upstream/master
+$ git rebase upstream/main
----
4. Push the latest updates to your forked repository so that it is also in sync
with the remote:
+
----
-$ git push origin master
+$ git push origin main
----
== Add content or update existing content on local branch
@@ -110,13 +114,13 @@ commit those changes locally:
$ git commit -am "Detailed comments about what changes were made; for example, fixed typo"
----
-*Step 5:* Rebase updates from `master` into your working branch
+*Step 5:* Rebase updates from `main` into your working branch
Remember that you must rebase against the branch that you created this working
-branch from. In most cases, it will be the master branch.
+branch from. In most cases, it will be the main branch for the 4.x stream.
----
-$ git rebase upstream/master
+$ git rebase upstream/main
----
[NOTE]
@@ -135,32 +139,34 @@ $ git push origin
----
== Submit PR to merge your work
+
When you have pushed your changes to your GitHub account, you can submit a PR to
-have your work from your GitHub fork to the `master` branch of the OpenShift
+have your work from your GitHub fork to the `main` branch of the OpenShift
documentation repository. The documentation team will review the work, advise of
any further changes that are required, and finally merge your work.
1. Go to your forked GitHub repository on the GitHub website, and you should see
your local branch that includes all of your work.
-2. Click on *Pull Request* to submit the PR against the `master` branch of the
+2. Click on *Pull Request* to submit the PR against the `main` branch of the
`openshift-docs` repository.
3. If you know which product versions your change applies to, include a comment
that specifies the minimum version that the change applies to. The docs team
maintains these branches for all active and future distros and your PR will be
-applied to one or more these.
-4. Tag the documentation team with @openshift/team-documentation.
+applied to one or more of these branches.
+4. Tag the documentation team with @openshift/team-documentation if you are a part of the OpenShift organization. If you are not, tag @vikram-redhat.
== Confirm your changes have been merged
-When your PR has been merged into the `master` branch, you should confirm and
-then sync your local and GitHub repositories with the `master` branch.
-1. On your workstation, switch to the `master` branch:
+When your PR has been merged into the `main` branch, you should confirm and
+then sync your local and GitHub repositories with the `main` branch.
+
+1. On your workstation, switch to the `main` branch:
+
----
-$ git checkout master
+$ git checkout main
----
-2. Pull the latest changes from `master`:
+2. Pull the latest changes from `main`:
+
----
$ git fetch upstream
@@ -170,14 +176,14 @@ $ git fetch upstream
`openshift/openshift-docs`, into your local repository:
+
----
-$ git rebase upstream/master
+$ git rebase upstream/main
----
4. After confirming in your rebased local repository that your changes have been
merged, push the latest changes, including your work, to your GitHub account:
+
----
-$ git push origin master
+$ git push origin main
----
== Add changes to an existing PR, if required
@@ -208,8 +214,9 @@ everything with the local copy. You should now see the new commits in the
existing PR. Sometimes a refresh of your browser may be required.
== Delete the local working branch
+
When you have confirmed that all of your changes have been accepted and merged,
-and you have pulled the latest changes on `master` and pushed them to your
+and you have pulled the latest changes on `main` and pushed them to your
GitHub account, you can delete the local working branch. Ensure you are in your
local repository before proceeding.
diff --git a/contributing_to_docs/doc_guidelines.adoc b/contributing_to_docs/doc_guidelines.adoc
index 2cc7a18138c7..c0f50338ac7d 100644
--- a/contributing_to_docs/doc_guidelines.adoc
+++ b/contributing_to_docs/doc_guidelines.adoc
@@ -1,16 +1,27 @@
[id="contributing-to-docs-doc-guidelines"]
= Documentation guidelines
include::modules/common-attributes.adoc[]
+:toc: macro
The documentation guidelines for OpenShift 4 build on top of the
link:https://redhat-documentation.github.io/modular-docs/[Red Hat modular docs reference guide].
+toc::[]
+
== General file guidelines
* Set your editor to strip trailing whitespace.
-* Unless your line contains a link or xref, wrap each line at 80 characters.
+* Do *not* hard wrap lines at 80 characters (or at any other length).
++
+It is not necessary to update existing content to unwrap lines, but you can remove existing hard wrapping from any lines that you are currently working on.
++
+[TIP]
+====
+In the Atom editor, you can use `Ctrl`+`J` to undo hard wrapping on a paragraph.
+====
== Assembly file metadata
+
Every assembly file should contain the following metadata at the top, with no line
spacing in between, except where noted:
@@ -19,12 +30,22 @@ spacing in between, except where noted:
= Assembly title <2>
include::modules/common-attributes.adoc[] <3>
:context: <4>
+ <5>
+toc::[] <6>
----
<1> A unique (within OpenShift docs) anchor id for this assembly. Example: cli-developer-commands
<2> Human readable title (notice the '=' top-level header)
-<3> Includes all attributes common to OpenShift docs
+<3> Includes attributes common to OpenShift docs.
++
+[NOTE]
+====
+The `{product-title}` and `{product-version}` common attributes are not defined in the `modules/common-attributes.adoc` file. Those attributes are pulled by AsciiBinder from the distro mapping definitions in the https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml] file. See xref:product-name-and-version[Product name and version] for more information on this topic.
+====
++
<4> The context used for identifying headers in modules; it is the same as the anchor ID. Example: cli-developer-commands.
+<5> A blank line. You *must* have a blank line here before the toc.
+<6> The table of contents for the current assembly.
After the heading block and a single whitespace line, you can include any content for this assembly.
@@ -58,34 +79,56 @@ Example:
----
// Module included in the following assemblies:
//
-// * cli_reference/developer-cli-commands.adoc
+// * cli_reference/openshift_cli/developer-cli-commands.adoc
[id="cli-basic-commands_{context}"]
= Basic CLI commands
----
-*All headings after the initial title must have a section anchor, with no line spaces between the
-anchor and the section title and the anchor id should be similar to the section title*:
+== Assembly/module file names
+
+Try to shorten the file name as much as possible _without_ abbreviating important terms that may cause confusion. For example, the `managing-authorization-policies.adoc` file name would be appropriate for an assembly titled "Managing Authorization Policies".
+
+== Directory names
+
+If you create a directory with a multiple-word name, separate each word with an underscore, for example `backup_and_restore`. Do not create a top-level directory in the repository without checking with the docs team. In the main OpenShift docs, you can create one level of subdirectories. In the docs for features that are designed to be used with OpenShift, such as Service Mesh and OpenShift virtualization, you can create two levels of subdirectories.
+
+When creating a new directory or subdirectory, you must create two symbolic links in it:
+
+* An `images` symbolic link to the top-level `images/` directory
+* A `modules` symbolic link to the top-level `modules/` directory
+
+If the directory that contains an assembly does not have the `images` symbolic link, any images in that assembly or its modules will not be included properly when building the docs.
+
+[TIP]
+====
+To create the symbolic links:
+
+. Navigate to the directory where you need to add the links.
+. Use the following command to create a symbolic link:
++
----
-[id="_{context}"]
-=== Section title
+$ ln -s <target_directory> <link_name>
----
++
+For example, if you are creating the links in a directory that is two levels deep, such as `cli_reference/openshift_cli`, use the following commands:
++
+----
+$ ln -s ../../images/ images
+$ ln -s ../../modules/ modules
+----
++
+Be sure to adjust the number of levels to back up (`../`) depending on how deep your directory is.
-== Assembly/module file names
-
-Try to shorten the file name as much as possible _without_ abbreviating
-important terms that may cause confusion. For example, the
-`managing_authorization_policies.adoc` file name would be appropriate for an
-assembly titled "Managing Authorization Policies".
+If you accidentally create an incorrect link, you can remove that link by using `unlink <link_name>`.
+====
== Assembly/Module titles and section headings
-Use sentence case in all titles and section headings. See http://www.titlecase.com/ or
-https://convertcase.net/ for a conversion tool.
+Use sentence case in all titles and section headings. See http://www.titlecase.com/ or https://convertcase.net/ for a conversion tool.
Try to be as descriptive as possible with the title or section headings
-without making them unnecessarily too long. For assemblies and task modules,
+without making them unnecessarily long. For assemblies and task modules,
use a gerund form in headings, such as:
* Creating
@@ -94,22 +137,17 @@ use a gerund form in headings, such as:
Do not use "Overview" as a heading.
+Do not use backticks or other markup in assembly or module headings.
+
=== Discrete headings
-If you have a section heading that you do not want to appear in the TOC
-(like if you think that some section is not worth showing up or if there are already
-too many nested levels), you can use a discrete (or floating) heading:
+If you have a section heading that you do not want to appear in the TOC (for example, if a section is not worth listing there or if there are already too many nested levels), you can use a discrete (or floating) heading:
http://asciidoctor.org/docs/user-manual/#discrete-or-floating-section-titles
-A discrete heading also will not get a section number in the Customer Portal
-build of the doc. Previously, we would use plain bold mark-up around a heading
-like this, but discrete headings also allow you to ignore section nesting rules
-(like jumping from a `==` section level to a `====` level if you wanted for some
-style reason).
+A discrete heading also will not get a section number in the Customer Portal build of the doc. Previously, we would use plain bold markup around a heading like this, but discrete headings also allow you to ignore section nesting rules (like jumping from a `==` section level to a `====` level if you want to for some style reason).
-To use a discrete heading, just add `[discrete]` to the line before your unique
-ID. For example:
+To use a discrete heading, just add `[discrete]` to the line before your unique ID. For example:
----
[discrete]
@@ -117,69 +155,154 @@ ID. For example:
== Managing authorization policies
----
+== Anchoring titles and section headings
+
+All titles and section headings must have an anchor ID. The anchor ID must be similar to the title or section heading.
+
+=== Anchoring in assembly files
+
+The following is an example anchor ID in an assembly file:
+
+----
+[id="configuring-alert-notifications"]
+= Configuring alert notifications
+----
+
+[NOTE]
+====
+Do not include line spaces between the anchor ID and the section title.
+====
+
+=== Anchoring in module files
+
+You must add the `{context}` variable to the end of anchor IDs in module files. When called, the `{context}` variable is resolved into the value declared in the `:context:` attribute in the corresponding assembly file. This enables cross-referencing to module IDs in the context of a specific assembly and is useful when a module is included in multiple assemblies.
+
+The following is an example of an anchor ID in a module file:
+
+----
+[id="sending-notifications-to-external-systems_{context}"]
+= Sending notifications to external systems
+----
+
+[NOTE]
+====
+The `{context}` variable must be preceded by an underscore (`_`) when declared in an anchor ID.
+====
+
+=== Anchoring "Prerequisites", "Additional resources", and "Next steps" titles in assemblies
+
+Use unique IDs for "Prerequisites", "Additional resources", and "Next steps" titles in assemblies. You can add the prefixes `prerequisites_`, `additional-resources_`, or `next-steps_` to a unique string that describes the assembly topic. The unique string can match the value assigned to the `:context:` attribute in the assembly.
+
+[NOTE]
+====
+The `prerequisites_`, `additional-resources_`, and `next-steps_` prefixes must end with an underscore (`_`) when declared in an anchor ID in an assembly.
+====
+
+The following examples include IDs that are unique to the "Configuring alert notifications" assembly:
+
+*Example unique ID for a "Prerequisites" title*
+
+----
+[id="prerequisites_configuring-alert-notifications"]
+== Prerequisites
+----
+
+*Example unique ID for an "Additional resources" title*
+
+----
+[id="additional-resources_configuring-alert-notifications"]
+== Additional resources
+----
+
+*Example unique ID for a "Next steps" title*
+
+----
+[id="next-steps_configuring-alert-notifications"]
+== Next steps
+----
+
+== Writing assemblies
+An _assembly_ is a collection of modules that describes how to accomplish a user story.
+
+For more information about forming assemblies, see the
+link:https://redhat-documentation.github.io/modular-docs/#forming-assemblies[Red Hat modular docs reference guide] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_ASSEMBLY_a-collection-of-modules.adoc[assembly template].
+
+[NOTE]
+====
+When using the "Prerequisites", "Next steps", or "Additional resources" headings in an assembly, use `==` formatting, such as `== Prerequisites` or `== Additional resources`. Use of this heading syntax at the assembly level indicates that the sections relate to the whole assembly.
+
+Only use `.` formatting (`.Additional resources`) to follow a module in an assembly. Because you cannot use xrefs in modules, this functions as a _trailing include_ at the assembly level, where the `.` formatting after the `include` statement indicates that the resource applies specifically to the module and not to the assembly, as shown in the sketch after this note.
+====
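+
+For example, a trailing include at the assembly level might look like the following sketch (the module name and xref target are hypothetical):
+
+----
+\include::modules/about-example-feature.adoc[leveloffset=+1]
+
+.Additional resources
+
+* xref:../networking/understanding-networking.adoc#understanding-networking[Understanding networking]
+----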
+
== Writing concepts
A _concept_ contains information to support the tasks that users want to do and
must not include task information like commands or numbered steps. In most
cases, create your concepts as individual modules and include them in
-appropriate assemblies. Avoid using gerunds in concept titles. "About "
+appropriate assemblies.
+
+Avoid using gerunds in concept titles. "About <concept>"
is a common concept module title.
-For more information about writing concepts, see the
-link:https://redhat-documentation.github.io/modular-docs/[Red Hat modular docs reference guide].
+For more information about creating concept modules, see the
+link:https://redhat-documentation.github.io/modular-docs/#creating-concept-modules[Red Hat modular docs reference guide] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_CONCEPT_concept-explanation.adoc[concept template].
-== Writing tasks
-A _task_ contains the steps that users follow to complete a process. Tasks
-contain ordered steps and explicit commands. In most cases, create your tasks
-as individual modules and include them in appropriate assemblies.
+== Writing procedures
+A _procedure_ contains the steps that users follow to complete a process or task. Procedures contain ordered steps and explicit commands. In most cases, create your procedures as individual modules and include them in appropriate assemblies.
-[IMPORTANT]
-====
-Use a verb in the task title (for example, Creating).
-====
+Use a gerund in the procedure title, such as "Creating".
-Task modules take the following form:
+For more information about writing procedures, see the
+link:https://redhat-documentation.github.io/modular-docs/#creating-procedure-modules[Red Hat modular docs reference guide] and the link:https://raw.githubusercontent.com/redhat-documentation/modular-docs/master/modular-docs-manual/files/TEMPLATE_PROCEDURE_doing-one-procedure.adoc[procedure template].
-----
-
+[NOTE]
+====
+When needed, use `.Prerequisites`, `.Next steps`, or `.Additional resources` syntax to suppress TOC formatting within a module. Do not use `==` syntax for these headings in modules. Because you cannot use xrefs in modules, if you need to include a link under one of these headings, place the entire subsection in the assembly instead.
+====
-After the module file metadata, include a paragraph explaining why the user must
-perform this task. This should be 1-2 sentences maximum.
+[id="using-conscious-language"]
+== Using conscious language
-If applicable, include any gotchas (things that could trip up the user or cause the task to fail).
+To assist with the removal of the problematic word "master" from the documentation, use the following terminology when referring to OpenShift control plane nodes:
-.Prerequisites
+[options="header"]
+|===
+|Branch |Control plane node reference
-* A bulleted list of pre-requisites that MUST be performed before the user can complete this task.
-Skip if there isn't any related information.
+|`main` and `enterprise-4.9`
+|Control plane node
-.Procedure
+|`enterprise-4.8` and earlier
+|Control plane (also known as master) node
-. Step 1 - One command per step.
+|`enterprise-3.11`
+|Master node
-. Step 2 - One command per step.
+|===
-. Step N
+You can replace "node" in the preceding examples with "machine", "host", or another suitable description.
-.Next steps
+[NOTE]
+====
+If you are cherry-picking from `main` to `enterprise-4.8` or earlier, you must manually cherry-pick to include the "(also known as master)" phrasing. This is required only if the phrase "control plane" is introduced for the first time in an assembly or module.
+====
-You can explain any other tasks that MUST be completed after this task. You can
-skip this if there are none. Do not include xrefs. If the next steps are closely
-related to the task, you might be able to include their modules in the assembly.
+[id="adding-a-subsection-on-making-open-source-more-inclusive"]
+=== Adding a subsection on making open source more inclusive
-.Related information
+If you create a release notes assembly for a sub-product within the `openshift/openshift-docs` repo, you might include a "Making open source more inclusive" statement. Instead of pasting the statement from the OpenShift Release Notes, use the following module, which is available in the `enterprise-4.8` branch and later:
-* A bulleted list of links to related information about this task. Skip if there isn't any related information.
+[source,text]
+----
+\include::modules/making-open-source-more-inclusive.adoc[leveloffset=+1]
----
-For more information about writing tasks, see the
-link:https://redhat-documentation.github.io/modular-docs/[Red Hat modular docs reference guide].
-
-== Product name & version
+[id="product-name-and-version"]
+== Product name and version
-When possible, generalize references to the product name and/or version using
+When possible, generalize references to the product name and/or version by using
the `{product-title}` and/or `{product-version}` attributes. These attributes
-are pulled from distro mapping definitions in the
-https://github.com/openshift/openshift-docs/blob/master/_distro_map.yml[distro_map.yml]
+are pulled by AsciiBinder from the distro mapping definitions in the
+https://github.com/openshift/openshift-docs/blob/main/_distro_map.yml[_distro_map.yml]
file.
The `{product-title}` comes from the first `name:` field in a distro mapping,
@@ -196,11 +319,11 @@ possible values for `{product-title}` and `{product-version}`.
|`openshift-origin`
|OKD
-|1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 3.6, 3.7, 3.9, 3.10, 3.11, 4.0, Latest
+|1.2, 1.3, 1.4, 1.5, 3.6, 3.7, 3.9, 3.10, 3.11, Latest
|`openshift-enterprise`
|OpenShift Container Platform
-|3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.9, 3.10, 3.11, 4.0
+|3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.9, 3.10, 3.11, 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7
|`openshift-dedicated`
|OpenShift Dedicated
@@ -218,36 +341,107 @@ You can deploy applications on {product-title}.
----
This is a safe statement that could appear in probably any of the builds, so an
-https://github.com/openshift/openshift-docs/blob/master/contributing_to_docs/contributing.adoc#conditional-text-between-products[ifdef/endif
+https://github.com/openshift/openshift-docs/blob/main/contributing_to_docs/contributing.adoc#conditional-text-between-products[ifdef/endif
statement] is not necessary. For example, if you were viewing a build for the
`openshift-enterprise` distro (for any of the distro-defined branches), this
would render as:
-"You can deploy applications on OpenShift Container Platform."
+> You can deploy applications on OpenShift Container Platform.
And for the `openshift-origin` distro:
-"You can deploy applications on OKD."
+> You can deploy applications on OKD.
+
+Considering that we use distinct branches to keep content for product versions separated, global use of `{product-version}` across all branches is probably less useful, but it is available if you come across a requirement for it. Just consider how it will render across any branches that the content appears in.
+
+If it makes more sense in context to refer to the major version of the product instead of a specific minor version (for example, if comparing how something in OpenShift Container Platform 4 differs from OpenShift Container Platform 3), just use the major version number. Do not prepend with a `v`, as in `v3` or `v4`.
+
+[NOTE]
+====
+Other common attribute values are defined in the `modules/common-attributes.adoc` file. Where possible, generalize references to those values by using the common attributes. For example, use `{cloud-redhat-com}` to refer to Red Hat OpenShift Cluster Manager.
+====
+
+== Node names
+
+Do not use internal company server names in commands or example output. Provide generic OpenShift Container Platform node name examples that are not provider-specific, unless required. Where possible, use the example.com domain name when providing fully qualified domain names (FQDNs).
+
+The following table includes example OpenShift Container Platform 4 node names and their corresponding role types:
+
+[options="header"]
+|===
+
+|Node name |Role type
+
+|*node-1.example.com*
+.3+.^|You can use this format for nodes that do not need role-specific node names.
+
+|*node-2.example.com*
+
+|*node-3.example.com*
+
+|*control-plane-1.example.com*
+.3+.^|You can use this format if you need to describe the control plane role type within a node name.
+
+|*control-plane-2.example.com*
+
+|*control-plane-3.example.com*
+
+|*compute-1.example.com*
+.2+.^|You can use this format if you need to describe the compute node role type within a node name.
+
+|*compute-2.example.com*
+
+|*bootstrap.example.com*
+|You can use this format if you need to describe the bootstrap node role type within a node name.
+|===
+
+This example lists the status of cluster nodes that use the node name formatting guidelines:
+
+....
+[source,terminal]
+----
+$ oc get nodes
+----
++
+.Example output
+[source,terminal]
+----
+NAME STATUS ROLES AGE VERSION
+compute-1.example.com Ready worker 33m v1.19.0+9f84db3
+control-plane-1.example.com Ready master 41m v1.19.0+9f84db3
+control-plane-2.example.com Ready master 45m v1.19.0+9f84db3
+compute-2.example.com Ready worker 38m v1.19.0+9f84db3
+compute-3.example.com Ready worker 33m v1.19.0+9f84db3
+control-plane-3.example.com Ready master 41m v1.19.0+9f84db3
+----
+....
-Considering that we use distinct branches to keep content for product versions
-separated, global use of `{product-version}` across all branches is probably
-less useful, but it is available if you come across a requirement for it. Just consider
-how it will render across any branches that the content appears in.
+[NOTE]
+====
+Some provider-formatted hostnames include IPv4 addresses. An OpenShift Container Platform node name typically reflects the hostname of a node. If node names in your output need to be provider-specific and require this format, use private IPv4 addresses. For example, you could use `ip-10-0-48-9.example.com` as a node name that includes a private IPv4 address.
+====
+
+== IP addresses
+
+You can include IPv4 addresses from test clusters in examples in the documentation, as long as they are private. Private IPv4 addresses fall into one of the following ranges:
-If it makes more sense in context to refer to the major version of the product
-instead of a specific minor version (for example, if comparing how something in
-OpenShift Container Platform 4 differs from OpenShift Container Platform 3),
-just use the major version number. Do not prepend with a `v`, as in `v3` or `v4`.
+* 10.0.0.0 to 10.255.255.255 (class A address block 10.0.0.0/8)
+* 172.16.0.0 to 172.31.255.255 (class B address block 172.16.0.0/12)
+* 192.168.0.0 to 192.168.255.255 (class C address block 192.168.0.0/16)
-Do not use markup in headings.
+Replace all public IP addresses with an address from the following blocks. These address blocks are reserved for documentation:
-*Do not use internal company server names in command or example output*. See
-suggested host name examples
-https://docs.openshift.com/container-platform/3.11/install/example_inventories.html#multi-masters-single-etcd-using-native-ha[here].
+* 192.0.2.0 to 192.0.2.255 (TEST-NET-1 address block 192.0.2.0/24)
+* 198.51.100.0 to 198.51.100.255 (TEST-NET-2 address block 198.51.100.0/24)
+* 203.0.113.0 to 203.0.113.255 (TEST-NET-3 address block 203.0.113.0/24)
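+
+For example, sanitized example output might show a TEST-NET address in place of a real public address (the resource and address values are illustrative):
+
+[source,terminal]
+----
+NAME              TYPE           EXTERNAL-IP
+router-default    LoadBalancer   203.0.113.25
+----
+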
+[NOTE]
+====
+There might be advanced networking examples that require specific IP addresses, or cloud provider-specific examples that require a public IP address. Contact a subject matter expert if you need assistance with replacing IP addresses.
+====
== Links, hyperlinks, and cross references
-Links can be used to cross-reference internal assemblies or send customers to external information resources for further reading.
+Links can be used to cross-reference internal assemblies or send readers to external information resources for further reading.
In OpenShift docs:
@@ -289,17 +483,15 @@ xref:../../architecture/networking/routes.adoc#architecture-core-concepts-routes
.Markup example of cross-referencing
----
Rollbacks revert part of an application back to a previous deployment. Rollbacks can be performed using the REST API or
-the xref:../cli_reference/get_started_cli.adoc#installing-the-cli[OpenShift CLI].
+the xref:../cli_reference/openshift_cli/get_started_cli.adoc#installing-openshift-cli[OpenShift CLI].
Before you can create a domain, you must first xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[create an application].
----
-.Rendered output of cross-referencing:
-====
-Rollbacks revert part of an application back to a previous deployment. Rollbacks can be performed using the REST API or the xref:../cli_reference/get_started_cli.adoc#installing-the-cli[OpenShift CLI].
-
-Before you can create a domain, you must first xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[create an application].
-====
+.Rendered output of cross-referencing
+> Rollbacks revert part of an application back to a previous deployment. Rollbacks can be performed using the REST API or the xref:../cli_reference/openshift_cli/get_started_cli.adoc#installing-openshift-cli[OpenShift CLI].
+>
+> Before you can create a domain, you must first xref:../dev_guide/application_lifecycle/new_app.adoc#dev-guide-new-app[create an application].
=== Links to external websites
@@ -364,28 +556,124 @@ xref:../baz/zig.adoc#baz-zig[see the ZIG manual for more]
You must use the .adoc extension in order for the link to work correctly and you must specify an anchor ID.
====
-== Indicating Technology Preview Features
+== Embedding an external file
+
+You can embed content hosted outside the link:https://github.com/openshift/openshift-docs[openshift-docs]
+GitHub repository by using the `include` directive to target the URI of a raw
+file. This is helpful for cases where content frequently changes; you embed the raw
+file and the content auto-updates based on the changes made to the content on its
+host site.
+
+[IMPORTANT]
+====
+You can embed files only from GitHub repositories managed by the
+`openshift` GitHub user. You must also prefix your external file URI with `https`.
+URIs beginning with `http` are forbidden for security reasons and will fail the
+documentation build.
+====
+
+For example, if you want to embed the link:https://github.com/openshift/installer/blob/release-4.8/upi/azure/01_vnet.json[01_vnet.json] template, include the URI of its raw file version like this:
+
+```
+.`01_vnet.json` ARM template
+[source,json]
+----
+\include::https://raw.githubusercontent.com/openshift/installer/release-4.8/upi/azure/01_vnet.json[]
+----
+```
+
+[NOTE]
+====
+Embedding external files is restricted to files that change frequently, like templates. You must ensure that embedded files are QE verified before they are updated on their host site.
+====
+
+[NOTE]
+====
+You must get approval from the Engineering, QE, and Docs teams before embedding an external file.
+====
+
+== Indicating Technology Preview features
To indicate that a feature is in Technology Preview, include the
`modules/technology-preview.adoc` file in the feature's assembly to keep the
-supportablity wording consistent across Technology Preview features and provide a value for thee :FeatureName: variable before you include this module.
+supportability wording consistent across Technology Preview features. Provide a value for the `:FeatureName:` variable before you include this module.
+
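+For example, a sketch of how this might look in an assembly (the feature name is hypothetical):
+
+----
+:FeatureName: Example feature
+\include::modules/technology-preview.adoc[leveloffset=+1]
+----
+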
+See link:https://github.com/openshift/openshift-docs/pull/13878/files#diff-615ba1bf3b09d11a9c2604b775aa32f2[an example] of how this is applied.
-See
-link:https://github.com/openshift/openshift-docs/pull/13878/files#diff-615ba1bf3b09d11a9c2604b775aa32f2[an
-example] of how this is applied.
+== Indicating deprecated features
+To indicate that a feature is deprecated, include the `modules/deprecated-feature.adoc` file in the feature's assembly, or in each relevant assembly (such as for a deprecated Operator), to keep the supportability wording consistent across deprecated features. Provide a value for the `:FeatureName:` variable before you include this module.
+
+See link:https://github.com/openshift/openshift-docs/pull/31776/files[an example] of how this is applied.
+
+== Verification of your content
+All documentation changes must be verified by a QE team associate before merging. This includes executing all "Procedure" changes and confirming expected results. There are exceptions for typo-level changes, formatting-only changes, and other negotiated documentation sets and distributions.
+
+If a documentation change is due to a Bugzilla bug or Jira issue, set the bug/issue to ON_QA when you have a PR ready. After QE approval is given (either in the bug/issue or in the PR), the QE associate should move the bug/issue status to VERIFIED, at which point the associated PR can be merged. It is also OK for the assigned writer to change the status of the bug/issue to VERIFIED if approval for the changes has been provided in another forum (Slack, PR, or email). In that case, the writer should note in a comment in the bug/issue that the QE team approved the change.
== Images
-If you want to link to an image:
-1. Put it in symlinked `images` folder.
-2. In the content, use this format to link to an image:
+=== Block images
+
+To include a block image (an image on its own line):
+
+1. Put the image file in the `images` folder.
++
+Ensure that the folder containing your assembly contains an `images` symbolic link to the top-level `images/` directory; otherwise, the image will not be found when building the docs.
+
+2. In the `.adoc` content, use this format to link to the image:
++
+----
+image::<image_filename>[]
+----
++
+Note the double `::` instead of a single `:`, as seen in inline image usage.
+You only have to specify `<image_filename>` itself and not the full file path;
+the build mechanism automatically expands this appropriately.
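+
+For example (the image file name is hypothetical):
+
+----
+image::cluster-overview.png[Cluster overview]
+----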
+
+=== Inline images (icons)
+
+Inline images can be used to indicate graphic items in the web console, such as
+buttons or menu icons.
+
+==== Inserting reusable images inline
+
+To simplify reuse, the following common SVGs (the OpenShift web console uses the
+Font Awesome icon set) have already been added to the `images` folder with a
+user-defined entity added to the `common-attributes.adoc` module:
+
+|===
+|Icon |Entity |Alt text |File name
+
+|Kebab
+|`:kebab:`
+|Options menu
+|`ellipsis-v.svg`
+
+|===
+
+When using inline, include the image after the UI element name. For example:
+
+----
+Click the *Options* menu {kebab}.
+----
+
+==== Inserting images inline without reuse
+
+If you are inserting an image that is not part of the `common-attributes.adoc`
+module, then include the image using this formatting:
----
-image::[image]
+image:<image_filename>[title="<title>"]
----
-You only have to specify ``. The build mechanism automatically specifies the file path.
+Note the single `:` instead of a double `::`, as seen in block image usage.
+
+For example:
+
+----
+image:manage-columns.png[title="Manage Columns icon"]
+----
== Formatting
@@ -396,20 +684,72 @@ For all of the system blocks including table delimiters, use four characters. Fo
---- for code blocks
....
+[NOTE]
+====
+You can use backticks or other markup in the title for a block, such as in a code block's `.Example` title or a table's `.Description` title.
+====
+
+=== Code blocks, command syntax, and example output
+
+Code blocks are generally used to show examples of command syntax, example
+screen output, and configuration files.
+
+The main distinction between showing command syntax and a command example is
+that command syntax shows readers how to use the command without real values.
+An example command, however, shows the command with actual values, along with
+example output of that command, where applicable.
+
+For example:
+
+....
+In the following example, the `oc get` operation returns a complete list of services that are currently defined:
+
+[source,terminal]
+----
+$ oc get se
+----
+
+.Example output
+[source,terminal]
+----
+NAME LABELS SELECTOR IP PORT
+kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443
+kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80
+docker-registry name=registrypod 172.30.17.158 5001
+----
+....
-=== Code blocks
-Code blocks are used to show examples of command screen outputs, or
-configuration files. When using command blocks, always use the actual values for
-any items that a user would normally replace. Code blocks should represent
-exactly what a customer would see on their screen. If you must expand or
-provide information on what some of the contents of a screen output or
-configuration file represent, then use callouts to provide that information.
+This renders as:
-Follow these general guidelines when using code blocks:
+> In the following example, the `oc get` operation returns a complete list of services that are currently defined:
+>
+> ----
+> $ oc get se
+> ----
+>
+> .Example output
+> ----
+> NAME LABELS SELECTOR IP PORT
+> kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443
+> kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80
+> docker-registry name=registrypod 172.30.17.158 5001
+> ----
+
+The following guidelines go into more detail about specific requirements and
+recommendations when using code blocks:
+
+* If a step in a procedure is to run a command, make sure that the step
+text includes an explicit instruction to "run" or "enter" the command. In most cases,
+use one of the following patterns to introduce the code block:
+
+** <Action to complete>, run the following command:
+** <Action to complete>, enter the following command:
* Do NOT use any markup in code blocks; code blocks generally do not accept any markup.
-* For all code blocks, you must include an empty line above a code block.
+* For all code blocks, you must include an empty line above a code block (unless
+that line is introducing block metadata, such as `[source,terminal]` for syntax
+highlighting).
+
Acceptable:
+
@@ -432,20 +772,101 @@ $ lorem.sh
+
Without the line spaces, the content might not be parsed correctly.
-* It is recommended to include source tags for the programming language used in the code block to enable syntax highlighting. For example, use the source tags
- `[source, yaml]` or `[source, javascript]`.
+* Use `[source,terminal]` for `oc` commands or any terminal commands to enable
+syntax highlighting. Any `[source]` metadata must go on the line directly before
+the code block. For example:
+
-NOTE: Do not use `[source, bash]` for `oc` commands or any terminal commands, because it does not render properly in some cases.
+....
+[source,terminal]
+----
+$ oc get nodes
+----
+....
++
+If you are also showing a code block for the output of the command, use
+`[source,terminal]` for that code block as well.
+
+* Use source tags for the programming language used in the code block to enable
+syntax highlighting. For example:
+
+** `[source,yaml]`
+** `[source,go]`
+** `[source,javascript]`
+
+* Do not use more than one command per code block. For example, the following must
+be split up into three separate code blocks:
+
....
-Lorem ipsum
+To create templates you can modify, run the following commands:
+[source,terminal]
----
-$ lorem.sh
+$ oc adm create-login-template > login.html
+----
+
+[source,terminal]
+----
+$ oc adm create-provider-selection-template > providers.html
+----
+
+[source,terminal]
+----
+$ oc adm create-error-template > errors.html
+----
+....
+
+* Separate a command and its related example output into individual code blocks.
+This allows the command to be easily copied using the button on
++++docs.openshift.com+++.
++
+In addition, prepend the code block for the output with the title `.Example output`
+to make it consistently clear across the docs when this is being represented. A
+lead-in sentence explaining the example output is optional. For example:
++
+....
+Use the `oc new-project` command to create a new project:
+
+[source,terminal]
+----
+$ oc new-project my-project
+----
+
+The output verifies that a new project was created:
+
+.Example output
+[source,terminal]
+----
+Now using project "my-project" on server "https://openshift.example.com:6443".
----
....
-* Try to use callouts to provide information on what the output represents when required.
+* To mark up command syntax, use the code block and wrap any replaceable values in
+angle brackets (`<>`) with the required command parameter, using underscores
+(`_`) between words as necessary for legibility. For example:
++
+....
+To view a list of objects for the specified object type, enter the following command:
+
+[source,terminal]
+----
+$ oc get <object_type>
+----
+....
++
+This renders as:
++
+--
+> To view a list of objects for the specified object type, enter the following command:
+>
+> ----
+> $ oc get <object_type>
+> ----
+--
++
+NOTE: Avoid using full command syntax inline with sentences.
+
+* If you must provide additional information on what a line of a code block
+represents, use callouts (`<1>`, `<2>`, etc.) to provide that information.
+
Use this format when embedding callouts into the code block:
+
@@ -459,6 +880,27 @@ code example 2 <2>
<2> A note about the second example value.
....
+* If you must provide additional information on what a line of a code block
+represents and the use of callouts is impractical, you can use a description list
+to provide information about the variables in the code block. Using callouts
+might be impractical if a code block contains too many conditional statements to
+easily use numbered callouts or if the same note applies to multiple lines of the code block.
++
+....
+----
+code
+code
+----
++
+where:
+
+<variable_1>:: Specifies the explanation of the first variable.
+<variable_2>:: Specifies the explanation of the second variable.
+....
++
+Be sure to introduce the description list with "where:" and start each variable
+description with "Specifies."
+
* For long lines of code that you want to break up among multiple lines, use a
backslash to show the line break. For example:
+
@@ -469,12 +911,51 @@ $ oc get endpoints --all-namespaces --template \
{{ end }}{{ end }}{{ "\n" }}{{ end }}' | awk '/ 172\.30\./ { print $1 }'
----
-* If the user must run a command as root, use a number sign, `#`, at the start of the command instead of a dollar sign, `$`. For example:
+* If the user must run a command as root, use a number sign (`#`) at the start of the command instead of a dollar sign (`$`). For example:
+
----
# subscription-manager list
----
+* For snippets or sections of a larger file, use an ellipsis (`...`) to show that
+the file continues before and/or after the quoted block:
++
+----
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ test: liveness
+...
+----
++
+or
++
+----
+...
+spec:
+ containers:
+ - args:
+ image: k8s.gcr.io/liveness
+...
+----
++
+Do not use `[...]`, `<...>`, or any other variant.
+
+* Do not use `jq` in commands (unless it is truly required), because this requires users to install the `jq` tool. Often, the same or similar result can be accomplished using `jsonpath` for `oc` commands.
++
+For example, this command that uses `jq`:
++
+[source,terminal]
+----
+$ oc get clusterversion -o json|jq ".items[0].spec"
+----
++
+can be updated to use `jsonpath` instead:
++
+[source,terminal]
+----
+$ oc get clusterversion -o jsonpath='{.items[0].spec}{"\n"}'
+----
+
=== Inline code or commands
Do NOT show full commands or command syntax inline within a sentence. The next section covers how to show commands and command syntax.
@@ -486,61 +967,46 @@ Use the `GET` operation to do x.
This renders as:
-Use the `GET` operation to do x.
+> Use the `GET` operation to do x.
-=== Command Syntax and Examples
-The main distinction between showing command syntax and example is that a command syntax should just show customers how to use the command without real values. An example on the other hand should show the command with actual values with an example output of that command, where applicable.
+=== System messages
-==== Command syntax
-To markup command syntax, use the code block and wrap the replaceables in <> with the required command parameters, as shown in the following example. Do NOT use commands or command syntax inline with sentences.
+System messages include error, warning, confirmation, and information messages that are presented to the user in places such as the GUI, CLI, or system logs.
-....
-The following command returns a list of objects for the specified object type:
+If a message is short enough to include inline, enclose it in backticks:
-----
-$ oc get
-----
+....
+Previously, image builds and pushes would fail with the `error reading blob from source` error message because the builder logic would compute the contents of new layers twice.
....
-This would render as follows:
-
-The following command returns a list of objects for the specified object type:
-
-----
-$ oc get
-----
+This renders as:
-==== Examples
-As mentioned an example of a command should use actual values and also show an output of the command, as shown in the following example. In some a heading may not be required.
+> Previously, image builds and pushes would fail with the `error reading blob from source` error message because the builder logic would compute the contents of new layers twice.
+If a message is too long to include inline, put it inside a code block with `[source,text]` metadata:
....
-In the following example the `oc get` operation returns a complete list of services that are currently defined.
-
-.Example title
+Previously, the AWS Terraform provider that the installation program used occasionally caused a race condition with the S3 bucket, and the cluster installation failed with the following error message:
+[source,text]
----
-$ oc get se
-NAME LABELS SELECTOR IP PORT
-kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443
-kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80
-docker-registry name=registrypod 172.30.17.158 5001
+When applying changes to module.bootstrap.aws_s3_bucket.ignition, provider level=error msg="\"aws\" produced an unexpected new value for was present, but now absent.
----
-....
-This would render as shown:
+Now, the installation program uses different AWS Terraform provider code, which now robustly handles S3 eventual consistency, and the installer-provisioned AWS cluster installation does not fail with that error message.
+....
-In the following example the `oc get` operation returns a complete list of services that are currently defined.
+This renders as:
-.Example title
+> Previously, the AWS Terraform provider that the installation program used occasionally caused a race condition with the S3 bucket, and the cluster installation failed with the following error message:
+>
+> ----
+> When applying changes to module.bootstrap.aws_s3_bucket.ignition, provider level=error msg="\"aws\" produced an unexpected new value for was present, but now absent.
+> ----
+>
+> Now, the installation program uses different AWS Terraform provider code, which now robustly handles S3 eventual consistency, and the installer-provisioned AWS cluster installation does not fail with that error message.
-----
-$ oc get se
-NAME LABELS SELECTOR IP PORT
-kubernetes component=apiserver,provider=kubernetes 172.30.17.96 443
-kubernetes-ro component=apiserver,provider=kubernetes 172.30.17.77 80
-docker-registry name=registrypod 172.30.17.158 5001
-----
+NOTE: Always refer to a message with the type of message it is, followed by the word "message". For example, refer to an error message as an "error message", and not simply as an "error".
=== Lists
Lists are created as shown in this example:
@@ -553,13 +1019,11 @@ Lists are created as shown in this example:
. Item 3
....
-This will render as such:
-
-. Item 1
+This renders as:
-. Item 2
-
-. Item 3
+> . Item 1
+> . Item 2
+> . Item 3
If you must add any text, admonitions, or code blocks, you have to add the continuation `+`, as shown in the example:
@@ -575,29 +1039,165 @@ some code block
. Item 3
....
-This renders as shown:
+This renders as:
-. Item 1
-+
+> . Item 1
+> +
+> ----
+> some code block
+> ----
+> . Item 2
+> . Item 3
+
+=== Footnotes
+
+Avoid footnotes when possible.
+
+If you reference a footnote from only a single location, use the following syntax:
+
+.Footnote
+....
+footnote:[This is the footnote text.]
+....
+
+If you reference a footnote from multiple locations, set an attribute with the footnote text. As a consequence, the footnote text is duplicated at the bottom of the page.
+
+.Footnote with text set by an attribute
+....
+:note-text: This is a footnote.
+
+This text has a footnote qualifier attached footnote:[{note-text}].
+
+But this other text uses the same qualifier elsewhere footnote:[{note-text}].
+....
+
+Avoid using `footnoteref`.
+
+[IMPORTANT]
+====
+The `footnoteref` directive is deprecated in Asciidoctor and causes a build warning when `ascii_binder` is run.
+====
+
+.Footnote with reference
+....
+footnoteref:[ref-string, This is the footnote text.]
+....
+
+==== Alternative footnote styling in tables
+
+For footnotes in tables, use the following syntax to mimic Asciidoctor's
+styling:
+
+....
+[cols="3",options="header"]
+|===
+|Header 1
+|Header 2
+|Header 3
+
+|Item A ^[1]^
+|Item B
+|Item C ^[2]^
+
+|Item D
+|Item E ^[3]^
+|Item F ^[3]^
+|===
+[.small]
+--
+1. A description.
+2. Another description.
+3. Two items relate to this description.
+--
+....
+
+The notes are kept immediately after the table, instead of moved to the bottom
+of the rendered assembly. This manual method also allows you to reuse the same
+footnote number for multiple references as needed.
+
+Note the following:
+
+* Add a space before the superscripted numbers with square brackets.
+* To match the table cell's font size, start the ordered list with a `[.small]`
+style and wrap it in a `--` block.
+
+[id="collapsible-content"]
+=== Collapsible content
+
+You can collapse sections of content by using the `collapsible` option, which converts the Asciidoctor markup to HTML `details` and `summary` sections. The `collapsible` option is used at the writer's discretion and is appropriate for considerably long code blocks, lists, or other such content that significantly increases the length of a module or assembly.
+
+[NOTE]
+====
+You must set a title for the `summary` section. If a title is not set, the default title is "Details."
+====
+
+Collapsible content is formatted as shown:
+
+....
+.Title of the `summary` dropdown
+[%collapsible]
+====
+This is content within the `details` section.
+====
+....
+
+This renders as a dropdown with collapsed content:
+
+.Title of the `summary` dropdown
+[%collapsible]
+====
+This is content within the `details` section.
+====
+
+If your collapsible content includes an admonition such as a note or warning, the admonition must be nested:
+
+....
+.Collapsible content that includes an admonition
+[%collapsible]
+====
+This content includes an admonition.
+
+[source,terminal]
----
-some code block
+$ oc whoami
----
-. Item 2
+[NOTE]
+=====
+Nest admonitions when using the `collapsible` option.
+=====
+====
+....
-. Item 3
+This renders as:
+
+.Collapsible content that includes an admonition
+[%collapsible]
+====
+This content includes an admonition.
+
+[source,terminal]
+----
+$ oc whoami
+----
+
+[NOTE]
+=====
+Nest admonitions when using the `collapsible` option.
+=====
+====
+
+=== Quick reference
-==== Quick reference
.User accounts and info
[options="header"]
|===
|Markup in command syntax |Description |Substitute value in Example block
-|
+|`<user_name>`
|Name of user account
|user@example.com
-|
+|`<password>`
|User password
|password
|===
@@ -607,11 +1207,11 @@ some code block
|===
|Markup in command syntax |Description |Substitute value in Example block
-|
+|`<project>`
|Name of project
|myproject
-|
+|`<app_name>`
|Name of an application
|myapp
|===
@@ -629,31 +1229,167 @@ Text for admonition
[id="api-object-formatting"]
== API object formatting
-Use initial capitalization and camel case for Kubernetes/OpenShift API objects
-and do not mark them up unless referring to a specific field or variable name
-from a spec or manifest.
+For terms that are API objects, the way they are written depends on whether the term is a general reference or an actual reference to the object.
+
+[id="api-object-general-references"]
+=== General references
+
+A general reference is any time you are speaking conceptually, or generally, about these components in a cluster.
+
+When referring to API object terms in general usage, use lowercase and separate multi-word API objects. *Default to following this guidance unless you are specifically interacting with or referring to the API object (see xref:api-object-object-references[Object references]).*
+
+For example:
+
+* pod
+* node
+* daemon set
+* config map
+* deployment
+* image stream
+* persistent volume claim
-This matches general Kubernetes usage and makes it obvious that a specific
-concept is being referred to. For example:
+.Examples of general references
+....
+Kubernetes runs your workload by placing containers into pods to run on nodes.
+
+You must have at least one secret, config map, or service account.
+
+The total number of persistent volume claims in a project.
+....
+
+Note that if an object uses an acronym or other special capitalization, then its general reference should honor that. For example, general references to `APIService` should be written as "API service", not "api service". Any other exceptions or special guidance are noted in the xref:../contributing_to_docs/term_glossary.adoc[glossary].
+
+[id="api-object-object-references"]
+=== Object references
+
+An object reference is when you are referring to the actual instance of an API object, where the object name is important.
+
+When referring to actual instances of API objects, use link:https://en.wikipedia.org/wiki/Camel_case#Variations_and_synonyms[PascalCase] and mark it up as monospace in backticks (````).
+
+[NOTE]
+====
+Do not use backticks or other markup in assembly or module headings. You can use backticks or other markup in the title for a block, such as a code block `.Example` or a table `.Description` title.
+====
+
+Be sure to match the proper object type (or `kind` in Kubernetes terms); for example, do not add an "s" to make it plural. *Only follow this guidance if you are explicitly referring to the API object (for example, when editing an object in the CLI or viewing an object in the web console).*
+
+For example:
-- Pod
-- Deployment
-- Operator
-- DaemonSet (and not "daemon set", "daemonset", or "Daemonset")
+* `Pod`
+* `Node`
+* `DaemonSet`
+* `ConfigMap`
+* `Deployment`
+* `ImageStream`
+* `PersistentVolumeClaim`
+
+.Examples of API object references
+....
+After you create a `Node` object, or the kubelet on a node self-registers, the control plane checks whether the new `Node` object is valid.
+
+The default amount of CPU that a container can use if not specified in the `Pod` spec.
+
+Create a file, `pvc.yaml`, with the `PersistentVolumeClaim` object definition.
+....
+
+[NOTE]
+====
+Use "object", "resource", "custom resource", "spec", etc. as appropriate after the object reference. This helps with clarity and readability.
+
+Another situation where this is necessary is when referring to the plural version of objects. Do not add an "s" to the end of an object name reference to make it plural. Use only the official `kind` of object (for example, seen when you run `oc api-resources`).
+
+For example, the object `kind` for a node is `Node`, not `Nodes`. So do not write "You can create `Nodes` using `kubectl`." Instead, rewrite to something like "You can create `Node` objects using `kubectl`."
+====
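+
+For example, you can list the official `kind` for every available object type by running the following command:
+
+[source,terminal]
+----
+$ oc api-resources
+----
+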
[id="operator-name-capitalization"]
-==== Operator name capitalization
+=== Operator capitalization
+
+The term "Operator" is always capitalized. For example:
+
+----
+= Support policy for unmanaged Operators
+
+Individual Operators have a `managementState` parameter in their configuration.
+----
An Operator's full name must be a proper noun, with each word initially
capitalized. If it includes a product name, defer to the product's capitalization
style guidelines. For example:
-- Cluster Logging Operator
+- Red Hat OpenShift Logging Operator
- Prometheus Operator
- etcd Operator
- Node Tuning Operator
- Cluster Version Operator
+== Declarative config examples
+
+Many of our procedures provide imperative `oc` commands (which cannot be stored in a Git repo). Due to efforts around improving the experience for GitOps users, we sometimes also want to provide a declarative YAML example that achieves the same configuration. This allows users to store these YAML configurations in a Git repo and follow GitOps practices to configure OpenShift.
+
+[IMPORTANT]
+====
+When adding declarative examples to procedures, do not completely replace the imperative command with the declarative YAML example. Some users might still prefer the imperative option.
+====
+
+To add a declarative YAML example to a procedure step with an existing imperative command, add it in a "TIP" admonition by following the template in the example below. This example uses an imperative command (`oc create configmap`) to create a config map, and then provides the declarative YAML example of the `ConfigMap` object afterward.
+
+....
+* Define a `ConfigMap` object containing the certificate authority by using the following command:
++
+[source,terminal]
+----
+$ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config
+----
++
+[TIP]
+====
+You can alternatively apply the following YAML to create the config map:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ca-config-map
+ namespace: openshift-config
+data:
+  ca.crt: <certificate>
+----
+====
+....
+
+This renders as:
+
+> * Define a `ConfigMap` object containing the certificate authority by using the following command:
+> +
+> [source,terminal]
+> ----
+> $ oc create configmap ca-config-map --from-file=ca.crt=/path/to/ca -n openshift-config
+> ----
+> +
+> [TIP]
+> ====
+> You can alternatively apply the following YAML to create the config map:
+>
+> [source,yaml]
+> ----
+> apiVersion: v1
+> kind: ConfigMap
+> metadata:
+> name: ca-config-map
+> namespace: openshift-config
+> data:
+>   ca.crt: <certificate>
+> ----
+> ====
+
+[NOTE]
+====
+If you are adding a particularly long YAML block, you can optionally use the xref:collapsible-content[`%collapsible`] feature to allow users to collapse the code block.
+====
+
== Quick markup reference
|===
@@ -661,101 +1397,144 @@ style guidelines. For example:
|Code blocks
-a|Use the following syntax for the `oc` command:
+a|
+....
+Use the following syntax for the `oc` command:
----
$ oc
----
+....
-a|Use the following syntax for the `oc` command:
-
-----
-$ oc
-----
+a|
+> Use the following syntax for the `oc` command:
+>
+> ----
+> $ oc
+> ----
a|Use backticks for all non-GUI "system items", including:
* Inline commands, operations, literal values, variables, parameters, settings,
flags, environment variables, user input
* System term/item, user names, unique or example names for individual API
-objects/resources (e.g., a Pod named "mypod"), daemon, service, or software
+objects/resources (e.g., a pod named `mypod`), daemon, service, or software
package
* RPM packages
* File names or directory paths
-a|$$`oc get`$$
-
-$$Set the `upgrade` variable to `true`.$$
-
-$$Use the `--amend` flag.$$
-
-$$Answer by typing `Yes` or `No` when prompted.$$
-
-$$`user_name`$$
-
-$$`service_name`$$
-
-$$`package_name`$$
-
-$$`filename`$$
+a|
+....
+`oc get`
-a|Use the `oc get services` command to get a list of services that are currently defined.
+Set the `upgrade` variable to `true`.
Use the `--amend` flag.
-Set the `upgrade` variable to `true`.
-
Answer by typing `Yes` or `No` when prompted.
-`cluster-admin` user
+`user_name`
-`firewalld` service
+`service_name`
-`rubygems` RPM package
+`package_name`
-The `express.conf` configuration file is located in the `/usr/share` directory.
+`filename`
+....
-|System or software variable to be replaced by the user
-a|$$``$$
+a|
+> Use the `oc get services` command to get a list of services that are currently defined.
+>
+>
+>
+> Use the `--amend` flag.
+>
+>
+>
+> Set the `upgrade` variable to `true`.
+>
+>
+>
+> Answer by typing `Yes` or `No` when prompted.
+>
+>
+>
+> `cluster-admin` user
+>
+>
+>
+> `firewalld` service
+>
+>
+>
+> `rubygems` RPM package
+>
+>
+>
+> The `express.conf` configuration file is located in the `/usr/share` directory.
-$$``$$
+|System or software variable to be replaced by the user
+a|
+....
+`<deployment>`
-a|Use the following command to roll back a Deployment, specifying the Deployment name:
+`<deployment_name>`
+....
-`oc rollback `
+a|
+> Use the following command to roll back a Deployment, specifying the Deployment name:
+>
+> `oc rollback <deployment_name>`
|Use single asterisks for web console / GUI items (menus, buttons, page titles, etc.).
-Use two characters to form the arrow in a series of menu items, $$->$$.
-
-a|Choose $$*Cluster Console*$$ from the list.
-
-Navigate to the $$*Operators* -> *Catalog Sources*$$ page.
-
-Click $$*Create Subscription*$$.
+Use two characters to form the arrow in a series of menu items (`$$->$$`).
-a|Choose *Cluster Console* from the list.
+a|
+....
+Choose *Cluster Console* from the list.
Navigate to the *Operators* -> *Catalog Sources* page.
Click *Create Subscription*.
+....
+
+a|
+> Choose *Cluster Console* from the list.
+>
+>
+>
+> Navigate to the *Operators* -> *Catalog Sources* page.
+>
+>
+>
+> Click *Create Subscription*.
|Use underscores to emphasize the first appearance of a new term.
-|An $$_Operator_$$ is a method of packaging, deploying, and managing a Kubernetes application.
+a|
+....
+An _Operator_ is a method of packaging, deploying,
+and managing a Kubernetes application.
+....
-|An _Operator_ is a method of packaging, deploying, and managing a Kubernetes application.
+a|
+> An _Operator_ is a method of packaging, deploying, and managing a Kubernetes application.
-|Use of single asterisks for general emphasis is allowed but should only be used
+|Use of underscores for general emphasis is allowed but should only be used
very sparingly. Let the writing, instead of font usage, create the emphasis
wherever possible.
-a|Do $$*not*$$ delete the file.
+a|
+....
+Do _not_ delete the file.
+....
-a|Do *not* delete the file.
+a|
+> Do _not_ delete the file.
|Footnotes
-|A footnote is created with the footnote macro. If you plan to reference a footnote more than once, use the ID footnoteref macro. The customer portal does not support spaces in the footnoteref. For example, "dynamic PV" should be "dynamicPV".
+|A footnote is created with the footnote macro. If you plan to reference a footnote more than once, set an attribute with the footnote text instead of using the deprecated footnoteref macro. If you must use footnoteref, note that the Customer Portal does not support spaces in the footnoteref ID. For example, "dynamic PV" should be "dynamicPV".
|See link:http://asciidoctor.org/docs/user-manual/#user-footnotes[Footnotes] for the footnote and footnoteref syntax.
diff --git a/contributing_to_docs/term_glossary.adoc b/contributing_to_docs/term_glossary.adoc
index d907a1c92d2a..4bab6392446d 100644
--- a/contributing_to_docs/term_glossary.adoc
+++ b/contributing_to_docs/term_glossary.adoc
@@ -8,11 +8,6 @@
:toc: macro
:toc-title:
-NOTE: The guidelines in this branch are specific for OpenShift / OKD 4.x
-documentation. See the
-link:https://github.com/openshift/openshift-docs/tree/master/contributing_to_docs[`master` branch]
-for guidelines for OpenShift / OKD 3.x documentation.
-
toc::[]
== Usage of OpenShift terms
@@ -23,6 +18,8 @@ goal is to standardize terminology across OpenShift content and be consistent in
the usage of our terminology when referring to OpenShift components or
architecture.
+For terms that are also API objects, there is different guidance for general usage of the term versus referencing the actual API object. This glossary mainly defines the general usage guideline (lowercase, separating words), but be sure to use the object formatting (PascalCase, in monospace) when referring to the actual object. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more information.
+
[NOTE]
====
If you want to add terms or other content to this document, or if anything must
@@ -42,17 +39,24 @@ An action consists of _project_, _verb_, and _resource_:
* *Project* is the project containing the resource that is to be acted upon.
* *Verb* is a get, list, create, or update operation.
* *Resource* is the API endpoint being accessed. This is distinct from the
-referenced resource itself, which can be a Pod, DeploymentConfig, Build, etc.
+referenced resource itself, which can be a pod, deployment, build, etc.
''''
-=== apiserver
+=== API server
-Usage: apiserver(s) or API server(s) as appropriate
+Usage: API server(s)
A REST API endpoint for interacting with the system. New deployments and
configurations can be created with this endpoint, and the state of the system
can be interrogated through this endpoint as well.
+''''
+=== API service
+
+Usage: API service(s)
+
+When referencing the actual object, write as `APIService`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
+
''''
=== app
@@ -73,17 +77,17 @@ refer to some combination of an image, a Git repository, or a replication
controller, and this application might be running PHP, MySQL, Ruby, JBoss, or
something else.
-*Examples of correct usage*
+.Examples of correct usage
====
-_OpenShift runs your applications_.
+OpenShift runs your applications.
-_The `new-app` command creates a new application from the components you specify._
+The `new-app` command creates a new application from the components you specify.
-_My application has two Ruby web Services connected to a database back end and a RabbitMQ message queue, as well as a python worker framework._
+My application has two Ruby web services connected to a database back end and a RabbitMQ message queue, as well as a Python worker framework.
-_You can check the health of your application by adding probes to the various parts._
+You can check the health of your application by adding probes to the various parts.
-_You can host a WordPress application on OpenShift._
+You can host a WordPress application on OpenShift.
====
''''
@@ -97,11 +101,23 @@ action. It consists of _identity_ and _action_.
== B
''''
-=== Build
+=== build
+
+Usage: build(s) when speaking generally about `Build` objects.
+
+* A build is the process of transforming input parameters into a resulting object.
+* A `Build` object encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the pod that executed the build.
+
+When referencing the actual object, write as "``Build`` object" as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
+
+''''
+=== build configuration
-Usage: Build(s) as appropriate
+Usage: build configuration(s) when speaking generally about `BuildConfig` objects.
-See link:doc_guidelines.adoc#api-object-formatting[API Object Formatting].
+A `BuildConfig` object is the definition of the entire build process. A build configuration describes a single build definition and a set of triggers for when a new build is created.
+
+When referencing the actual object, write as "``BuildConfig`` object" as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
== C
@@ -110,15 +126,26 @@ See link:doc_guidelines.adoc#api-object-formatting[API Object Formatting].
Usage: cluster
-The collection of controllers, Pods, and Services and related DNS and networking
+The collection of controllers, pods, and services and related DNS and networking
routing configuration that are defined on the system.
''''
-=== ConfigMap
+=== cluster service version
+
+Usage: cluster service version
+
+Operator Lifecycle Manager (OLM), part of the Operator Framework, uses a cluster service version (CSV) to define the metadata that accompanies an Operator container image and assist in running the Operator in a cluster. This metadata is defined in a `ClusterServiceVersion` API object used to populate user interfaces with information such as its logo, description, and version. It is also a source of technical information that is required to run the Operator, such as the RBAC rules it requires and the custom resources (CRs) it manages or depends on.
+
+This is commonly abbreviated as a CSV.
+
+''''
+=== config map
-Usage: ConfigMap(s)
+Usage: config map(s)
-ConfigMaps hold configuration data for Pods to consume.
+Config maps hold configuration data for pods to consume.
+
+When referencing the actual object, write as `ConfigMap`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
Do not use: configuration map(s)
@@ -141,35 +168,42 @@ used interchangeably with "container-based."
Usage: container group
-== D
+''''
+=== custom resource
+
+Usage: custom resource (CR)
+
+A resource implemented through the Kubernetes `CustomResourceDefinition` API. A custom resource is distinct from the built-in Kubernetes resources, such as the pod and service resources. Every CR is part of an API group.
+
+Do not capitalize.
''''
-=== Deployment
+=== custom resource definition (CRD)
-Usage: Deployment(s)
+Usage: custom resource definition (CRD) on first reference; CRD thereafter.
-Kubernetes-native objects that provide declarative updates for Pods and
-ReplicaSets.
+Create a custom resource definition to define a new custom resource.
-Do not confuse with DeploymentConfigs, which predate Deployments.
+This is commonly abbreviated as a CRD.
-To avoid further confusion, do not refer to an overall OpenShift installation /
-instance / cluster as an "OpenShift deployment".
+== D
-See link:https://kubernetes.io/docs/concepts/workloads/controllers/deployment/[Deployments - Kubernetes].
-See link:doc_guidelines.adoc#api-object-formatting[API Object Formatting].
+''''
+=== deployment
-=== DeploymentConfig
+Usage: deployment(s) when speaking generally about `Deployment` or `DeploymentConfig` objects
-Usage: DeploymentConfig(s)
+* A `Deployment` is a Kubernetes-native object that provides declarative updates for pods and
+replica sets.
+* A `DeploymentConfig` is an OpenShift-specific object that defines the template for a pod and manages
+deploying new images or configuration changes. Uses replication controllers. Predates Kubernetes `Deployment` objects.
-OpenShift-specific objects that define the template for a Pod and manage
-deploying new images or configuration changes. Uses ReplicationControllers.
+When referencing the actual object, write as `Deployment` or `DeploymentConfig` as appropriate. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
-Do not confuse with the Kubernetes native object Deployment, which were
-introduced later and use ReplicaSets.
+To avoid further confusion, do not refer to an overall OpenShift installation /
+instance / cluster as an "OpenShift deployment".
-Do not use: deployment configuration(s)
+Do not use: deployment configuration(s), deployment config(s)
''''
=== Dockerfile
@@ -179,7 +213,7 @@ link:doc_guidelines.adoc[Documentation Guidelines] for markup information.
Docker can build images automatically by reading the instructions from a
Dockerfile. A Dockerfile is a text document that contains all the commands you
-would normally execute manually in order to build a Docker image.
+would normally execute manually to build a docker image.
Source: https://docs.docker.com/reference/builder/
@@ -190,52 +224,143 @@ Open the [filename]#Dockerfile# and make the following changes.
Create a [filename]#Dockerfile# at the root of your repository.
====
+== E
+
+''''
+=== event
+
+Usage: event(s)
+
+An event is a data record expressing an occurrence and its context, based on the CNCF CloudEvents specification.
+Events contain two types of information: the event data representing the occurrence, and the context metadata providing contextual information about the occurrence.
+Events are routed from an event producer, or source, to connected event consumers.
+
+Routing can be performed based on information contained in the event, but an event will not identify a specific routing destination.
+Events can be delivered through various industry standard protocols such as HTTP, AMQP, MQTT, or SMTP, or through messaging and broker systems, such as Kafka, NATS, AWS Kinesis, or Azure Event Grid.
+
+When referencing the actual object, write as `Event`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
+
+// NOTE: This is inconsistently used, e.g. https://docs.openshift.com/container-platform/4.5/rest_api/metadata_apis/event-core-v1.html
+See: link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#event-v1-core[Event v1 core API], link:https://github.com/cloudevents/spec/blob/master/primer.md#cloudevents-concepts[CloudEvents concepts], and link:https://github.com/cloudevents/spec/blob/master/spec.md#event[CloudEvents specification].
+
+== F
+
+== G
+
+''''
+=== group/version/kind (GVK)
+
+Usage: group/version/kind (GVK) on first reference; GVK thereafter.
+
+A unique identifier for a Kubernetes API, specifying its _group_ (a collection of related APIs), _version_ (defines the release and level of stability), and _kind_ (an individual API type or name).
+
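+For example, the GVK for the Kubernetes `Deployment` API is the group `apps`, the version `v1`, and the kind `Deployment`.
+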
+While "GroupVersionKind" does appear in the API guide, typically there should not be a reason to mark up in reference to a specific object. Favor simply "GVK", or "GVKs" for pluralization, after the first time reference as much as possible. Avoid pluralizing the long form (e.g., group/version/kinds or groups/versions/kinds).
+
+== H
+
== I
''''
=== identity
-Usage: identity or identities as appropriate
+Usage: identity or identities
-Both the username and list of groups the user belongs to.
+Both the user name and list of groups the user belongs to.
''''
=== image
Usage: image(s)
+''''
+=== image stream
+
+Usage: image stream(s)
+
+Image streams provide a means of creating and updating container images in an ongoing way.
+
''''
=== Ignition config
Usage: Ignition config file or Ignition config files
-The file that Ignition uses to configure Red Hat Enterprise Linux CoreOS during
+The file that Ignition uses to configure Red Hat Enterprise Linux CoreOS (RHCOS) during
operating system initialization. The installation program generates different
-Ignition config files to initialize bootstrap, master, and worker nodes.
+Ignition config files to initialize bootstrap, control plane, and worker nodes.
+
+''''
+
+=== Ingress
+
+Usage: Ingress
+
+API object that allows developers to expose services through an HTTP(S) aware
+load balancing and proxy layer via a public DNS entry. The Ingress resource might
+further specify TLS options and a certificate, or specify a public CNAME that
+the OpenShift Ingress Controller should also accept for HTTP and HTTPS traffic.
+An administrator typically configures their Ingress Controller to be visible
+outside the cluster firewall, and might also add additional security, caching, or
+traffic controls on the service content.
+
+''''
+
+=== Ingress Controller
+
+Usage: Ingress Controller(s)
+
+A resource that forwards traffic to endpoints of services. The Ingress Controller
+replaces the router from {product-title} 3 and earlier.
+
+''''
+=== installer-provisioned infrastructure
+
+Usage: installer-provisioned infrastructure
+
+If the installation program deploys and configures the infrastructure that the
+cluster runs on, it is an installer-provisioned infrastructure installation.
+
+Do not use: IPI
+
+== J
== K
''''
=== kubelet
-Usage: kubelet(s) as appropriate
+Usage: kubelet(s)
The agent that controls a Kubernetes node. Each node runs a kubelet, which
handles starting and stopping containers on a node, based on the desired state
-defined by the master.
+defined by the control plane (also known as master).
+
+''''
+=== Kubernetes control plane
+
+Usage: Kubernetes control plane
+
+The Kubernetes-native equivalent to the link:#project[OpenShift control plane].
+An OpenShift system runs OpenShift control planes (also known as masters), not Kubernetes control planes, and
+an OpenShift control plane provides a superset of the functionality of a Kubernetes control plane, so it is generally preferred to use the term OpenShift control plane.
''''
-=== Kubernetes master
+=== Kubernetes API server
-Usage: Kubernetes master(s) as appropriate
+Usage: Kubernetes API server
-The Kubernetes-native equivalent to the link:#project[OpenShift master].
-An OpenShift system runs OpenShift masters, not Kubernetes masters, and
-an OpenShift master provides a superset of the functionality of a Kubernetes
-master, so it is generally preferred to use the term OpenShift master.
+== L
== M
+''''
+=== MetalLB
+
+Usage: MetalLB, MetalLB Operator, MetalLB project
+
+MetalLB is an open source project that provides a way to add services of type `LoadBalancer` to clusters that are not installed on infrastructure from a cloud provider. MetalLB primarily targets on-premise, bare-metal clusters, but any infrastructure that does not include a native load-balancing capability is a candidate.
+
+"MetalLB" always has the first letter and last two letters capitalized in general text. Do not use "Metallb."
+
''''
=== minion
@@ -246,7 +371,7 @@ Usage: Deprecated. Use link:#node[node] instead.
''''
=== node
-Usage: node(s) as appropriate
+Usage: node(s)
A
http://docs.openshift.org/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node[node]
@@ -278,28 +403,36 @@ applies to all distributions, as OKD does not have OpenShift in its name.
However, the following components currently use "OpenShift" in the name and are
allowed for use across all distribution documentation:
-- OpenShift Ansible Broker
- OpenShift Pipeline
- OpenShift SDN
+- OpenShift Ansible Broker (deprecated in 4.2 / removed in 4.4)
+
+''''
+=== OpenShift API server
+
+Usage: OpenShift API server
''''
=== OpenShift CLI
-Usage: OpenShift CLI
+Usage: OpenShift CLI (`oc`)
-The `oc` tool is the command line interface of OpenShift 3 and 4.
+The `oc` tool is the command-line interface of OpenShift 3 and 4.
+
+When referencing as a prerequisite for a procedure module, use the following
+construction: Install the OpenShift CLI (`oc`).
''''
-=== OpenShift master
+=== OpenShift control plane (also known as master)
-Usage: OpenShift master(s) as appropriate
+Usage: OpenShift control plane
Provides a REST endpoint for interacting with the system and manages the state
of the system, ensuring that all containers expected to be running are actually
running and that other requests such as builds and deployments are serviced.
New deployments and configurations are created with the REST API, and the state
of the system can be interrogated through this endpoint as well. An OpenShift
-master comprises the apiserver, scheduler, and SkyDNS.
+control plane comprises the API server, scheduler, and SkyDNS.
''''
=== Operator
@@ -311,6 +444,8 @@ application. A Kubernetes application is an application that is both deployed on
a Kubernetes cluster (including OpenShift clusters) and managed using the
Kubernetes APIs and `kubectl` or `oc` tooling.
+The term "Operator" is always captalized.
+
While "containerized" is allowed, do not use "Operatorize" to refer to building an
Operator that packages an application.
@@ -321,9 +456,25 @@ Install the etcd Operator.
Build an Operator using the Operator SDK.
====
-See link:doc_guidelines.adoc#api-object-formatting[API Object Formatting] for
+See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for
more on Operator naming.
+''''
+=== OperatorHub
+
+Usage: OperatorHub
+
+''''
+=== Operator Lifecycle Manager (OLM)
+
+Usage: Operator Lifecycle Manager, OLM
+
+Refer to this component without a preceding article ("the").
+
+.Examples of correct usage
+====
+You can use Operator Lifecycle Manager (OLM) to manually or automatically upgrade an Operator.
+====
+
''''
=== Options menu
@@ -338,15 +489,29 @@ menu that does not have hover text or a given name or label in the web console.
== P
''''
-=== Pod
+=== persistent volume (PV)
+
+Usage: persistent volume
+
+A persistent volume (PV) is a storage resource in the cluster. Developers can use a persistent volume claim (PVC) to request a PV resource without having specific knowledge of the underlying storage infrastructure.
+
+''''
+=== persistent volume claim (PVC)
-Usage: Pod(s) as appropriate
+Usage: persistent volume claim
+
+Developers can use a persistent volume claim (PVC) to request a persistent volume (PV) resource without having specific knowledge of the underlying storage infrastructure.
+
+''''
+=== pod
+
+Usage: pod(s)
Kubernetes object that groups related Docker containers that have to share
-network, filesystem, or memory together for placement on a node. Multiple
-instances of a Pod can run to provide scaling and redundancy.
+network, file system, or memory together for placement on a node. Multiple
+instances of a pod can run to provide scaling and redundancy.
-See link:doc_guidelines.adoc#api-object-formatting[API Object Formatting].
+When referencing the actual object, write as `Pod`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
''''
=== project
@@ -354,59 +519,70 @@ See link:doc_guidelines.adoc#api-object-formatting[API Object Formatting].
Usage: project(s)
A project allows a community of users to organize and manage their content in
-isolation from other communities. It is an extension of the namespace object
+isolation from other communities. It is an extension of the `Namespace` object
from Kubernetes.
-Even though projects are an OpenShift API object, it is not capitalized, much
-like namespace is not capitalized.
+When referencing the actual object, write as `Project`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
+
+== Q
+
+''''
+=== quick start
+
+Usage: quick start(s)
+
+There are two types of quick starts in OpenShift:
+
+* quick starts that are guided tutorials in the web console
+* quick start templates that allow users to quickly get started creating a new application
+
+Be sure to provide context about which type of quick start you are referring to.
== R
''''
-=== ReplicaSet
+=== replica set
-Usage: ReplicaSet(s)
+Usage: replica set(s)
-Similar to a ReplicationController, a ReplicaSet is a native Kubernetes API
+Similar to a replication controller, a replica set is a native Kubernetes API
object that ensures a specified number of pod replicas are running at any given
-time. Used by Deployments.
+time. Used by `Deployment` objects.
-Do not use: replica set(s)
+When referencing the actual object, write as `ReplicaSet`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
See link:https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/[ReplicaSet - Kubernetes].
''''
-=== ReplicationController
+=== replication controller
-Usage: ReplicationController(s)
+Usage: replication controller(s)
Kubernetes object that ensures N (as specified by the user) instances of a given
-Pod are running at all times. Used by DeploymentConfigs.
-
-Do not use: replication controller(s)
+pod are running at all times. Used by `DeploymentConfig` objects.
''''
-=== Route
+=== route
-Usage: Route(s)
+Usage: route(s)
-OpenShift-specific API object that allows developers to expose Services through
+OpenShift-specific API object that allows developers to expose services through
an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The
-Route may further specify TLS options and a certificate, or specify a public
-CNAME that the OpenShift router should also accept for HTTP and HTTPS traffic.
-An administrator typically configures their router to be visible outside the
-cluster firewall, and may also add additional security, caching, or traffic
-controls on the Service content.
+route might further specify TLS options and a certificate, or specify a public
+CNAME that the OpenShift Ingress Controller should also accept for HTTP and
+HTTPS traffic. An administrator typically configures their Ingress Controller to
+be visible outside the cluster firewall, and might also add additional security,
+caching, or traffic controls on the service content.
== S
''''
=== scheduler
-Usage: scheduler(s) as appropriate
+Usage: scheduler(s)
-Component of the Kubernetes master or OpenShift master that manages the state of
-the system, places Pods on nodes, and ensures that all containers that are
+Component of the Kubernetes control plane or OpenShift control plane that manages the state of
+the system, places pods on nodes, and ensures that all containers that are
expected to be running are actually running.
''''
@@ -419,21 +595,30 @@ Kubernetes API object that holds secret data of a certain type.
See link:https://kubernetes.io/docs/concepts/configuration/secret/[Secrets - Kubernetes].
''''
-=== Service
+=== security context constraints (SCC)
+
+Usage: security context constraints
+
+Security context constraints govern the ability to make requests that affect the security context that will be applied to a container.
+
+When referencing the actual object, write as `SecurityContextConstraints`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
+
+This is commonly abbreviated as SCC.
+
+''''
+=== service
-Usage: Service(s)
+Usage: service(s)
Kubernetes native API object that serves as an internal load balancer. It
-identifies a set of replicated Pods in order to proxy the connections it
-receives to them. Backing Pods can be added to or removed from a Service
-arbitrarily while the Service remains consistently available, enabling anything
-that depends on the Service to refer to it at a consistent address.
+identifies a set of replicated pods to proxy the connections it
+receives to them. Backing pods can be added to or removed from a service
+arbitrarily while the service remains consistently available, enabling anything
+that depends on the service to refer to it at a consistent address.
-A Service is a named abstraction of software service (for example, `mysql`)
+A service is a named abstraction of a software service (for example, `mysql`)
consisting of a local port (for example, `3306`) that the proxy listens on, and the
-selector that determines which Pods will answer requests sent through the proxy.
-
-Capitalize when referring to the Kubernetes object specifically.
+selector that determines which pods will answer requests sent through the proxy.
Do not confuse with link:https://www.openservicebrokerapi.org/[Open Service Broker API related objects].
See
@@ -455,8 +640,8 @@ A service account binds together:
Usage: SkyDNS
-Component of the Kubernetes master or OpenShift master that provides
-cluster-wide DNS resolution of internal host names for Services and Pods.
+Component of the Kubernetes control plane or OpenShift control plane that provides
+cluster-wide DNS resolution of internal hostnames for services and pods.
''''
=== Source-to-Image (S2I)
@@ -474,21 +659,48 @@ In addition to "spec file" being allowed related to RPM spec files, general
usage of "spec" is allowed when describing Kubernetes or OpenShift object specs
/ manifests / definitions.
-*Examples of correct usage*
+.Examples of correct usage
====
-Update the Pod spec to reflect the changes.
+Update the `Pod` spec to reflect the changes.
====
''''
-=== StorageClass
+=== storage class
-Usage: StorageClass(es)
+Usage: storage class(es)
Kubernetes API object that describes the parameters for a class of storage for
-which PersistentVolumes can be dynamically provisioned. StorageClasses are
-non-namespaced; the name of the StorageClass according to etcd is in
-ObjectMeta.Name.
+which persistent volumes can be dynamically provisioned. Storage classes are
+non-namespaced; the name of the storage class according to etcd is in
+`ObjectMeta.Name`.
+
+When referencing the actual object, write as `StorageClass`. See link:doc_guidelines.adoc#api-object-formatting[API object formatting] for more details.
See link:https://kubernetes.io/docs/concepts/storage/storage-classes/[Storage Classes - Kubernetes].
+== T
+
+== U
+
''''
+=== user-provisioned infrastructure
+
+Usage: user-provisioned infrastructure
+
+If the user must deploy and configure separate virtual or physical hosts as part of
+the cluster deployment process, it is a user-provisioned infrastructure
+installation.
+
+Do not use: UPI
+
+''''
+
+== V
+
+== W
+
+== X
+
+== Y
+
+== Z
diff --git a/contributing_to_docs/tools_and_setup.adoc b/contributing_to_docs/tools_and_setup.adoc
index 4ee69dd0d2ea..8bb2bae91e72 100644
--- a/contributing_to_docs/tools_and_setup.adoc
+++ b/contributing_to_docs/tools_and_setup.adoc
@@ -29,20 +29,28 @@ You must fork and set up the OpenShift documentation repository on your
workstation so that you can create PRs and contribute. These steps must only
be performed during initial setup.
-1. Fork the https://github.com/openshift/openshift-docs repository into your
+. Fork the https://github.com/openshift/openshift-docs repository into your
GitHub account from the GitHub UI. You can do this by clicking on *Fork* in the
upper right-hand corner.
-2. On your workstation, clone the forked repository on your workstation with the
-following command. Be sure to change into the directory where you want to clone,
-and replace __ with your actual GitHub username.
+. In the terminal on your workstation, change into the directory where you want
+to clone the forked repository.
+
+. Clone the forked repository onto your workstation with the following
+command, replacing `<user_name>` with your actual GitHub username.
+
----
-$ git clone git@github.com:user_name/openshift-docs.git
+$ git clone git@github.com:<user_name>/openshift-docs.git
----
-3. From your local repository you just cloned, add an upstream pointer back to
-the OpenShift's remote repository, in this case _openshift-docs_.
+. Change into the directory for the local repository you just cloned.
++
+----
+$ cd openshift-docs
+----
+
+. Add an upstream pointer back to the OpenShift remote repository, in this
+case _openshift-docs_.
+
----
$ git remote add upstream git@github.com:openshift/openshift-docs.git
@@ -54,7 +62,7 @@ repository in sync with it.
== Install AsciiBinder and dependencies
When you have the documentation repository cloned and set up, you are ready to
install the software and tools you will use to create the content. All OpenShift
-documentation is created in AsciiDoc, and is processed with http://asciibinder.org[AsciiBinder],
+documentation is created in AsciiDoc, and is processed with https://github.com/redhataccess/ascii_binder[AsciiBinder],
which is an http://asciidoctor.org/[AsciiDoctor]-based docs management system.
@@ -74,15 +82,26 @@ The following instructions describe how to install all the required tools to do
live content editing on a Fedora Linux system.
1. Install the _RubyGems_ package with `yum install rubygems`
++
+[NOTE]
+====
+On certain systems, `yum` installs an older version of RubyGems that can cause issues. As an alternative, you can install RubyGems by using RVM. The following example is referenced from the link:https://rvm.io/rvm/install[RVM site]:
+
+[source,terminal]
+----
+$ curl -sSL https://get.rvm.io | bash -s stable --ruby
+----
+====
+
2. Install _Ruby_ development packages with `yum install ruby-devel`
3. Install _gcc_ with `yum install gcc-c++`
4. Install _redhat-rpm-config_ with `yum install redhat-rpm-config`
5. Install _make_ with `yum install make`
-6. Install (1.5.6 version of) _asciidoctor-diagram_ with `gem install asciidoctor-diagram --version=1.5.6`
+6. Install _asciidoctor-diagram_ with `gem install asciidoctor-diagram`
7. Install the _ascii_binder_ gem with `gem install ascii_binder`
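+
+For example, you can install all of the required packages on a Fedora system as follows (assuming `sudo` access):
+
+[source,terminal]
+----
+$ sudo yum install rubygems ruby-devel gcc-c++ redhat-rpm-config make
+----
+
+Then install the required gems:
+
+[source,terminal]
+----
+$ gem install asciidoctor-diagram ascii_binder
+----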
NOTE: If you already have AsciiBinder installed, you might be due for an update.
-These directions assume that you are using AsciiBinder 0.1.15 or newer. To check
+These directions assume that you are using AsciiBinder 0.2.0 or newer. To check
and update if necessary, simply run `gem update ascii_binder`. Note that you might require root permissions.
=== Building the collection
@@ -116,3 +135,55 @@ edit existing content or create assemblies and modules.
some basic guidelines to keep things consistent across our content.
* link:create_or_edit_content.adoc[Create a local working branch] on your
workstation to edit existing content or create content.
+
+=== How to deploy to your own OpenShift cluster for testing
+
+You can deploy to your own OpenShift cluster for development. This process uses your GitHub repo to launch the website,
+and therefore your GitHub repo must have all of the upstream branches. `main` is used for site changes,
+so assuming all your work is in `main`, you can remove all remote branches and then push the upstream branches.
+
+
+Removing remote branches and updating with upstream branches (this assumes remotes named `origin` and `upstream`):
+[WARNING]
+====
+This is a destructive process. Make sure that this is purely a development repo, because all local and remote branches will be deleted
+by the following commands.
+====
+----
+$ git fetch --all
+$ for branch in $(git branch -r | grep -v "main" | grep "^ origin"); do git push origin --delete $(echo $branch | cut -d '/' -f 2); done
+$ git branch -D $(git branch | grep -v 'main' | xargs)
+$ for branch in $(git branch -r | grep -v "main" | grep "^ upstream"); do git branch --track $(echo $branch | cut -d '/' -f 2) $(echo $branch | tr -d '[:space:]'); done
+$ for branch in $(git branch | grep -v "main"); do git push origin $(echo $branch | tr -d '[:space:]'); done
+----
+
+Deploying the docs site to an OpenShift cluster
+----
+$ oc process -f asciibinder-template.yml -p NAME=community-docs \
+ -p SOURCE_REPOSITORY_URL=$(git remote get-url origin) \
+ -p SOURCE_REPOSITORY_REF=$(git rev-parse --abbrev-ref HEAD) \
+ -p DOC_TYPE=community \
+ | oc create -f -
+$ oc process -f asciibinder-template.yml -p NAME=commercial-docs \
+ -p SOURCE_REPOSITORY_URL=$(git remote get-url origin) \
+ -p SOURCE_REPOSITORY_REF=$(git rev-parse --abbrev-ref HEAD) \
+ -p DOC_TYPE=commercial \
+ | oc create -f -
+----
+
+[NOTE]
+====
+If the build fails with "Fetch source failed" status, you can
+delete all the created objects and rerun the above commands with an HTTP URI
+as the `SOURCE_REPOSITORY_URL`, or you can
+link:https://docs.okd.io/latest/dev_guide/builds/build_inputs.html#source-secrets-combinations[create a source secret]
+and add it to the stg1 build by running `oc set build-secret --source bc/stg1-docs <secret_name>`.
+====
+
+
+You can delete all of the created objects by running the following commands:
+
+----
+$ oc delete all -l app=community-docs
+$ oc delete all -l app=commercial-docs
+----
diff --git a/disaster_recovery/backing-up-etcd.adoc b/disaster_recovery/backing-up-etcd.adoc
deleted file mode 100644
index 88d23be09297..000000000000
--- a/disaster_recovery/backing-up-etcd.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-[id="backup-etcd"]
-= Backing up etcd
-include::modules/common-attributes.adoc[]
-:context: backup-etcd
-
-toc::[]
-
-// Backing up etcd data
-include::modules/backup-etcd.adoc[leveloffset=+1]
diff --git a/disaster_recovery/scenario-1-infra-recovery.adoc b/disaster_recovery/scenario-1-infra-recovery.adoc
deleted file mode 100644
index 018fa272c711..000000000000
--- a/disaster_recovery/scenario-1-infra-recovery.adoc
+++ /dev/null
@@ -1,21 +0,0 @@
-[id="dr-infrastructure-recovery"]
-= Recovering from lost master hosts
-include::modules/common-attributes.adoc[]
-:context: dr-infrastructure-recovery
-
-toc::[]
-
-This document describes the process to recover from a complete loss of a master host. This includes
-situations where a majority of master hosts have been lost, leading to etcd quorum loss and the cluster going offline.
-
-At a high level, the procedure is to:
-
-. Restore etcd quorum on a remaining master host.
-. Create new master hosts.
-. Correct DNS and load balancer entries.
-. Grow etcd to full membership.
-
-If the majority of master hosts have been lost, you will need a xref:../disaster_recovery/backing-up-etcd.html#backing-up-etcd-data_backup-etcd[backed up etcd snapshot] to restore etcd quorum on the remaining master host.
-
-// Recovering from lost master hosts
-include::modules/dr-recover-lost-control-plane-hosts.adoc[leveloffset=+1]
diff --git a/disaster_recovery/scenario-2-restoring-cluster-state.adoc b/disaster_recovery/scenario-2-restoring-cluster-state.adoc
deleted file mode 100644
index 9520749dc65a..000000000000
--- a/disaster_recovery/scenario-2-restoring-cluster-state.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-[id="dr-restoring-cluster-state"]
-= Restoring back to a previous cluster state
-include::modules/common-attributes.adoc[]
-:context: dr-restoring-cluster-state
-
-toc::[]
-
-In order to restore the cluster to a previous state, you must have previously xref:../disaster_recovery/backing-up-etcd.html#backing-up-etcd-data_backup-etcd[backed up etcd data] by creating a snapshot. You will use this snapshot to restore the cluster state.
-
-// Restoring back to a previous cluster state
-include::modules/dr-restoring-cluster-state.adoc[leveloffset=+1]
diff --git a/getting_started/accessing-your-services.adoc b/getting_started/accessing-your-services.adoc
new file mode 100644
index 000000000000..cffc2de5a029
--- /dev/null
+++ b/getting_started/accessing-your-services.adoc
@@ -0,0 +1,27 @@
+[id="accessing-your-services"]
+= Accessing your services
+include::modules/common-attributes.adoc[]
+:context: access
+
+toc::[]
+
+After you have an {product-title} subscription, you can access your services.
+
+include::modules/dedicated-creating-your-cluster.adoc[leveloffset=+1]
+
+////
+
+== Receiving status updates
+
+Access the status portal at link:https://status-dedicated.openshift.com[]. You
+can also subscribe to notifications via email, SMS, or RSS by changing your
+preferences in the status portal.
+
+////
+
+== Requesting support
+
+If you have questions about your environment or must open a support ticket,
+you can open or view a support case in the
+link:https://access.redhat.com/support/cases/#/case/list[Red Hat Customer
+Portal].
diff --git a/getting_started/dedicated-networking.adoc b/getting_started/dedicated-networking.adoc
new file mode 100644
index 000000000000..177410834af6
--- /dev/null
+++ b/getting_started/dedicated-networking.adoc
@@ -0,0 +1,9 @@
+[id="dedicated-networking"]
+= Networking
+include::modules/common-attributes.adoc[]
+:context: access
+
+toc::[]
+
+include::modules/dedicated-configuring-your-application-routes.adoc[leveloffset=+1]
+include::modules/dedicated-exposing-TCP-services.adoc[leveloffset=+1]
diff --git a/getting_started/deleting-your-cluster.adoc b/getting_started/deleting-your-cluster.adoc
new file mode 100644
index 000000000000..d1d4fb14b0b5
--- /dev/null
+++ b/getting_started/deleting-your-cluster.adoc
@@ -0,0 +1,15 @@
+[id="deleting-your-cluster"]
+= Deleting your cluster
+include::modules/common-attributes.adoc[]
+:context: deleting-your-cluster
+
+To delete your {product-title} cluster:
+
+. From link:https://console.redhat.com/openshift[console.redhat.com/openshift], click
+  the cluster that you want to delete.
+
+. Click the *Actions* button, and then click *Delete Cluster*.
+
+. To confirm, type the name of the cluster, which is highlighted in bold, and then click *Delete*.
+
+After you confirm, cluster deletion proceeds automatically.
diff --git a/getting_started/images b/getting_started/images
new file mode 120000
index 000000000000..e4c5bd02a10a
--- /dev/null
+++ b/getting_started/images
@@ -0,0 +1 @@
+../images/
\ No newline at end of file
diff --git a/getting_started/modules b/getting_started/modules
new file mode 120000
index 000000000000..43aab75b53c9
--- /dev/null
+++ b/getting_started/modules
@@ -0,0 +1 @@
+../modules/
\ No newline at end of file
diff --git a/getting_started/scaling-your-cluster.adoc b/getting_started/scaling-your-cluster.adoc
new file mode 100644
index 000000000000..1a63dd5c1d3d
--- /dev/null
+++ b/getting_started/scaling-your-cluster.adoc
@@ -0,0 +1,8 @@
+[id="scaling-your-cluster"]
+= Scaling your cluster
+include::modules/common-attributes.adoc[]
+:context: scaling-your-cluster
+
+toc::[]
+
+include::modules/dedicated-scaling-your-cluster.adoc[leveloffset=+1]
diff --git a/hardware_enablement/about-hardware-enablement.adoc b/hardware_enablement/about-hardware-enablement.adoc
new file mode 100644
index 000000000000..25a8d8053ff3
--- /dev/null
+++ b/hardware_enablement/about-hardware-enablement.adoc
@@ -0,0 +1,12 @@
+[id="about-hardware-enablement"]
+= About specialized hardware and driver enablement
+include::modules/common-attributes.adoc[]
+:context: about-hardware-enablement
+
+toc::[]
+
+Many applications require specialized hardware or software that depends on kernel modules or drivers. You can use driver containers to load out-of-tree kernel modules on {op-system-first} nodes. To deploy out-of-tree drivers during cluster installation, use the `kmods-via-containers` framework. To load drivers or kernel modules on an existing {product-title} cluster, {product-title} offers several tools:
+
+* The Driver Toolkit is a container image that is part of every {product-title} release. It contains the kernel packages and other common dependencies that are needed to build a driver or kernel module. The Driver Toolkit can be used as a base image for driver container image builds on {product-title}, as sketched in the example after this list.
+* The Special Resource Operator (SRO) orchestrates the building and management of driver containers to load kernel modules and drivers on an existing OpenShift or Kubernetes cluster.
+* The Node Feature Discovery (NFD) Operator adds node labels for CPU capabilities, kernel version, PCIe device vendor IDs, and more.
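+
+The following Dockerfile is a minimal sketch of such a driver container build, using the Driver Toolkit as the base image. The module, its Git repository, and the image tag are illustrative placeholders; the tag must match the {product-title} release that runs on your nodes:
+
+[source,dockerfile]
+----
+# Sketch only: pick the Driver Toolkit tag that matches your cluster release.
+FROM registry.redhat.io/openshift4/driver-toolkit-rhel8:v4.9
+
+# Fetch an example out-of-tree kernel module (placeholder repository).
+RUN git clone https://github.com/openshift-psap/simple-kmod.git /usr/src/simple-kmod
+WORKDIR /usr/src/simple-kmod
+
+# Build and install against the kernel headers shipped in the Driver Toolkit.
+RUN make all install KVER=$(rpm -q --qf "%{VERSION}-%{RELEASE}.%{ARCH}" kernel-core)
+----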
diff --git a/applications/service_brokers/images b/hardware_enablement/images
similarity index 100%
rename from applications/service_brokers/images
rename to hardware_enablement/images
diff --git a/applications/service_brokers/modules b/hardware_enablement/modules
similarity index 100%
rename from applications/service_brokers/modules
rename to hardware_enablement/modules
diff --git a/hardware_enablement/psap-driver-toolkit.adoc b/hardware_enablement/psap-driver-toolkit.adoc
new file mode 100644
index 000000000000..7b87e94e8273
--- /dev/null
+++ b/hardware_enablement/psap-driver-toolkit.adoc
@@ -0,0 +1,22 @@
+[id="driver-toolkit"]
+= Driver Toolkit
+include::modules/common-attributes.adoc[]
+:context: driver-toolkit
+
+toc::[]
+
+Learn about the Driver Toolkit and how you can use it as a base image for driver containers that enable special software and hardware devices on Kubernetes.
+
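+As a quick orientation, and only a sketch: you can look up the Driver Toolkit image that corresponds to a specific release payload with `oc adm release info`, for example:
+
+[source,terminal]
+----
+$ oc adm release info quay.io/openshift-release-dev/ocp-release:4.9.0-x86_64 --image-for=driver-toolkit
+----
+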
+:FeatureName: The Driver Toolkit
+include::modules/technology-preview.adoc[leveloffset=+0]
+
+include::modules/psap-driver-toolkit.adoc[leveloffset=+1]
+
+include::modules/psap-driver-toolkit-pulling.adoc[leveloffset=+1]
+
+include::modules/psap-driver-toolkit-using.adoc[leveloffset=+1]
+
+[id="additional-resources_driver-toolkkit-id"]
+== Additional resources
+
+* For more information about configuring registry storage for your cluster, see xref:../registry/configuring-registry-operator.adoc#registry-removed_configuring-registry-operator[Image Registry Operator in OpenShift Container Platform].
\ No newline at end of file
diff --git a/hardware_enablement/psap-node-feature-discovery-operator.adoc b/hardware_enablement/psap-node-feature-discovery-operator.adoc
new file mode 100644
index 000000000000..9993ee2e3cd3
--- /dev/null
+++ b/hardware_enablement/psap-node-feature-discovery-operator.adoc
@@ -0,0 +1,16 @@
+[id="node-feature-discovery-operator"]
+= Node Feature Discovery Operator
+include::modules/common-attributes.adoc[]
+:context: node-feature-discovery-operator
+
+toc::[]
+
+Learn about the Node Feature Discovery (NFD) Operator and how you can use it to expose node-level information by orchestrating Node Feature Discovery, a Kubernetes add-on for detecting hardware features and system configuration.
+
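+For example, once NFD is running, nodes carry labels in the `feature.node.kubernetes.io` namespace, such as `feature.node.kubernetes.io/cpu-cpuid.AESNI=true` or `feature.node.kubernetes.io/pci-10de.present=true`, that workloads can target with node selectors. A minimal check, with the node name as a placeholder:
+
+[source,terminal]
+----
+$ oc get node <node_name> -o jsonpath='{.metadata.labels}'
+----
+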
+include::modules/psap-node-feature-discovery-operator.adoc[leveloffset=+1]
+
+include::modules/psap-installing-node-feature-discovery-operator.adoc[leveloffset=+1]
+
+include::modules/psap-using-node-feature-discovery-operator.adoc[leveloffset=+1]
+
+include::modules/psap-configuring-node-feature-discovery.adoc[leveloffset=+1]
diff --git a/hardware_enablement/psap-special-resource-operator.adoc b/hardware_enablement/psap-special-resource-operator.adoc
new file mode 100644
index 000000000000..05170c892aac
--- /dev/null
+++ b/hardware_enablement/psap-special-resource-operator.adoc
@@ -0,0 +1,36 @@
+[id="special-resource-operator"]
+= Special Resource Operator
+include::modules/common-attributes.adoc[]
+:context: special-resource-operator
+
+toc::[]
+
+Learn about the Special Resource Operator (SRO) and how you can use it to build and manage driver containers for loading kernel modules and device drivers on nodes in an {product-title} cluster.
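+
+The following `SpecialResource` object is a minimal sketch of the shape of such a resource; the group and version (`sro.openshift.io/v1beta1`) reflect the SRO CRD, while the names, chart, and repository URL are illustrative placeholders:
+
+[source,yaml]
+----
+apiVersion: sro.openshift.io/v1beta1
+kind: SpecialResource
+metadata:
+  name: simple-kmod
+spec:
+  namespace: simple-kmod   # namespace for the driver container workloads
+  chart:
+    name: simple-kmod      # Helm chart describing the build and deployment
+    version: 0.0.1
+    repository:
+      name: example
+      url: file:///charts/example   # placeholder chart repository
+----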
+
+
+:FeatureName: The Special Resource Operator
+include::modules/technology-preview.adoc[leveloffset=+0]
+
+include::modules/psap-special-resource-operator.adoc[leveloffset=+1]
+
+[id="installing-special-resource-operator"]
+== Installing the Special Resource Operator
+
+As a cluster administrator, you can install the Special Resource Operator (SRO) by using the OpenShift CLI or the web console.
+
+include::modules/psap-special-resource-operator-installing-using-cli.adoc[leveloffset=+2]
+
+include::modules/psap-special-resource-operator-installing-using-web-console.adoc[leveloffset=+2]
+
+include::modules/psap-special-resource-operator-using.adoc[leveloffset=+1]
+
+include::modules/psap-special-resource-operator-using-manifests.adoc[leveloffset=+2]
+
+include::modules/psap-special-resource-operator-using-configmaps.adoc[leveloffset=+2]
+
+[id="additional-resources_special-resource-operator"]
+== Additional resources
+
+* For information about restoring the Image Registry Operator state before using the Special Resource Operator, see
+xref:../registry/configuring-registry-operator.adoc#registry-removed_configuring-registry-operator[Image registry removed during installation].
+* For details about installing the NFD Operator, see xref:../hardware_enablement/psap-node-feature-discovery-operator.adoc#installing-the-node-feature-discovery-operator_node-feature-discovery-operator[Node Feature Discovery (NFD) Operator].
diff --git a/images/135_OpenShift_Distributed_Unit_0121.svg b/images/135_OpenShift_Distributed_Unit_0121.svg
new file mode 100644
index 000000000000..bac62a3ed04f
--- /dev/null
+++ b/images/135_OpenShift_Distributed_Unit_0121.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/142_OpenShift_credentials_STS_0221.svg b/images/142_OpenShift_credentials_STS_0221.svg
new file mode 100644
index 000000000000..0b90d3f8a49c
--- /dev/null
+++ b/images/142_OpenShift_credentials_STS_0221.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/150_OpenShift_VMware_on_AWS_0321_arch.svg b/images/150_OpenShift_VMware_on_AWS_0321_arch.svg
new file mode 100644
index 000000000000..963242149269
--- /dev/null
+++ b/images/150_OpenShift_VMware_on_AWS_0321_arch.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/152_OpenShift_Config_NTP_0421.svg b/images/152_OpenShift_Config_NTP_0421.svg
new file mode 100644
index 000000000000..d9e21af5f437
--- /dev/null
+++ b/images/152_OpenShift_Config_NTP_0421.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg b/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg
new file mode 100644
index 000000000000..4a5508c8c975
--- /dev/null
+++ b/images/161_OpenShift_Baremetal_IPI_Deployment_updates_0521.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/175_OpenShift_ACM_0821_1.png b/images/175_OpenShift_ACM_0821_1.png
new file mode 100644
index 000000000000..61743c591d6f
Binary files /dev/null and b/images/175_OpenShift_ACM_0821_1.png differ
diff --git a/images/175_OpenShift_ACM_0821_2.png b/images/175_OpenShift_ACM_0821_2.png
new file mode 100644
index 000000000000..7f517282613e
Binary files /dev/null and b/images/175_OpenShift_ACM_0821_2.png differ
diff --git a/images/176_OpenShift_zero_touch_provisioning_0821.png b/images/176_OpenShift_zero_touch_provisioning_0821.png
new file mode 100644
index 000000000000..766887b66ada
Binary files /dev/null and b/images/176_OpenShift_zero_touch_provisioning_0821.png differ
diff --git a/images/177_OpenShift_cluster_provisioning_0821.png b/images/177_OpenShift_cluster_provisioning_0821.png
new file mode 100644
index 000000000000..eff4a6d28fc7
Binary files /dev/null and b/images/177_OpenShift_cluster_provisioning_0821.png differ
diff --git a/images/179_OpenShift_NBDE_implementation_0821_1.png b/images/179_OpenShift_NBDE_implementation_0821_1.png
new file mode 100644
index 000000000000..7007bcbeb721
Binary files /dev/null and b/images/179_OpenShift_NBDE_implementation_0821_1.png differ
diff --git a/images/179_OpenShift_NBDE_implementation_0821_2.png b/images/179_OpenShift_NBDE_implementation_0821_2.png
new file mode 100644
index 000000000000..2fe21e0832db
Binary files /dev/null and b/images/179_OpenShift_NBDE_implementation_0821_2.png differ
diff --git a/images/179_OpenShift_NBDE_implementation_0821_3.png b/images/179_OpenShift_NBDE_implementation_0821_3.png
new file mode 100644
index 000000000000..f70ddba3da14
Binary files /dev/null and b/images/179_OpenShift_NBDE_implementation_0821_3.png differ
diff --git a/images/179_OpenShift_NBDE_implementation_0821_4.png b/images/179_OpenShift_NBDE_implementation_0821_4.png
new file mode 100644
index 000000000000..67d574b2c50a
Binary files /dev/null and b/images/179_OpenShift_NBDE_implementation_0821_4.png differ
diff --git a/images/183_OpenShift_ZTP_0921.png b/images/183_OpenShift_ZTP_0921.png
new file mode 100644
index 000000000000..6ec2ceac46ce
Binary files /dev/null and b/images/183_OpenShift_ZTP_0921.png differ
diff --git a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_1.png b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_1.png
new file mode 100644
index 000000000000..cd53a0a5dcc6
Binary files /dev/null and b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_1.png differ
diff --git a/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png
new file mode 100644
index 000000000000..9ddf2df1f1ff
Binary files /dev/null and b/images/4.4-71_OpenShift_Baremetal_IPI_Depoyment_0320_2.png differ
diff --git a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg
new file mode 100644
index 000000000000..04effb30efd1
--- /dev/null
+++ b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_1.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg
new file mode 100644
index 000000000000..3f07dcb5af48
--- /dev/null
+++ b/images/71_OpenShift_4.6_Baremetal_IPI_Deployment_1020_2.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/92_OpenShift_Cluster_Install_RHV_0520.png b/images/92_OpenShift_Cluster_Install_RHV_0520.png
new file mode 100644
index 000000000000..885d338abe38
Binary files /dev/null and b/images/92_OpenShift_Cluster_Install_RHV_0520.png differ
diff --git a/images/OCP_3_to_4_App_migration.png b/images/OCP_3_to_4_App_migration.png
new file mode 100644
index 000000000000..a1f14cd4dd89
Binary files /dev/null and b/images/OCP_3_to_4_App_migration.png differ
diff --git a/images/Operator_Icon-OpenShift_Virtualization-5.png b/images/Operator_Icon-OpenShift_Virtualization-5.png
new file mode 100644
index 000000000000..7fd768c9155b
Binary files /dev/null and b/images/Operator_Icon-OpenShift_Virtualization-5.png differ
diff --git a/images/add-serverless-app-dev.png b/images/add-serverless-app-dev.png
new file mode 100644
index 000000000000..1e69d3206d9c
Binary files /dev/null and b/images/add-serverless-app-dev.png differ
diff --git a/images/add-trigger-odc.png b/images/add-trigger-odc.png
new file mode 100644
index 000000000000..b5c98cc4bd9b
Binary files /dev/null and b/images/add-trigger-odc.png differ
diff --git a/images/admin-console-create-binding-event-source-1.png b/images/admin-console-create-binding-event-source-1.png
new file mode 100644
index 000000000000..0f30f0cb4399
Binary files /dev/null and b/images/admin-console-create-binding-event-source-1.png differ
diff --git a/images/admin-console-create-binding-event-source-2.png b/images/admin-console-create-binding-event-source-2.png
new file mode 100644
index 000000000000..5b9b512c7baf
Binary files /dev/null and b/images/admin-console-create-binding-event-source-2.png differ
diff --git a/images/admin-console-create-role-event-source.png b/images/admin-console-create-role-event-source.png
new file mode 100644
index 000000000000..621b1b555223
Binary files /dev/null and b/images/admin-console-create-role-event-source.png differ
diff --git a/images/admin-console-create-sa-event-source.png b/images/admin-console-create-sa-event-source.png
new file mode 100644
index 000000000000..a1501d594768
Binary files /dev/null and b/images/admin-console-create-sa-event-source.png differ
diff --git a/images/alerts-screen.png b/images/alerts-screen.png
deleted file mode 100644
index a59255f8c4a6..000000000000
Binary files a/images/alerts-screen.png and /dev/null differ
diff --git a/images/api-admission-chain.png b/images/api-admission-chain.png
new file mode 100644
index 000000000000..8c483eab6d3c
Binary files /dev/null and b/images/api-admission-chain.png differ
diff --git a/images/app-launcher.png b/images/app-launcher.png
new file mode 100644
index 000000000000..6a5ac43a54c8
Binary files /dev/null and b/images/app-launcher.png differ
diff --git a/images/bringing_it_all_together.png b/images/bringing_it_all_together.png
new file mode 100644
index 000000000000..557d6196f208
Binary files /dev/null and b/images/bringing_it_all_together.png differ
diff --git a/images/build_process1.png b/images/build_process1.png
new file mode 100644
index 000000000000..c721722629e3
Binary files /dev/null and b/images/build_process1.png differ
diff --git a/images/build_process2.png b/images/build_process2.png
new file mode 100644
index 000000000000..8edbaf557cc9
Binary files /dev/null and b/images/build_process2.png differ
diff --git a/images/create-eventing-namespace.png b/images/create-eventing-namespace.png
new file mode 100644
index 000000000000..cd316ed23011
Binary files /dev/null and b/images/create-eventing-namespace.png differ
diff --git a/images/create-nodes.png b/images/create-nodes.png
index 3f791f8184d2..07d9ea6a5fbe 100644
Binary files a/images/create-nodes.png and b/images/create-nodes.png differ
diff --git a/images/create-serving-namespace.png b/images/create-serving-namespace.png
new file mode 100644
index 000000000000..9d8ef2abb9ce
Binary files /dev/null and b/images/create-serving-namespace.png differ
diff --git a/images/create-sub-ODC.png b/images/create-sub-ODC.png
new file mode 100644
index 000000000000..54a81308a898
Binary files /dev/null and b/images/create-sub-ODC.png differ
diff --git a/images/cso-namespace-vulnerable.png b/images/cso-namespace-vulnerable.png
new file mode 100644
index 000000000000..948a6dc81276
Binary files /dev/null and b/images/cso-namespace-vulnerable.png differ
diff --git a/images/cso-registry-vulnerable.png b/images/cso-registry-vulnerable.png
new file mode 100644
index 000000000000..c9b147d11cf3
Binary files /dev/null and b/images/cso-registry-vulnerable.png differ
diff --git a/images/custom_4.5.png b/images/custom_4.5.png
new file mode 100644
index 000000000000..7a8d1607d2cd
Binary files /dev/null and b/images/custom_4.5.png differ
diff --git a/images/custom_4.5.svg b/images/custom_4.5.svg
new file mode 100644
index 000000000000..8116cb5e88eb
--- /dev/null
+++ b/images/custom_4.5.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/darkcircle-0.png b/images/darkcircle-0.png
new file mode 100644
index 000000000000..5ab465076d8f
Binary files /dev/null and b/images/darkcircle-0.png differ
diff --git a/images/darkcircle-1.png b/images/darkcircle-1.png
new file mode 100644
index 000000000000..7b16d8ed9932
Binary files /dev/null and b/images/darkcircle-1.png differ
diff --git a/images/darkcircle-10.png b/images/darkcircle-10.png
new file mode 100644
index 000000000000..dfdc4f8d4ea4
Binary files /dev/null and b/images/darkcircle-10.png differ
diff --git a/images/darkcircle-11.png b/images/darkcircle-11.png
new file mode 100644
index 000000000000..9bdcfea71d14
Binary files /dev/null and b/images/darkcircle-11.png differ
diff --git a/images/darkcircle-12.png b/images/darkcircle-12.png
new file mode 100644
index 000000000000..303bcd41f52d
Binary files /dev/null and b/images/darkcircle-12.png differ
diff --git a/images/darkcircle-2.png b/images/darkcircle-2.png
new file mode 100644
index 000000000000..a537be6f42c8
Binary files /dev/null and b/images/darkcircle-2.png differ
diff --git a/images/darkcircle-3.png b/images/darkcircle-3.png
new file mode 100644
index 000000000000..a22625c683a8
Binary files /dev/null and b/images/darkcircle-3.png differ
diff --git a/images/darkcircle-4.png b/images/darkcircle-4.png
new file mode 100644
index 000000000000..27d03e8c1f30
Binary files /dev/null and b/images/darkcircle-4.png differ
diff --git a/images/darkcircle-5.png b/images/darkcircle-5.png
new file mode 100644
index 000000000000..8a59e47b1498
Binary files /dev/null and b/images/darkcircle-5.png differ
diff --git a/images/darkcircle-6.png b/images/darkcircle-6.png
new file mode 100644
index 000000000000..c8b686908d54
Binary files /dev/null and b/images/darkcircle-6.png differ
diff --git a/images/darkcircle-7.png b/images/darkcircle-7.png
new file mode 100644
index 000000000000..2503523947d4
Binary files /dev/null and b/images/darkcircle-7.png differ
diff --git a/images/darkcircle-8.png b/images/darkcircle-8.png
new file mode 100644
index 000000000000..b14edd088c2c
Binary files /dev/null and b/images/darkcircle-8.png differ
diff --git a/images/darkcircle-9.png b/images/darkcircle-9.png
new file mode 100644
index 000000000000..5cc237f70578
Binary files /dev/null and b/images/darkcircle-9.png differ
diff --git a/images/default_4.5.png b/images/default_4.5.png
new file mode 100644
index 000000000000..9aa577d33900
Binary files /dev/null and b/images/default_4.5.png differ
diff --git a/images/default_4.5.svg b/images/default_4.5.svg
new file mode 100644
index 000000000000..42ea9d8ad941
--- /dev/null
+++ b/images/default_4.5.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/delete-apiserversource-odc.png b/images/delete-apiserversource-odc.png
new file mode 100644
index 000000000000..eec08d930913
Binary files /dev/null and b/images/delete-apiserversource-odc.png differ
diff --git a/images/delete-trigger-odc.png b/images/delete-trigger-odc.png
new file mode 100644
index 000000000000..abe9bc1d8a49
Binary files /dev/null and b/images/delete-trigger-odc.png differ
diff --git a/images/delete.png b/images/delete.png
new file mode 100644
index 000000000000..cec427f39142
Binary files /dev/null and b/images/delete.png differ
diff --git a/images/ellipsis-v.svg b/images/ellipsis-v.svg
new file mode 100644
index 000000000000..c3074e62602a
--- /dev/null
+++ b/images/ellipsis-v.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/event-sources-odc.png b/images/event-sources-odc.png
new file mode 100644
index 000000000000..d3a13fea55a3
Binary files /dev/null and b/images/event-sources-odc.png differ
diff --git a/images/eventing-YAML-HA.png b/images/eventing-YAML-HA.png
new file mode 100644
index 000000000000..c928146179ba
Binary files /dev/null and b/images/eventing-YAML-HA.png differ
diff --git a/images/eventing-conditions-true.png b/images/eventing-conditions-true.png
new file mode 100644
index 000000000000..ba904f8fcf61
Binary files /dev/null and b/images/eventing-conditions-true.png differ
diff --git a/images/eventing-form-view.png b/images/eventing-form-view.png
new file mode 100644
index 000000000000..935f9b2c2ce6
Binary files /dev/null and b/images/eventing-form-view.png differ
diff --git a/images/eventing-installed-operator.png b/images/eventing-installed-operator.png
new file mode 100644
index 000000000000..469204bad5e8
Binary files /dev/null and b/images/eventing-installed-operator.png differ
diff --git a/images/eventing-overview.png b/images/eventing-overview.png
new file mode 100644
index 000000000000..4264c5bc4d61
Binary files /dev/null and b/images/eventing-overview.png differ
diff --git a/images/eventing-tab-created.png b/images/eventing-tab-created.png
new file mode 100644
index 000000000000..a9276a4aa8d2
Binary files /dev/null and b/images/eventing-tab-created.png differ
diff --git a/images/filtered-messages.png b/images/filtered-messages.png
new file mode 100644
index 000000000000..a2849401fe99
Binary files /dev/null and b/images/filtered-messages.png differ
diff --git a/images/flow1.png b/images/flow1.png
new file mode 100644
index 000000000000..0c1201620c33
Binary files /dev/null and b/images/flow1.png differ
diff --git a/images/flow2.png b/images/flow2.png
new file mode 100644
index 000000000000..105d2dbea3c2
Binary files /dev/null and b/images/flow2.png differ
diff --git a/images/flow3.png b/images/flow3.png
new file mode 100644
index 000000000000..af0b1c94c442
Binary files /dev/null and b/images/flow3.png differ
diff --git a/images/flow4.png b/images/flow4.png
new file mode 100644
index 000000000000..dcf737241600
Binary files /dev/null and b/images/flow4.png differ
diff --git a/images/grid.png b/images/grid.png
new file mode 100644
index 000000000000..57998b4e58e7
Binary files /dev/null and b/images/grid.png differ
diff --git a/images/image_security.png b/images/image_security.png
new file mode 100644
index 000000000000..a1254d2a273a
Binary files /dev/null and b/images/image_security.png differ
diff --git a/images/ingress-certificates-workflow.png b/images/ingress-certificates-workflow.png
new file mode 100644
index 000000000000..77edf2652acf
Binary files /dev/null and b/images/ingress-certificates-workflow.png differ
diff --git a/images/kafka-YAML-HA.png b/images/kafka-YAML-HA.png
new file mode 100644
index 000000000000..10252db282f2
Binary files /dev/null and b/images/kafka-YAML-HA.png differ
diff --git a/images/kafka-installed-operator.png b/images/kafka-installed-operator.png
new file mode 100644
index 000000000000..95d5fbd9710c
Binary files /dev/null and b/images/kafka-installed-operator.png differ
diff --git a/images/kafka-tab-created.png b/images/kafka-tab-created.png
new file mode 100644
index 000000000000..46fc0483fdc7
Binary files /dev/null and b/images/kafka-tab-created.png differ
diff --git a/images/knative-admin-health-status-dash.png b/images/knative-admin-health-status-dash.png
new file mode 100644
index 000000000000..90854acc3679
Binary files /dev/null and b/images/knative-admin-health-status-dash.png differ
diff --git a/images/knative-kafka-overview.png b/images/knative-kafka-overview.png
new file mode 100644
index 000000000000..2be315547a64
Binary files /dev/null and b/images/knative-kafka-overview.png differ
diff --git a/images/knative-service-architecture.png b/images/knative-service-architecture.png
new file mode 100644
index 000000000000..fcd4f72826af
Binary files /dev/null and b/images/knative-service-architecture.png differ
diff --git a/images/knative-serving-created.png b/images/knative-serving-created.png
new file mode 100644
index 000000000000..d6e4f6a47720
Binary files /dev/null and b/images/knative-serving-created.png differ
diff --git a/images/knative-serving-overview.png b/images/knative-serving-overview.png
new file mode 100644
index 000000000000..7980664456f3
Binary files /dev/null and b/images/knative-serving-overview.png differ
diff --git a/images/migration-PV-copy.png b/images/migration-PV-copy.png
new file mode 100644
index 000000000000..a5b5baa39d98
Binary files /dev/null and b/images/migration-PV-copy.png differ
diff --git a/images/migration-PV-move.png b/images/migration-PV-move.png
new file mode 100644
index 000000000000..291fce220cf7
Binary files /dev/null and b/images/migration-PV-move.png differ
diff --git a/images/migration-architecture.png b/images/migration-architecture.png
new file mode 100644
index 000000000000..3ba8f035d15c
Binary files /dev/null and b/images/migration-architecture.png differ
diff --git a/images/mixed-windows-linux-workloads.png b/images/mixed-windows-linux-workloads.png
new file mode 100644
index 000000000000..27d4dbe21533
Binary files /dev/null and b/images/mixed-windows-linux-workloads.png differ
diff --git a/images/alert-overview.png b/images/monitoring-alert-overview.png
similarity index 100%
rename from images/alert-overview.png
rename to images/monitoring-alert-overview.png
diff --git a/images/alerting-rule-overview.png b/images/monitoring-alerting-rule-overview.png
similarity index 100%
rename from images/alerting-rule-overview.png
rename to images/monitoring-alerting-rule-overview.png
diff --git a/images/monitoring-alerting-rules-screen.png b/images/monitoring-alerting-rules-screen.png
new file mode 100644
index 000000000000..401365332e17
Binary files /dev/null and b/images/monitoring-alerting-rules-screen.png differ
diff --git a/images/monitoring-alerts-screen.png b/images/monitoring-alerts-screen.png
new file mode 100644
index 000000000000..0ed404546a33
Binary files /dev/null and b/images/monitoring-alerts-screen.png differ
diff --git a/images/monitoring-architecture.svg b/images/monitoring-architecture.svg
new file mode 100644
index 000000000000..9a648fdc3788
--- /dev/null
+++ b/images/monitoring-architecture.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/monitoring-dashboard-administrator.png b/images/monitoring-dashboard-administrator.png
new file mode 100644
index 000000000000..1138a2fc4722
Binary files /dev/null and b/images/monitoring-dashboard-administrator.png differ
diff --git a/images/monitoring-dashboard-compute-resources.png b/images/monitoring-dashboard-compute-resources.png
new file mode 100644
index 000000000000..ca1cdd7e209e
Binary files /dev/null and b/images/monitoring-dashboard-compute-resources.png differ
diff --git a/images/monitoring-dashboard-developer.png b/images/monitoring-dashboard-developer.png
new file mode 100644
index 000000000000..280515bd1748
Binary files /dev/null and b/images/monitoring-dashboard-developer.png differ
diff --git a/images/monitoring-metrics-developer.png b/images/monitoring-metrics-developer.png
new file mode 100644
index 000000000000..cbd0abaa88c0
Binary files /dev/null and b/images/monitoring-metrics-developer.png differ
diff --git a/images/monitoring-metrics-screen.png b/images/monitoring-metrics-screen.png
new file mode 100644
index 000000000000..4058f7f196dd
Binary files /dev/null and b/images/monitoring-metrics-screen.png differ
diff --git a/images/monitoring-silences-screen.png b/images/monitoring-silences-screen.png
new file mode 100644
index 000000000000..e46f5925cbad
Binary files /dev/null and b/images/monitoring-silences-screen.png differ
diff --git a/images/monitoring-yaml-screen.png b/images/monitoring-yaml-screen.png
new file mode 100644
index 000000000000..f18af6b0cc02
Binary files /dev/null and b/images/monitoring-yaml-screen.png differ
diff --git a/images/node-add-hpa-action.png b/images/node-add-hpa-action.png
new file mode 100644
index 000000000000..01f7c73fd779
Binary files /dev/null and b/images/node-add-hpa-action.png differ
diff --git a/images/not-placetools.png b/images/not-placetools.png
new file mode 100644
index 000000000000..cb70ff8c0325
Binary files /dev/null and b/images/not-placetools.png differ
diff --git a/images/nw-egress-ips-diagram.svg b/images/nw-egress-ips-diagram.svg
new file mode 100644
index 000000000000..3c996e5648dc
--- /dev/null
+++ b/images/nw-egress-ips-diagram.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/nw-ipsec-encryption.png b/images/nw-ipsec-encryption.png
new file mode 100644
index 000000000000..0d1fc46201b1
Binary files /dev/null and b/images/nw-ipsec-encryption.png differ
diff --git a/images/nw-metallb-layer2.png b/images/nw-metallb-layer2.png
new file mode 100644
index 000000000000..3cc343622e17
Binary files /dev/null and b/images/nw-metallb-layer2.png differ
diff --git a/images/odc-binding-connector.png b/images/odc-binding-connector.png
new file mode 100644
index 000000000000..efce41ce87af
Binary files /dev/null and b/images/odc-binding-connector.png differ
diff --git a/images/odc-recreate-update.png b/images/odc-recreate-update.png
new file mode 100644
index 000000000000..a0fda1238410
Binary files /dev/null and b/images/odc-recreate-update.png differ
diff --git a/images/odc-rolling-update.png b/images/odc-rolling-update.png
new file mode 100644
index 000000000000..1fa736639419
Binary files /dev/null and b/images/odc-rolling-update.png differ
diff --git a/images/odc-serverless-app.png b/images/odc-serverless-app.png
new file mode 100644
index 000000000000..8378acde156f
Binary files /dev/null and b/images/odc-serverless-app.png differ
diff --git a/images/odc-serverless-revisions.png b/images/odc-serverless-revisions.png
new file mode 100644
index 000000000000..b373b4753bdc
Binary files /dev/null and b/images/odc-serverless-revisions.png differ
diff --git a/images/odc-view-broker.png b/images/odc-view-broker.png
new file mode 100644
index 000000000000..d8ed44717f51
Binary files /dev/null and b/images/odc-view-broker.png differ
diff --git a/images/odc-wto-icon.png b/images/odc-wto-icon.png
new file mode 100644
index 000000000000..2c323700f609
Binary files /dev/null and b/images/odc-wto-icon.png differ
diff --git a/images/odc_add_to_project.png b/images/odc_add_to_project.png
new file mode 100644
index 000000000000..5b276655a9e4
Binary files /dev/null and b/images/odc_add_to_project.png differ
diff --git a/images/odc_add_view.png b/images/odc_add_view.png
new file mode 100644
index 000000000000..d343a614fc41
Binary files /dev/null and b/images/odc_add_view.png differ
diff --git a/images/odc_app_grouping_label.png b/images/odc_app_grouping_label.png
new file mode 100644
index 000000000000..d30ed4ea8a30
Binary files /dev/null and b/images/odc_app_grouping_label.png differ
diff --git a/images/odc_app_metrics.png b/images/odc_app_metrics.png
new file mode 100644
index 000000000000..d81ae7902451
Binary files /dev/null and b/images/odc_app_metrics.png differ
diff --git a/images/odc_application_topology.png b/images/odc_application_topology.png
new file mode 100644
index 000000000000..f418a45cfe97
Binary files /dev/null and b/images/odc_application_topology.png differ
diff --git a/images/odc_build_canceled.png b/images/odc_build_canceled.png
new file mode 100644
index 000000000000..0e4da383baf5
Binary files /dev/null and b/images/odc_build_canceled.png differ
diff --git a/images/odc_build_completed.png b/images/odc_build_completed.png
new file mode 100644
index 000000000000..fa55d761cf79
Binary files /dev/null and b/images/odc_build_completed.png differ
diff --git a/images/odc_build_failed.png b/images/odc_build_failed.png
new file mode 100644
index 000000000000..eb5a62cd455f
Binary files /dev/null and b/images/odc_build_failed.png differ
diff --git a/images/odc_build_new.png b/images/odc_build_new.png
new file mode 100644
index 000000000000..52cb97fd6b2c
Binary files /dev/null and b/images/odc_build_new.png differ
diff --git a/images/odc_build_pending.png b/images/odc_build_pending.png
new file mode 100644
index 000000000000..a9d4615733d3
Binary files /dev/null and b/images/odc_build_pending.png differ
diff --git a/images/odc_build_running.png b/images/odc_build_running.png
new file mode 100644
index 000000000000..a0aaa83590f0
Binary files /dev/null and b/images/odc_build_running.png differ
diff --git a/images/odc_che_workspace.png b/images/odc_che_workspace.png
new file mode 100644
index 000000000000..fd17944d96c3
Binary files /dev/null and b/images/odc_che_workspace.png differ
diff --git a/images/odc_connecting_multiple_applications.png b/images/odc_connecting_multiple_applications.png
new file mode 100644
index 000000000000..564b9593f1d3
Binary files /dev/null and b/images/odc_connecting_multiple_applications.png differ
diff --git a/images/odc_connector.png b/images/odc_connector.png
new file mode 100644
index 000000000000..c12ae3a16630
Binary files /dev/null and b/images/odc_connector.png differ
diff --git a/images/odc_context_menu.png b/images/odc_context_menu.png
new file mode 100644
index 000000000000..ef8905e784a8
Binary files /dev/null and b/images/odc_context_menu.png differ
diff --git a/images/odc_context_operator.png b/images/odc_context_operator.png
new file mode 100644
index 000000000000..112c608ecc78
Binary files /dev/null and b/images/odc_context_operator.png differ
diff --git a/images/odc_context_project.png b/images/odc_context_project.png
new file mode 100644
index 000000000000..8342c69d3dbc
Binary files /dev/null and b/images/odc_context_project.png differ
diff --git a/images/odc_create_project.png b/images/odc_create_project.png
new file mode 100644
index 000000000000..23a8dda1916f
Binary files /dev/null and b/images/odc_create_project.png differ
diff --git a/images/odc_deleting_deployment.png b/images/odc_deleting_deployment.png
new file mode 100644
index 000000000000..400329ce5794
Binary files /dev/null and b/images/odc_deleting_deployment.png differ
diff --git a/images/odc_devcatalog_toplogy.png b/images/odc_devcatalog_toplogy.png
new file mode 100644
index 000000000000..951ede330b87
Binary files /dev/null and b/images/odc_devcatalog_toplogy.png differ
diff --git a/images/odc_developer_perspective.png b/images/odc_developer_perspective.png
new file mode 100644
index 000000000000..ad24511c19c0
Binary files /dev/null and b/images/odc_developer_perspective.png differ
diff --git a/images/odc_edit_app.png b/images/odc_edit_app.png
new file mode 100644
index 000000000000..bdf41a27b477
Binary files /dev/null and b/images/odc_edit_app.png differ
diff --git a/images/odc_edit_redeploy.png b/images/odc_edit_redeploy.png
new file mode 100644
index 000000000000..3708f2868262
Binary files /dev/null and b/images/odc_edit_redeploy.png differ
diff --git a/images/odc_git_repository.png b/images/odc_git_repository.png
new file mode 100644
index 000000000000..4b22c5d567c1
Binary files /dev/null and b/images/odc_git_repository.png differ
diff --git a/images/odc_helm_chart_devcatalog.png b/images/odc_helm_chart_devcatalog.png
new file mode 100644
index 000000000000..53dbf118cfcb
Binary files /dev/null and b/images/odc_helm_chart_devcatalog.png differ
diff --git a/images/odc_helm_chart_repo_filter.png b/images/odc_helm_chart_repo_filter.png
new file mode 100644
index 000000000000..bfc44325b1a2
Binary files /dev/null and b/images/odc_helm_chart_repo_filter.png differ
diff --git a/images/odc_helm_chart_select_chart_ver.png b/images/odc_helm_chart_select_chart_ver.png
new file mode 100644
index 000000000000..6d7b52aa2d92
Binary files /dev/null and b/images/odc_helm_chart_select_chart_ver.png differ
diff --git a/images/odc_helm_revision_history.png b/images/odc_helm_revision_history.png
new file mode 100644
index 000000000000..1cee05afb284
Binary files /dev/null and b/images/odc_helm_revision_history.png differ
diff --git a/images/odc_info.png b/images/odc_info.png
new file mode 100644
index 000000000000..c59cfc70e233
Binary files /dev/null and b/images/odc_info.png differ
diff --git a/images/odc_list_view_icon.png b/images/odc_list_view_icon.png
new file mode 100644
index 000000000000..4a6013aba005
Binary files /dev/null and b/images/odc_list_view_icon.png differ
diff --git a/images/odc_open_url.png b/images/odc_open_url.png
new file mode 100644
index 000000000000..ad5498ec94a2
Binary files /dev/null and b/images/odc_open_url.png differ
diff --git a/images/odc_pod_failed.png b/images/odc_pod_failed.png
new file mode 100644
index 000000000000..94bb0a686b09
Binary files /dev/null and b/images/odc_pod_failed.png differ
diff --git a/images/odc_pod_not_ready.png b/images/odc_pod_not_ready.png
new file mode 100644
index 000000000000..5b0f76daa2f8
Binary files /dev/null and b/images/odc_pod_not_ready.png differ
diff --git a/images/odc_pod_pending.png b/images/odc_pod_pending.png
new file mode 100644
index 000000000000..c0181dfdba37
Binary files /dev/null and b/images/odc_pod_pending.png differ
diff --git a/images/odc_pod_running.png b/images/odc_pod_running.png
new file mode 100644
index 000000000000..75c015fab634
Binary files /dev/null and b/images/odc_pod_running.png differ
diff --git a/images/odc_pod_succeeded.png b/images/odc_pod_succeeded.png
new file mode 100644
index 000000000000..257b956f58dc
Binary files /dev/null and b/images/odc_pod_succeeded.png differ
diff --git a/images/odc_pod_terminating.png b/images/odc_pod_terminating.png
new file mode 100644
index 000000000000..f4f3fcdd3772
Binary files /dev/null and b/images/odc_pod_terminating.png differ
diff --git a/images/odc_pod_unknown.png b/images/odc_pod_unknown.png
new file mode 100644
index 000000000000..d4d0b65664ff
Binary files /dev/null and b/images/odc_pod_unknown.png differ
diff --git a/images/odc_pod_warning.png b/images/odc_pod_warning.png
new file mode 100644
index 000000000000..8a6d5afb5bfa
Binary files /dev/null and b/images/odc_pod_warning.png differ
diff --git a/images/odc_project_alerts.png b/images/odc_project_alerts.png
new file mode 100644
index 000000000000..7266b3776829
Binary files /dev/null and b/images/odc_project_alerts.png differ
diff --git a/images/odc_project_dashboard.png b/images/odc_project_dashboard.png
new file mode 100644
index 000000000000..03da036fa655
Binary files /dev/null and b/images/odc_project_dashboard.png differ
diff --git a/images/odc_project_events.png b/images/odc_project_events.png
new file mode 100644
index 000000000000..a24cc1e0903a
Binary files /dev/null and b/images/odc_project_events.png differ
diff --git a/images/odc_project_metrics.png b/images/odc_project_metrics.png
new file mode 100644
index 000000000000..bd6ded4a54b4
Binary files /dev/null and b/images/odc_project_metrics.png differ
diff --git a/images/odc_project_permissions.png b/images/odc_project_permissions.png
new file mode 100644
index 000000000000..b2f34484fd72
Binary files /dev/null and b/images/odc_project_permissions.png differ
diff --git a/images/odc_quick_search.png b/images/odc_quick_search.png
new file mode 100644
index 000000000000..59fbb1b9d6cc
Binary files /dev/null and b/images/odc_quick_search.png differ
diff --git a/images/odc_serverless_app.png b/images/odc_serverless_app.png
new file mode 100644
index 000000000000..2b748c403b17
Binary files /dev/null and b/images/odc_serverless_app.png differ
diff --git a/images/odc_topology_view_icon.png b/images/odc_topology_view_icon.png
new file mode 100644
index 000000000000..76b517a24145
Binary files /dev/null and b/images/odc_topology_view_icon.png differ
diff --git a/images/odc_verified_icon.png b/images/odc_verified_icon.png
new file mode 100644
index 000000000000..36c754bd9347
Binary files /dev/null and b/images/odc_verified_icon.png differ
diff --git a/images/oke-about.png b/images/oke-about.png
new file mode 100644
index 000000000000..bd07fe730c03
Binary files /dev/null and b/images/oke-about.png differ
diff --git a/images/olm-catalogsource.png b/images/olm-catalogsource.png
new file mode 100644
index 000000000000..7d7401cf7836
Binary files /dev/null and b/images/olm-catalogsource.png differ
diff --git a/images/olm-channels.png b/images/olm-channels.png
new file mode 100644
index 000000000000..845d72a77560
Binary files /dev/null and b/images/olm-channels.png differ
diff --git a/images/olm-replaces.png b/images/olm-replaces.png
new file mode 100644
index 000000000000..5394fee620c0
Binary files /dev/null and b/images/olm-replaces.png differ
diff --git a/images/olm-skipping-updates.png b/images/olm-skipping-updates.png
new file mode 100644
index 000000000000..dae054588cff
Binary files /dev/null and b/images/olm-skipping-updates.png differ
diff --git a/images/olm-workflow.png b/images/olm-workflow.png
index f78f5f68ffc8..aeb48f3ecb0e 100644
Binary files a/images/olm-workflow.png and b/images/olm-workflow.png differ
diff --git a/images/olm-z-stream.png b/images/olm-z-stream.png
new file mode 100644
index 000000000000..b55a5ffa0c83
Binary files /dev/null and b/images/olm-z-stream.png differ
diff --git a/images/op-install-subscription.png b/images/op-install-subscription.png
new file mode 100644
index 000000000000..5f41c09482f3
Binary files /dev/null and b/images/op-install-subscription.png differ
diff --git a/images/op-installed-tile.png b/images/op-installed-tile.png
new file mode 100644
index 000000000000..dfd4af39fca7
Binary files /dev/null and b/images/op-installed-tile.png differ
diff --git a/images/op-pipeline-builder-task-details.png b/images/op-pipeline-builder-task-details.png
new file mode 100644
index 000000000000..34b2f08e3fa0
Binary files /dev/null and b/images/op-pipeline-builder-task-details.png differ
diff --git a/images/op-pipeline-builder.png b/images/op-pipeline-builder.png
new file mode 100644
index 000000000000..9b19bebe7802
Binary files /dev/null and b/images/op-pipeline-builder.png differ
diff --git a/images/op-pipeline-details.png b/images/op-pipeline-details.png
new file mode 100644
index 000000000000..01cf2d79227b
Binary files /dev/null and b/images/op-pipeline-details.png differ
diff --git a/images/op-pipeline-yaml.png b/images/op-pipeline-yaml.png
new file mode 100644
index 000000000000..cdf3a2e43bae
Binary files /dev/null and b/images/op-pipeline-yaml.png differ
diff --git a/images/op_pipeline_run.png b/images/op_pipeline_run.png
new file mode 100644
index 000000000000..145915eff5e6
Binary files /dev/null and b/images/op_pipeline_run.png differ
diff --git a/images/op_pipeline_topology.png b/images/op_pipeline_topology.png
new file mode 100644
index 000000000000..94d65ea12616
Binary files /dev/null and b/images/op_pipeline_topology.png differ
diff --git a/images/openshift-on-openstack-provider-network.png b/images/openshift-on-openstack-provider-network.png
new file mode 100644
index 000000000000..9700c13b2972
Binary files /dev/null and b/images/openshift-on-openstack-provider-network.png differ
diff --git a/images/operator-maturity-model.png b/images/operator-maturity-model.png
index bd8dcc151c37..c4f745c299f8 100644
Binary files a/images/operator-maturity-model.png and b/images/operator-maturity-model.png differ
diff --git a/images/orchestration.png b/images/orchestration.png
new file mode 100644
index 000000000000..4e77da1c5d8f
Binary files /dev/null and b/images/orchestration.png differ
diff --git a/images/ossm-grafana-dashboard-no-traffic.png b/images/ossm-grafana-dashboard-no-traffic.png
new file mode 100644
index 000000000000..7c2182c016b3
Binary files /dev/null and b/images/ossm-grafana-dashboard-no-traffic.png differ
diff --git a/images/ossm-grafana-home-screen.png b/images/ossm-grafana-home-screen.png
new file mode 100644
index 000000000000..0690720c6a47
Binary files /dev/null and b/images/ossm-grafana-home-screen.png differ
diff --git a/images/ossm-grafana-mesh-no-traffic.png b/images/ossm-grafana-mesh-no-traffic.png
new file mode 100644
index 000000000000..c5f717e266a0
Binary files /dev/null and b/images/ossm-grafana-mesh-no-traffic.png differ
diff --git a/images/ossm-grafana-mesh-with-traffic.png b/images/ossm-grafana-mesh-with-traffic.png
new file mode 100644
index 000000000000..37e53db9e305
Binary files /dev/null and b/images/ossm-grafana-mesh-with-traffic.png differ
diff --git a/images/ossm-grafana-services.png b/images/ossm-grafana-services.png
new file mode 100644
index 000000000000..dec1e32dcd1c
Binary files /dev/null and b/images/ossm-grafana-services.png differ
diff --git a/images/ossm-grafana-workloads.png b/images/ossm-grafana-workloads.png
new file mode 100644
index 000000000000..a32e69e3e64f
Binary files /dev/null and b/images/ossm-grafana-workloads.png differ
diff --git a/images/ossm-jaeger-main-screen.png b/images/ossm-jaeger-main-screen.png
new file mode 100644
index 000000000000..9a1aabc199fb
Binary files /dev/null and b/images/ossm-jaeger-main-screen.png differ
diff --git a/images/ossm-jaeger-spans.png b/images/ossm-jaeger-spans.png
new file mode 100644
index 000000000000..24a347728dff
Binary files /dev/null and b/images/ossm-jaeger-spans.png differ
diff --git a/images/ossm-kiali-services-list.png b/images/ossm-kiali-services-list.png
new file mode 100644
index 000000000000..169e6ad6d9f4
Binary files /dev/null and b/images/ossm-kiali-services-list.png differ
diff --git a/images/ossm-prometheus-home-screen.png b/images/ossm-prometheus-home-screen.png
new file mode 100644
index 000000000000..c570ae4b41cc
Binary files /dev/null and b/images/ossm-prometheus-home-screen.png differ
diff --git a/images/ossm-prometheus-metrics.png b/images/ossm-prometheus-metrics.png
new file mode 100644
index 000000000000..c115e31d2441
Binary files /dev/null and b/images/ossm-prometheus-metrics.png differ
diff --git a/images/question-circle.png b/images/question-circle.png
new file mode 100644
index 000000000000..a505ba1fcadd
Binary files /dev/null and b/images/question-circle.png differ
diff --git a/images/quick-start-conclusion.png b/images/quick-start-conclusion.png
new file mode 100644
index 000000000000..f93257032c6d
Binary files /dev/null and b/images/quick-start-conclusion.png differ
diff --git a/images/quick-start-description.png b/images/quick-start-description.png
new file mode 100644
index 000000000000..e8829706d2e9
Binary files /dev/null and b/images/quick-start-description.png differ
diff --git a/images/quick-start-display-name.png b/images/quick-start-display-name.png
new file mode 100644
index 000000000000..c6b6aa3579ca
Binary files /dev/null and b/images/quick-start-display-name.png differ
diff --git a/images/quick-start-duration.png b/images/quick-start-duration.png
new file mode 100644
index 000000000000..7d88be59b935
Binary files /dev/null and b/images/quick-start-duration.png differ
diff --git a/images/quick-start-icon.png b/images/quick-start-icon.png
new file mode 100644
index 000000000000..1b1c7c0b8e8a
Binary files /dev/null and b/images/quick-start-icon.png differ
diff --git a/images/quick-start-introduction.png b/images/quick-start-introduction.png
new file mode 100644
index 000000000000..450cb8dc63b9
Binary files /dev/null and b/images/quick-start-introduction.png differ
diff --git a/images/red-hat-applications-menu-icon.jpg b/images/red-hat-applications-menu-icon.jpg
new file mode 100644
index 000000000000..c2afc1e8099e
Binary files /dev/null and b/images/red-hat-applications-menu-icon.jpg differ
diff --git a/images/secure_deployments.png b/images/secure_deployments.png
new file mode 100644
index 000000000000..57add95959b2
Binary files /dev/null and b/images/secure_deployments.png differ
diff --git a/images/serverless-create-eventing-yaml.png b/images/serverless-create-eventing-yaml.png
new file mode 100644
index 000000000000..45f2af92315a
Binary files /dev/null and b/images/serverless-create-eventing-yaml.png differ
diff --git a/images/serverless-create-eventing.png b/images/serverless-create-eventing.png
new file mode 100644
index 000000000000..05644c6e580d
Binary files /dev/null and b/images/serverless-create-eventing.png differ
diff --git a/images/serverless-create-namespaces.png b/images/serverless-create-namespaces.png
new file mode 100644
index 000000000000..292db72db060
Binary files /dev/null and b/images/serverless-create-namespaces.png differ
diff --git a/images/serverless-create-service-admin.png b/images/serverless-create-service-admin.png
new file mode 100644
index 000000000000..b3c99c8ef11f
Binary files /dev/null and b/images/serverless-create-service-admin.png differ
diff --git a/images/serverless-create-serving-yaml.png b/images/serverless-create-serving-yaml.png
new file mode 100644
index 000000000000..ba2ca43b38b5
Binary files /dev/null and b/images/serverless-create-serving-yaml.png differ
diff --git a/images/serverless-create-serving.png b/images/serverless-create-serving.png
new file mode 100644
index 000000000000..f387294b15d7
Binary files /dev/null and b/images/serverless-create-serving.png differ
diff --git a/images/serverless-event-broker-workflow.png b/images/serverless-event-broker-workflow.png
new file mode 100644
index 000000000000..aea669722c38
Binary files /dev/null and b/images/serverless-event-broker-workflow.png differ
diff --git a/images/serverless-event-channel-workflow.png b/images/serverless-event-channel-workflow.png
new file mode 100644
index 000000000000..99957bc3aed7
Binary files /dev/null and b/images/serverless-event-channel-workflow.png differ
diff --git a/images/serverless-monitoring-service-example-dashboard.png b/images/serverless-monitoring-service-example-dashboard.png
new file mode 100644
index 000000000000..d9c29422ac29
Binary files /dev/null and b/images/serverless-monitoring-service-example-dashboard.png differ
diff --git a/images/serverless-monitoring-service-example1.png b/images/serverless-monitoring-service-example1.png
new file mode 100644
index 000000000000..27c73173cfc5
Binary files /dev/null and b/images/serverless-monitoring-service-example1.png differ
diff --git a/images/serverless-monitoring-service-example2.png b/images/serverless-monitoring-service-example2.png
new file mode 100644
index 000000000000..f0149cd5dd11
Binary files /dev/null and b/images/serverless-monitoring-service-example2.png differ
diff --git a/images/serverless-search.png b/images/serverless-search.png
new file mode 100644
index 000000000000..4d2af649a02c
Binary files /dev/null and b/images/serverless-search.png differ
diff --git a/images/service-yaml-admin.png b/images/service-yaml-admin.png
new file mode 100644
index 000000000000..28823e44a019
Binary files /dev/null and b/images/service-yaml-admin.png differ
diff --git a/images/serving-YAML-HA.png b/images/serving-YAML-HA.png
new file mode 100644
index 000000000000..caf0b5b1261a
Binary files /dev/null and b/images/serving-YAML-HA.png differ
diff --git a/images/serving-conditions-true.png b/images/serving-conditions-true.png
new file mode 100644
index 000000000000..22f38a447bab
Binary files /dev/null and b/images/serving-conditions-true.png differ
diff --git a/images/serving-form-view.png b/images/serving-form-view.png
new file mode 100644
index 000000000000..1f9e539fede7
Binary files /dev/null and b/images/serving-form-view.png differ
diff --git a/images/serving-installed-operator.png b/images/serving-installed-operator.png
new file mode 100644
index 000000000000..afdde8b4a768
Binary files /dev/null and b/images/serving-installed-operator.png differ
diff --git a/images/serving-overview.png b/images/serving-overview.png
new file mode 100644
index 000000000000..c0e6478a9530
Binary files /dev/null and b/images/serving-overview.png differ
diff --git a/images/serving-tab-created.png b/images/serving-tab-created.png
new file mode 100644
index 000000000000..682de782a13e
Binary files /dev/null and b/images/serving-tab-created.png differ
diff --git a/images/silences-screen.png b/images/silences-screen.png
deleted file mode 100644
index 77ba6b38cc98..000000000000
Binary files a/images/silences-screen.png and /dev/null differ
diff --git a/images/targets-and-dependencies.png b/images/targets-and-dependencies.png
index ae7512863c0b..b39ba1420d4c 100644
Binary files a/images/targets-and-dependencies.png and b/images/targets-and-dependencies.png differ
diff --git a/images/telmetry-and-insights-operator-data-flow.svg b/images/telmetry-and-insights-operator-data-flow.svg
new file mode 100644
index 000000000000..4d0820cdd731
--- /dev/null
+++ b/images/telmetry-and-insights-operator-data-flow.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/images/toplogy-odc-apiserver.png b/images/toplogy-odc-apiserver.png
new file mode 100644
index 000000000000..22e6532e2091
Binary files /dev/null and b/images/toplogy-odc-apiserver.png differ
diff --git a/images/trustedsupplychain.png b/images/trustedsupplychain.png
new file mode 100644
index 000000000000..e62e0f70824d
Binary files /dev/null and b/images/trustedsupplychain.png differ
diff --git a/images/verify-channel-odc.png b/images/verify-channel-odc.png
new file mode 100644
index 000000000000..43a36617d790
Binary files /dev/null and b/images/verify-channel-odc.png differ
diff --git a/images/verify-kafka-ODC.png b/images/verify-kafka-ODC.png
new file mode 100644
index 000000000000..1f94c0e028c5
Binary files /dev/null and b/images/verify-kafka-ODC.png differ
diff --git a/images/verify-pingsource-ODC.png b/images/verify-pingsource-ODC.png
new file mode 100644
index 000000000000..dac0464b36cf
Binary files /dev/null and b/images/verify-pingsource-ODC.png differ
diff --git a/images/verify-sinkbinding-odc.png b/images/verify-sinkbinding-odc.png
new file mode 100644
index 000000000000..91039cf2ab1a
Binary files /dev/null and b/images/verify-sinkbinding-odc.png differ
diff --git a/images/verify-subscription-odc.png b/images/verify-subscription-odc.png
new file mode 100644
index 000000000000..c621e6d34f18
Binary files /dev/null and b/images/verify-subscription-odc.png differ
diff --git a/images/verify-trigger-odc.png b/images/verify-trigger-odc.png
new file mode 100644
index 000000000000..be56d1a87b42
Binary files /dev/null and b/images/verify-trigger-odc.png differ
diff --git a/images/whatarecontainers.png b/images/whatarecontainers.png
new file mode 100644
index 000000000000..8c3bd20379d5
Binary files /dev/null and b/images/whatarecontainers.png differ
diff --git a/images/wmco-design.png b/images/wmco-design.png
new file mode 100644
index 000000000000..d2ad6fdca5da
Binary files /dev/null and b/images/wmco-design.png differ
diff --git a/index-commercial.html b/index-commercial.html
index 4de13db9c488..b84b4c0fbe78 100644
--- a/index-commercial.html
+++ b/index-commercial.html
@@ -8,8 +8,10 @@
-  <title>Home | Red Hat OpenShift Documentation</title>
+  <title>Home | Official Red Hat OpenShift Documentation</title>
@@ -17,16 +19,6 @@
(Other <head> markup changed in this hunk was stripped during extraction; only the title change is recoverable.)