diff --git a/README.md b/README.md index b0d16c170..c6e81abb5 100644 --- a/README.md +++ b/README.md @@ -8,30 +8,17 @@ An [Ansible AWX](https://github.com/ansible/awx) operator for Kubernetes built with [Operator SDK](https://github.com/operator-framework/operator-sdk) and Ansible. - - - +This Kubernetes Operator is meant to be deployed in your Kubernetes cluster(s) and can be used to install and manage the lifecycle of an AWX instance in the same namespace. -# AWX Operator Documentation +## Documentation -The AWX Operator documentation is now available at https://awx-operator.readthedocs.io/ - -For docs changes, create PRs on the appropriate files in the /docs folder. +The AWX Operator documentation is available at <https://ansible.readthedocs.io/projects/awx-operator/> ## Contributing Please visit [our contributing guidelines](https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md). -## Release Process - -The first step is to create a draft release. Typically this will happen in the [Stage Release](https://github.com/ansible/awx/blob/devel/.github/workflows/stage.yml) workflow for AWX and you don't need to do it as a separate step. - -If you need to do an independent release of the operator, you can run the [Stage Release](https://github.com/ansible/awx-operator/blob/devel/.github/workflows/stage.yml) in the awx-operator repo. Both of these workflows will run smoke tests, so there is no need to do this manually. - -After the draft release is created, publish it and the [Promote AWX Operator image](https://github.com/ansible/awx-operator/blob/devel/.github/workflows/promote.yaml) will run, which will: - -- Publish image to Quay -- Release Helm chart +For docs changes, create PRs on the appropriate files in the `/docs` folder. ## Author @@ -43,7 +30,7 @@ We ask all of our community members and contributors to adhere to the [Ansible c ## Get Involved -We welcome your feedback and ideas. The AWX operator uses the same mailing list and IRC channel as AWX itself. 
Here's how to reach us with feedback and questions: +We welcome your feedback and ideas. The AWX operator uses the same Matrix channel and Ansible Community Forum as AWX itself. Here's how to reach us with feedback and questions: - Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) - Join the [Ansible Community Forum](https://forum.ansible.com) diff --git a/docs/awx-demo.svg b/docs/awx-demo.svg deleted file mode 100644 index 8414086e7..000000000 --- a/docs/awx-demo.svg +++ /dev/null @@ -1 +0,0 @@ -$$#Forthisdemo,I'mrunningitonFedora34withkvm2$#Pleasenotetheoptionspassedtotheminikubestartcommand$#shallbedifferentforeachenvironment.$#RefertotheMinikubedocumentationforfurtherassistance$#Tospeedthingsuponthisvideo,hereisthecommandIusedtostartmyminikubeversion$#minikubestart--driver=kvm2--addons=ingress--cpus=4--cni=flannel\$#--install-addons=true--kubernetes-version=stable--memory=12g$minikubestart😄minikubev1.20.0onFedora34Usingthekvm2driverbasedonexistingprofile👍Startingcontrolplanenodeminikubeinclusterminikube🏃Updatingtherunningkvm2"minikube"VM...🐳PreparingKubernetesv1.20.2onDocker20.10.6...🔎VerifyingKubernetescomponents...Usingimagek8s.gcr.io/ingress-nginx/controller:v0.44.0Usingimagedocker.io/jettech/kube-webhook-certgen:v1.5.1Usingimagegcr.io/k8s-minikube/storage-provisioner:v5🔎Verifyingingressaddon...🌟Enabledaddons:storage-provisioner,default-storageclass,ingress🏄Done!kubectlisnowconfiguredtouse"minikube"clusterand"default"namespacebydefault$#Nowlet'scheckifthepodsareontherunningstate.$#Itmighttakeafewsecondstogetallofthemupandrunningonyourend$kubectlgetnodesNAMESTATUSROLESAGEVERSIONminikubeReadycontrol-plane,master45mv1.20.2$kubectlgetpods-ANAMESPACENAMEREADYSTATUSRESTARTSAGEingress-nginxingress-nginx-admission-create-jws4j0/1Completed044mingress-nginxingress-nginx-admission-patch-sjk7b0/1Completed044mingress-nginxingress-nginx-controller-5d88495688-fpfnk1/1Running044mkube-systemcoredns-74ff55c5b-xcpzh1/1Running044mkube-systemetcd-mini
kube1/1Running045mkube-systemkube-apiserver-minikube1/1Running045mkube-systemkube-controller-manager-minikube1/1Running045mkube-systemkube-flannel-ds-amd64-v7c7g1/1Running044mkube-systemkube-proxy-8mpm91/1Running044mkube-systemkube-scheduler-minikube1/1Running045mkube-systemstorage-provisioner1/1Running145m$#Forthisdemo,I'mdeployingtheawx-operatorfromthedevelbranch$#butpleaserefertothedocumentationonhowdeployastablerelease$kubectlapply-fawx-operator.yamlcustomresourcedefinition.apiextensions.k8s.io/awxs.awx.ansible.comcreatedcustomresourcedefinition.apiextensions.k8s.io/awxbackups.awx.ansible.comcreatedcustomresourcedefinition.apiextensions.k8s.io/awxrestores.awx.ansible.comcreatedclusterrole.rbac.authorization.k8s.io/awx-operatorcreatedclusterrolebinding.rbac.authorization.k8s.io/awx-operatorcreatedserviceaccount/awx-operatorcreateddeployment.apps/awx-operatorcreated$#Let'swaitthecontainerimagetobedownloadandtheawx-operator$#shouldbeinarunningstate$kubectl$kubectlget$kubectlgetpods$kubectlgetpods-wNAMEREADYSTATUSRESTARTSAGEawx-operator-7dbf9db9d7-z62pm1/1Running012s^C^C$$#Nowfirst,let'screateaAWXinstancewithusingaNodePortservice$#Thisprocesswilltakeafewminutestofinish,butweshouldhavea$#PostgreSQLstatefulsetrunningandaAWXpodwith4containersinRunningstate$catawx-demo.yml---apiVersion:awx.ansible.com/v1beta1kind:AWXmetadata:name:awx-demospec:service_type:nodeporthostname:awx-demo.example.com$kubectlapply-fawx-demo.ymlawx.awx.ansible.com/awx-democreated$#Inthemeantime,youcaninspecttheawx-operatorlogsaswell$#kubectllogs-fdeploy/awx-operator$#Allowsometimetogettheimagespulledandforthedatabasemigration$#Gogetacoffee:)$kubectlgetpods-l"app.kubernetes.io/managed-by=awx-operator"-wNAMEREADYSTATUSRESTARTSAGEawx-demo-postgres-00/1Pending00sawx-demo-postgres-00/1ContainerCreating00sawx-demo-postgres-01/1Running02sawx-demo-77d96f88d5-rbqz90/4Pending00sawx-demo-77d96f88d5-rbqz90/4ContainerCreating00sawx-demo-77d96f88d5-rbqz94/4Running05s$#Nowthateverythingisintherunningstate,wesho
uldbegood:)$kubectl get pods -l "app.kubernetes.io/managed-by=awx-operator"$kubectlgetpods-l"app.kubernetes.io/managed-by=awx-operator"NAMEREADYSTATUSRESTARTSAGEawx-demo-77d96f88d5-rbqz94/4Running018sawx-demo-postgres-01/1Running028s$kubectl get svc -l "app.kubernetes.io/managed-by=awx-operator"$kubectlgetsvc-l"app.kubernetes.io/managed-by=awx-operator"NAMETYPECLUSTER-IPEXTERNAL-IPPORT(S)AGEawx-demo-postgresClusterIPNone<none>5432/TCP35sawx-demo-serviceNodePort10.98.233.228<none>80:32747/TCP27s$#Let'sdoaquicktesttocheckifthingsarelookinggood$kubectliexecawx-demo/bin/bashUsethearrowkeystonavigate:?SelectPod:Namespace:default|Pod:awx-demo-77d96f88d5-rbqz9Namespace:default|Pod:awx-demo-postgres-0Namespace:default|Pod:awx-demo-77d96f88d5-rbqz9?SelectContainer:Container:redisContainer:awx-demo-webContainer:awx-demo-taskContainer:awx-demo-eeContainer:redisContainer:awx-demo-webContainer:awx-demo-taskContainer:awx-demo-taskbash-4.4$bash-4.4$awx-manageversion19.1.0bash-4.4$awx-managecheck_dbDatabaseVersion:PostgreSQL12.7(Debian12.7-1.pgdg100+1)onx86_64-pc-linux-gnu,compiledbygcc(Debian8.3.0-6)8.3.0,64-bitbash-4.4$exitexit$#Let'stovisittheNodePortURLusingtheminikubeservicecommand$minikube service 
list$minikubeservicelist|---------------|------------------------------------|--------------|----------------------------||NAMESPACE|NAME|TARGETPORT|URL||default|awx-demo-postgres|Nonodeport||default|awx-demo-service|http/80|http://192.168.39.79:32747||default|default|awx-operator-metrics|Nonodeport|||default|kubernetes|Nonodeport||ingress-nginx|ingress-nginx-controller|http/80|http://192.168.39.79:32732||||https/443|http://192.168.39.79:30947||ingress-nginx|ingress-nginx-controller-admission|Nonodeport||kube-system|kube-dns|Nonodeport|$minikubeserviceawx-demo-service--urlhttp://192.168.39.79:32747$#$#L$#Le$#Let$#Let'$#Let's$#Let'scheck$#Let'scheckthe$#Let'schecktheAPI$#Let'schecktheAPInow$curl-L-q$(minikubeserviceawx-demo-service--url)/api/v2/ping2>/dev/null|python-mjson.tool{"ha":false,"version":"19.1.0","active_node":"awx-demo-77d96f88d5-rbqz9","install_uuid":"352415ad-564e-41cc-bbc2-85bd9169342e","instances":[{"node":"awx-demo-77d96f88d5-rbqz9","uuid":"00000000-0000-0000-0000-000000000000","heartbeat":"2021-05-25T05:00:46.328860Z","capacity":0,"version":"19.1.0"}],"instance_groups":["name":"tower","instances":[]]}$#S$#Sweet!!$#Sweet!!You$#Sweet!!Yougot$#Sweet!!Yougotit!!$#Sweet!!Yougotit!!:)$#Nowlet'sdeployanotherAWXinstanceusingaNGINXIngressController$#OncewehaveitinaRunningstate,wenowhaveaningressresourcecreated$catawx-nginx-ingress.ymlname:awx-nginxservice_type:clusteripingress_type:ingresshostname:my-awx.example.com$kubectlapply-fawx-nginx-ingress.ymlawx.awx.ansible.com/awx-nginxcreated$kubectlgetawxNAMEAGEawx-demo2m22sawx-nginx4sawx-demo-77d96f88d5-rbqz94/4Running02mawx-demo-postgres-01/1Running02m10sawx-nginx-postgres-00/1Pending00sawx-nginx-postgres-00/1ContainerCreating00sawx-nginx-postgres-01/1Running03sawx-nginx-cb66586f-m6qrj0/4Pending00sawx-nginx-cb66586f-m6qrj0/4ContainerCreating00sawx-nginx-cb66586f-m6qrj4/4Running05s$#Now$#Nowwe$#Nowwehave$#Nowwehavethe$#Nowwehavetheservices$#Nowwehavetheservicescreated$#Nowwehavetheservicescreatedand$#Nowwehaveth
eservicescreatedandthe$#Nowwehavetheservicescreatedandthepods$#Nowwehavetheservicescreatedandthepodsrunningawx-demo-77d96f88d5-rbqz94/4Running02m43sawx-demo-postgres-01/1Running02m53sawx-nginx-cb66586f-m6qrj4/4Running027sawx-nginx-postgres-01/1Running037sNAMETYPECLUSTER-IPEXTERNAL-IPPORT(S)AGEawx-demo-postgresClusterIPNone<none>5432/TCP2m55sawx-demo-serviceNodePort10.98.233.228<none>80:32747/TCP2m47sawx-nginx-postgresClusterIPNone<none>5432/TCP39sawx-nginx-serviceClusterIP10.100.157.46<none>80/TCP31s|default|awx-nginx-postgres|Nonodeport||default|awx-nginx-service|Nonodeport|$#Let'snow$#Let'snowinspect$#Let'snowinspectthe$#Let'snowinspecttheingress$kubectlgetingressesNAMECLASSHOSTSADDRESSPORTSAGEawx-nginx-ingress<none>my-awx.example.com192.168.39.798054s$kubectldescribeingressawx-nginx-ingressName:awx-nginx-ingressNamespace:defaultAddress:192.168.39.79Defaultbackend:default-http-backend:80(<error:endpoints"default-http-backend"notfound>)Rules:HostPathBackends----------------my-awx.example.com/awx-nginx-service:80(10.244.0.19:8052)Annotations:<none>Events:TypeReasonAgeFromMessage-------------------------NormalSync56s(x2over58s)nginx-ingress-controllerScheduledforsync$#Withtheingressruleinplace,weshouldbeabletoaccessit$#Forthat,let'saddtheentryonthe/etc/hosts$kubectlgetingressesawx-nginx-ingress|grep-vNAME|awk-F'''{print$4""$3}'192.168.39.79my-awx.example.com$kubectlgetingressesawx-nginx-ingress|grep-vNAME|awk-F'''{print$4""$3}'|sudotee-a/etc/hosts$curl-L-qhttp://my-awx.example.com/api/v2/ping2>/dev/null|python-mjson.tool"active_node":"awx-nginx-cb66586f-m6qrj","install_uuid":"3f21fe54-5f12-4546-8200-b8976cca45f9","node":"awx-nginx-cb66586f-m6qrj","heartbeat":"2021-05-25T05:03:02.224788Z",$#ThanksforwatchingandhappyAnsibleautomation!$# For this demo, I'm running it on Fedora 34 with kvm2$# Please note the options passed to the minikube start command$# shall be different for each environment.$# Refer to the Minikube documentation for further assistance$# To speed 
things up on this video, here is the command I used to start my minikube version$# minikube start --driver=kvm2 --addons=ingress --cpus=4 --cni=flannel \$# --install-addons=true --kubernetes-version=stable --memory=12g$minikube start🐳PreparingKubernetesv1.20.2onDocker20.10.6...|🐳PreparingKubernetesv1.20.2onDocker20.10.6.../🐳PreparingKubernetesv1.20.2onDocker20.10.6...-🐳PreparingKubernetesv1.20.2onDocker20.10.6...\$# Now let's check if the pods are on the running state.$# It might take a few seconds to get all of them up and running on your end$kubectl get nodes$kubectl get pods -Aingress-nginxkube-systemkube-systemstorage-provisioner1/1$# For this demo, I'm deploying the awx-operator from the devel branch$# but please refer to the documentation on how deploy a stable release$kubectl apply -f awx-operator.yaml$# Let's wait the container image to be download and the awx-operator$# should be in a running state$k$ku$kub$kube$kubec$kubect$kubectlg$kubectlge$kubectlgetp$kubectlgetpo$kubectlgetpod$kubectlgetpods-$# Now first, let's create a AWX instance with using a NodePort service$# This process will take a few minutes to finish, but we should have a$# PostgreSQL statefulset running and a AWX pod with 4 containers in Running state$cat awx-demo.yml$kubectl apply -f awx-demo.yml$# In the meantime, you can inspect the awx-operator logs as well$# kubectl logs -f deploy/awx-operator$# Allow some time to get the images pulled and for the database migration$# Go get a coffee :)$kubectl get pods -l "app.kubernetes.io/managed-by=awx-operator" -w$# Now that everything is in the running state, we should be good :)$# Let's do a quick test to check if things are looking good$kubectl iexec awx-demo /bin/bashbash-4.4$ebash-4.4$exbash-4.4$exi$# Let's to visit the NodePort URL using the minikube service command|NAMESPACE|NAME||default|awx-demo-service|http/80|||https/443||kube-system$minikube service awx-demo-service 
--url$#Let'sc$#Let'sch$#Let'sche$#Let'schec$#Let'scheckt$#Let'scheckth$#Let'schecktheA$#Let'schecktheAP$#Let'schecktheAPIn$#Let'schecktheAPIno$curl -L -q $(minikube service awx-demo-service --url)/api/v2/ping 2>/dev/null | python -m json.tool$#Se$#Sw$#Swe$#Swee$#Sweet$#Sweet!$#Sweet!!Y$#Sweet!!Yo$#Sweet!!Youg$#Sweet!!Yougo$#Sweet!!Yougoti$#Sweet!!Yougotit$#Sweet!!Yougotit!$#Sweet!!Yougotit!!:$# Now let's deploy another AWX instance using a NGINX Ingress Controller$# Once we have it in a Running state, we now have an ingress resource created$cat awx-nginx-ingress.yml$kubectl apply -f awx-nginx-ingress.yml$kubectl get awx$kubectl get pods -l "app.kubernetes.io/managed-by=awx-operator" -w$#N$#No$#Noww$#Nowweh$#Nowweha$#Nowwehav$#Nowwehavet$#Nowwehaveth$#Nowwehavethes$#Nowwehavethese$#Nowwehavetheser$#Nowwehavetheserv$#Nowwehavetheservi$#Nowwehavetheservic$#Nowwehavetheservice$#Nowwehavetheservicesc$#Nowwehavetheservicescr$#Nowwehavetheservicescre$#Nowwehavetheservicescrea$#Nowwehavetheservicescreat$#Nowwehavetheservicescreate$#Nowwehavetheservicescreateda$#Nowwehavetheservicescreatedan$#Nowwehavetheservicescreatedandt$#Nowwehavetheservicescreatedandth$#Nowwehavetheservicescreatedandthep$#Nowwehavetheservicescreatedandthepo$#Nowwehavetheservicescreatedandthepod$#Nowwehavetheservicescreatedandthepodsr$#Nowwehavetheservicescreatedandthepodsru$#Nowwehavetheservicescreatedandthepodsrun$#Nowwehavetheservicescreatedandthepodsrunn$#Nowwehavetheservicescreatedandthepodsrunni$#Nowwehavetheservicescreatedandthepodsrunninawx-demo-serviceNodePort|default|awx-demo-postgres||default|awx-nginx-service||default|awx-operator-metrics||default|kubernetes||ingress-nginx|ingress-nginx-controller|http/80||ingress-nginx|ingress-nginx-controller-admission|Nonodeport|---------------|------------------------------------|$#Let'sn$#Let'sno$#Let'snowi$#Let'snowin$#Let'snowins$#Let'snowinsp$#Let'snowinspe$#Let'snowinspec$#Let'snowinspectt$#Let'snowinspectth$#Let'snowinspectthei$#Let'snowinspectthein
$#Let'snowinspecttheing$#Let'snowinspecttheingr$#Let'snowinspecttheingre$#Let'snowinspecttheingres$kubectl get ingresses$kubectl describe ingress awx-nginx-ingress$# With the ingress rule in place, we should be able to access it$# For that, let's add the entry on the$#Forthat,let'saddtheentryonthe$#Forthat,let'saddtheentryonthe/$#Forthat,let'saddtheentryonthe/e$#Forthat,let'saddtheentryonthe/et$#Forthat,let'saddtheentryonthe/etc$#Forthat,let'saddtheentryonthe/etc/$#Forthat,let'saddtheentryonthe/etc/h$#Forthat,let'saddtheentryonthe/etc/ho$#Forthat,let'saddtheentryonthe/etc/hos$#Forthat,let'saddtheentryonthe/etc/host$kubectl get ingresses awx-nginx-ingress | grep -v NAME | awk -F' ' '{print $4 " " $3 }'$kubectl get ingresses awx-nginx-ingress | grep -v NAME | awk -F' ' '{print $4 " " $3 }' | sudo tee -a /etc/hosts$curl -L -q http://my-awx.example.com/api/v2/ping 2>/dev/null | python -m json.tool$# Thanks for watching and happy Ansible automation! \ No newline at end of file diff --git a/docs/contributors-guide/author.md b/docs/contributors-guide/author.md index 1cdc45d96..662aba4d1 100644 --- a/docs/contributors-guide/author.md +++ b/docs/contributors-guide/author.md @@ -1,3 +1,3 @@ -## Author +# Author This operator was originally built in 2019 by [Jeff Geerling](https://www.jeffgeerling.com) and is now maintained by the Ansible Team diff --git a/docs/contributors-guide/code-of-conduct.md b/docs/contributors-guide/code-of-conduct.md index aa9d5b01a..3662c3308 100644 --- a/docs/contributors-guide/code-of-conduct.md +++ b/docs/contributors-guide/code-of-conduct.md @@ -1,3 +1,3 @@ -## Code of Conduct +# Code of Conduct We ask all of our community members and contributors to adhere to the [Ansible code of conduct](http://docs.ansible.com/ansible/latest/community/code_of_conduct.html). 
If you have questions or need assistance, please reach out to our community team at [codeofconduct@ansible.com](mailto:codeofconduct@ansible.com) diff --git a/docs/contributors-guide/contributing.md b/docs/contributors-guide/contributing.md index d762c53ad..dd143b3af 100644 --- a/docs/contributors-guide/contributing.md +++ b/docs/contributors-guide/contributing.md @@ -1,3 +1,5 @@ -## Contributing +# Contributing Please visit [our contributing guidelines](https://github.com/ansible/awx-operator/blob/devel/CONTRIBUTING.md). + +For docs changes, create PRs on the appropriate files in the `/docs` folder. diff --git a/docs/contributors-guide/get-involved.md b/docs/contributors-guide/get-involved.md index 0845a978f..9eef5c122 100644 --- a/docs/contributors-guide/get-involved.md +++ b/docs/contributors-guide/get-involved.md @@ -1,6 +1,6 @@ -## Get Involved +# Get Involved -We welcome your feedback and ideas. The AWX operator uses the same mailing list and IRC channel as AWX itself. Here's how to reach us with feedback and questions: +We welcome your feedback and ideas. The AWX operator uses the same Matrix channel and Ansible Community Forum as AWX itself. Here's how to reach us with feedback and questions: -- Join the `#ansible-awx` channel on irc.libera.chat -- Join the [mailing list](https://groups.google.com/forum/#!forum/awx-project) +- Join the [Ansible AWX channel on Matrix](https://matrix.to/#/#awx:ansible.com) +- Join the [Ansible Community Forum](https://forum.ansible.com) diff --git a/docs/contributors-guide/release-process.md b/docs/contributors-guide/release-process.md index 3ebc89eae..04db3438e 100644 --- a/docs/contributors-guide/release-process.md +++ b/docs/contributors-guide/release-process.md @@ -1,4 +1,4 @@ -## Release Process +# Release Process The first step is to create a draft release. 
Typically this will happen in the [Stage Release](https://github.com/ansible/awx/blob/devel/.github/workflows/stage.yml) workflow for AWX and you don't need to do it as a separate step. @@ -10,16 +10,18 @@ After the draft release is created, publish it and the [Promote AWX Operator ima - Release Helm chart After the GHA is complete, the final step is to run the [publish-to-operator-hub.sh](https://github.com/ansible/awx-operator/blob/devel/hack/publish-to-operator-hub.sh) script, which will create a PR in the following repos to add the new awx-operator bundle version to OperatorHub: -* https://github.com/k8s-operatorhub/community-operators (community operator index) -* https://github.com/redhat-openshift-ecosystem/community-operators-prod (operator index shipped with Openshift) -The usage is documented in the script itself, but here is an example of how you would use the script to publish the 2.5.3 awx-opeator bundle to OperatorHub. -Note that you need to specify the version being released, as well as the previous version. This is because the bundle has a pointer to the previous version that is it being upgrade from. This is used by OLM to create a dependency graph. +- <https://github.com/k8s-operatorhub/community-operators> (community operator index) +- <https://github.com/redhat-openshift-ecosystem/community-operators-prod> (operator index shipped with Openshift) -```bash -$ VERSION=2.5.3 PREV_VERSION=2.5.2 ./publish-operator.sh -``` +!!! note + The usage is documented in the script itself, but here is an example of how you would use the script to publish the 2.5.3 awx-operator bundle to OperatorHub. + Note that you need to specify the version being released, as well as the previous version. This is because the bundle has a pointer to the previous version that it is being upgraded from. This is used by OLM to create a dependency graph. -> Note: There are some quirks with running this on OS X that still need to be fixed, but the script runs smoothly on linux. 
+ ```bash + VERSION=2.5.3 PREV_VERSION=2.5.2 ./hack/publish-to-operator-hub.sh + ``` + + There are some quirks with running this on OS X that still need to be fixed, but the script runs smoothly on linux. As soon as CI completes successfully, the PR's will be auto-merged. Please remember to monitor those PR's to make sure that CI passes, sometimes it needs a retry. diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 4ca40f460..000000000 --- a/docs/index.md +++ /dev/null @@ -1,2 +0,0 @@ - -The AWX operator is meant to provide a more Kubernetes-native installation method for AWX via an AWX Custom Resource Definition (CRD). diff --git a/docs/index.md b/docs/index.md new file mode 120000 index 000000000..32d46ee88 --- /dev/null +++ b/docs/index.md @@ -0,0 +1 @@ +../README.md \ No newline at end of file diff --git a/docs/installation/basic-install.md b/docs/installation/basic-install.md index f5d3a6820..d5c88304e 100644 --- a/docs/installation/basic-install.md +++ b/docs/installation/basic-install.md @@ -1,6 +1,7 @@ -### Basic Install +# Basic Install After cloning this repository, you must choose the tag to run: + ```sh git clone git@github.com:ansible/awx-operator.git cd awx-operator @@ -20,17 +21,23 @@ export VERSION= export VERSION=2.7.2 ``` -Once you have a running Kubernetes cluster, you can deploy AWX Operator into your cluster using [Kustomize](https://kubectl.docs.kubernetes.io/guides/introduction/kustomize/). Since kubectl version 1.14 kustomize functionality is built-in (otherwise, follow the instructions here to install the latest version of Kustomize: https://kubectl.docs.kubernetes.io/installation/kustomize/ ) +Once you have a running Kubernetes cluster, you can deploy AWX Operator into your cluster using [Kustomize](https://kubectl.docs.kubernetes.io/guides/introduction/kustomize/). 
Since kubectl version 1.14 kustomize functionality is built-in (otherwise, follow the instructions here to install the latest version of Kustomize: <https://kubectl.docs.kubernetes.io/installation/kustomize/>) + +!!! tip + If you don't have a Kubernetes cluster, you can use [Minikube](https://minikube.sigs.k8s.io/docs/) for testing purposes. See the [Minikube install docs](./creating-a-minikube-cluster-for-testing.md) for more details. -> Some things may need to be configured slightly differently for different Kubernetes flavors for the networking aspects. When installing on Kind, see the [kind install docs](./kind-install.md) for more details. +!!! note + Some things may need to be configured slightly differently for different Kubernetes flavors for the networking aspects. When installing on Kind, see the [kind install docs](./kind-install.md) for more details. There is a make target you can run: -``` + +```sh make deploy ``` If you have a custom operator image you have built, you can specify it with: -``` + +```sh IMG=quay.io/$YOURNAMESPACE/awx-operator:$YOURTAG make deploy ``` @@ -52,11 +59,12 @@ images: namespace: awx ``` -> **TIP:** If you need to change any of the default settings for the operator (such as resources.limits), you can add [patches](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/patches/) at the bottom of your kustomization.yaml file. +!!! tip + If you need to change any of the default settings for the operator (such as resources.limits), you can add [patches](https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/patches/) at the bottom of your kustomization.yaml file. Install the manifests by running this: -``` +```sh $ kubectl apply -k . 
namespace/awx created customresourcedefinition.apiextensions.k8s.io/awxbackups.awx.ansible.com created @@ -77,7 +85,7 @@ deployment.apps/awx-operator-controller-manager created Wait a bit and you should have the `awx-operator` running: -``` +```sh $ kubectl get pods -n awx NAME READY STATUS RESTARTS AGE awx-operator-controller-manager-66ccd8f997-rhd4z 2/2 Running 0 11s @@ -85,13 +93,14 @@ awx-operator-controller-manager-66ccd8f997-rhd4z 2/2 Running 0 So we don't have to keep repeating `-n awx`, let's set the current namespace for `kubectl`: -``` -$ kubectl config set-context --current --namespace=awx +```sh +kubectl config set-context --current --namespace=awx ``` Next, create a file named `awx-demo.yml` in the same folder with the suggested content below. The `metadata.name` you provide will be the name of the resulting AWX deployment. -**Note:** If you deploy more than one AWX instance to the same namespace, be sure to use unique names. +!!! note + If you deploy more than one AWX instance to the same namespace, be sure to use unique names. ```yaml --- @@ -103,7 +112,8 @@ spec: service_type: nodeport ``` -> It may make sense to create and specify your own secret key for your deployment so that if the k8s secret gets deleted, it can be re-created if needed. If it is not provided, one will be auto-generated, but cannot be recovered if lost. Read more [here](../user-guide/admin-user-account-configuration.md#secret-key-configuration). +!!! tip + It may make sense to create and specify your own secret key for your deployment so that if the k8s secret gets deleted, it can be re-created if needed. If it is not provided, one will be auto-generated, but cannot be recovered if lost. Read more [here](../user-guide/admin-user-account-configuration.md#secret-key-configuration). If you are on Openshift, you can take advantage of Routes by specifying the following your spec. This will automatically create a Route for you with a custom hostname. 
This can be found on the Route section of the Openshift Console. @@ -118,8 +128,7 @@ spec: ingress_type: Route ``` - -Make sure to add this new file to the list of "resources" in your `kustomization.yaml` file: +Make sure to add this new file to the list of `resources` in your `kustomization.yaml` file: ```yaml ... @@ -132,19 +141,13 @@ resources: Finally, apply the changes to create the AWX instance in your cluster: -``` +```sh kubectl apply -k . ``` -After a few minutes, the new AWX instance will be deployed. You can look at the operator pod logs in order to know where the installation process is at: - -``` -$ kubectl logs -f deployments/awx-operator-controller-manager -c awx-manager -``` - After a few seconds, you should see the operator begin to create new resources: -``` +```sh $ kubectl get pods -l "app.kubernetes.io/managed-by=awx-operator" NAME READY STATUS RESTARTS AGE awx-demo-77d96f88d5-pnhr8 4/4 Running 0 3m24s @@ -156,19 +159,19 @@ awx-demo-postgres ClusterIP None 5432/TCP 4m4s awx-demo-service NodePort 10.109.40.38 80:31006/TCP 3m56s ``` -Once deployed, the AWX instance will be accessible by running: +After a few minutes, the new AWX instance will be deployed. You can look at the operator pod logs in order to know where the installation process is at: +```sh +kubectl logs -f deployments/awx-operator-controller-manager -c awx-manager ``` -$ minikube service -n awx awx-demo-service --url -``` + +Once deployed, your AWX instance should now be reachable at `http://localhost:/` (in this case, `http://localhost:31006/`). By default, the admin user is `admin` and the password is available in the `-admin-password` secret. To retrieve the admin password, run: -``` +```sh $ kubectl get secret awx-demo-admin-password -o jsonpath="{.data.password}" | base64 --decode ; echo yDL2Cx5Za94g9MvBP6B73nzVLlmfgPjR ``` You just completed the most basic install of an AWX instance via this operator. Congratulations!!! 
- -For an example using the Nginx Ingress Controller in Minikube, don't miss our [demo video](https://asciinema.org/a/416946). diff --git a/docs/installation/creating-a-minikube-cluster-for-testing.md b/docs/installation/creating-a-minikube-cluster-for-testing.md index c72b747c4..8c8d10ac9 100644 --- a/docs/installation/creating-a-minikube-cluster-for-testing.md +++ b/docs/installation/creating-a-minikube-cluster-for-testing.md @@ -1,8 +1,8 @@ -### Creating a minikube cluster for testing +# Creating a minikube cluster for testing If you do not have an existing cluster, the `awx-operator` can be deployed on a [Minikube](https://minikube.sigs.k8s.io/docs/) cluster for testing purposes. Due to different OS and hardware environments, please refer to the official Minikube documentation for further information. -``` +```sh $ minikube start --cpus=4 --memory=6g --addons=ingress 😄 minikube v1.23.2 on Fedora 34 ✨ Using the docker driver based on existing profile @@ -22,7 +22,7 @@ $ minikube start --cpus=4 --memory=6g --addons=ingress Once Minikube is deployed, check if the node(s) and `kube-apiserver` communication is working as expected. -``` +```sh $ minikube kubectl -- get nodes NAME STATUS ROLES AGE VERSION minikube Ready control-plane,master 113s v1.22.2 @@ -45,6 +45,17 @@ It is not required for `kubectl` to be separately installed since it comes alrea Let's create an alias for easier usage: +```sh +alias kubectl="minikube kubectl --" ``` -$ alias kubectl="minikube kubectl --" -``` + +Now, you can proceed with the installation of the AWX Operator and AWX. Please refer to the [Basic Install](basic-install.md) for further instructions. + +!!! tip + Once your AWX has been deployed, the AWX instance will be accessible by running: + + ```sh + minikube service -n awx awx-demo-service --url + ``` + +For an example using the Nginx Ingress Controller in Minikube, don't miss our [demo video](https://asciinema.org/a/416946). 
diff --git a/docs/installation/helm-install-on-existing-cluster.md b/docs/installation/helm-install-on-existing-cluster.md index 89e50edf9..2aecb67c4 100644 --- a/docs/installation/helm-install-on-existing-cluster.md +++ b/docs/installation/helm-install-on-existing-cluster.md @@ -1,4 +1,4 @@ -### Helm Install on existing cluster +# Helm Install on existing cluster For those that wish to use [Helm](https://helm.sh/) to install the awx-operator to an existing K8s cluster: diff --git a/docs/installation/index.md b/docs/installation/index.md deleted file mode 100644 index 6828a62e1..000000000 --- a/docs/installation/index.md +++ /dev/null @@ -1,2 +0,0 @@ - -This Kubernetes Operator is meant to be deployed in your Kubernetes cluster(s) and can be used to install and manage the lifecycle of an AWX instance in the same namespace. diff --git a/docs/installation/kind-install.md b/docs/installation/kind-install.md index 8b643af16..a41a0c05f 100644 --- a/docs/installation/kind-install.md +++ b/docs/installation/kind-install.md @@ -2,9 +2,9 @@ ## Kind Install -Install Kind by running the following +Install Kind by running the following. Refer to the [official Kind documentation](https://kind.sigs.k8s.io/docs/user/quick-start/) for more information. 
-``` +```sh # For Intel Macs [ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-darwin-amd64 # For M1 / ARM Macs @@ -13,9 +13,6 @@ chmod +x ./kind mv ./kind /some-dir-in-your-PATH/kind ``` -> https://kind.sigs.k8s.io/docs/user/quick-start/ - - ### Create the Kind cluster Create a file called `kind.config` @@ -35,40 +32,39 @@ nodes: Then create a cluster using that config -``` +```sh kind create cluster --config=kind.config ``` Set cluster context for kubectl -``` +```sh kubectl cluster-info --context kind-kind ``` Install NGINX Ingress Controller -``` +```sh kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml ``` - ## AWX Set the namespace context -``` +```sh kubectl config set-context --current --namespace=awx ``` Checkout the tag you want to install from -``` +```sh git checkout 2.7.2 ``` Create a file named `kustomization.yaml` in the root of your local awx-operator clone. Include the following: -``` +```sh apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: @@ -86,14 +82,13 @@ namespace: awx Run the following to apply the yaml -``` +```sh kubectl apply -k . ``` - Create a file called `awx-cr.yaml` with the following contents and any configuration changes you may wish to add. -``` +```yaml --- apiVersion: awx.ansible.com/v1beta1 kind: AWX @@ -106,20 +101,19 @@ spec: Create your AWX CR +```sh +kubectl create -f awx-cr.yaml ``` -oc create -f awx-cr.yaml -``` - -Your AWX instance should now be reacheable at http://localhost:32000/ - -> If you configured a custom nodeport_port, you can find it by running `kubectl -n awx get svc awx-demo-service` +Your AWX instance should now be reachable at <http://localhost:32000/> +!!! 
note + If you configured a custom `nodeport_port`, you can find it by running `kubectl -n awx get svc awx-demo-service` ## Cleanup When you are done, you can delete all of this by running -``` +```sh kind delete cluster ``` diff --git a/docs/migration/migration.md b/docs/migration/migration.md index bdf9a8cd9..7c569e76f 100644 --- a/docs/migration/migration.md +++ b/docs/migration/migration.md @@ -19,7 +19,8 @@ stringData: type: Opaque ``` -**Note**: `` must match the `name` of the AWX object you are creating. In our example below, it is `awx`. +!!! note + `` must match the `name` of the AWX object you are creating. In our example below, it is `awx`. ### Old Database Credentials @@ -41,16 +42,14 @@ stringData: type: Opaque ``` -> For `host`, a URL resolvable by the cluster could look something like `postgresql..svc.`, where `` is filled in with the namespace of the AWX deployment you are migrating data from, and `` is filled in with the internal kubernretes cluster domain (In most cases it's `cluster.local`). +!!! note + For `host`, a URL resolvable by the cluster could look something like `postgresql..svc.`, where `` is filled in with the namespace of the AWX deployment you are migrating data from, and `` is filled in with the internal kubernetes cluster domain (In most cases it's `cluster.local`). -If your AWX deployment is already using an external database server or its database is otherwise not managed -by the AWX deployment, you can instead create the same secret as above but omit the `-old-` from the `name`. -In the next section pass it in through `postgres_configuration_secret` instead, omitting the `_old_` -from the key and ensuring the value matches the name of the secret. This will make AWX pick up on the existing -database and apply any pending migrations. It is strongly recommended to backup your database beforehand. 
+If your AWX deployment is already using an external database server or its database is otherwise not managed by the AWX deployment, you can instead create the same secret as above but omit the `-old-` from the `name`. +In the next section pass it in through `postgres_configuration_secret` instead, omitting the `_old_` from the key and ensuring the value matches the name of the secret. This will make AWX pick up on the existing database and apply any pending migrations. +It is strongly recommended to backup your database beforehand. -The postgresql pod for the old deployment is used when streaming data to the new postgresql pod. If your postgresql pod has a custom label, -you can pass that via the `postgres_label_selector` variable to make sure the postgresql pod can be found. +The postgresql pod for the old deployment is used when streaming data to the new postgresql pod. If your postgresql pod has a custom label, you can pass that via the `postgres_label_selector` variable to make sure the postgresql pod can be found. ## Deploy AWX @@ -66,7 +65,9 @@ spec: secret_key_secret: -secret-key ... ``` + ## Important Note + If you intend to put all the above in one file, make sure to separate each block with three dashes like so: ```yaml @@ -79,4 +80,5 @@ If you intend to put all the above in one file, make sure to separate each block --- # AWX Config ``` + Failing to do so will lead to an inoperable setup. diff --git a/docs/troubleshooting/debugging.md b/docs/troubleshooting/debugging.md index 8654b2a34..20a708a04 100644 --- a/docs/troubleshooting/debugging.md +++ b/docs/troubleshooting/debugging.md @@ -4,13 +4,14 @@ When the operator is deploying AWX, it is running the `installer` role inside the operator container. If the AWX CR's status is `Failed`, it is often useful to look at the awx-operator container logs, which shows the output of the installer role. 
To see these logs, run: -``` +```sh kubectl logs deployments/awx-operator-controller-manager -c awx-manager -f ``` ### Inspect k8s Resources Past that, it is often useful to inspect various resources the AWX Operator manages like: + * awx * awxbackup * awxrestore @@ -24,6 +25,7 @@ Past that, it is often useful to inspect various resources the AWX Operator mana * serviceaccount And if installing via OperatorHub and OLM: + * subscription * csv * installPlan @@ -31,7 +33,7 @@ And if installing via OperatorHub and OLM: To inspect these resources you can use these commands -``` +```sh # Inspecting k8s resources kubectl describe -n kubectl get -n -o yaml @@ -41,7 +43,6 @@ kubectl logs -n kubectl exec -it -n ``` - ### Configure No Log It is possible to show task output for debugging by setting no_log to false on the AWX CR spec. @@ -49,7 +50,7 @@ This will show output in the awx-operator logs for any failed tasks where no_log For example: -``` +```yaml --- apiVersion: awx.ansible.com/v1beta1 kind: AWX @@ -63,19 +64,19 @@ spec: ## Iterating on the installer without deploying the operator -Go through the [normal basic install](https://github.com/ansible/awx-operator/blob/devel/README.md#basic-install) steps. +Go through the [normal basic install](../installation/basic-install.md) steps. 
Install some dependencies: -``` -$ ansible-galaxy collection install -r molecule/requirements.yml -$ pip install -r molecule/requirements.txt +```sh +ansible-galaxy collection install -r molecule/requirements.yml +pip install -r molecule/requirements.txt ``` To prevent the changes we're about to make from being overwritten, scale down any running instance of the operator: -``` -$ kubectl scale deployment awx-operator-controller-manager --replicas=0 +```sh +kubectl scale deployment awx-operator-controller-manager --replicas=0 ``` Create a playbook that invokes the installer role (the operator uses ansible-runner's role execution feature): @@ -98,6 +99,7 @@ ansible_operator_meta: namespace: awx service_type: nodeport ``` + The vars file will replace the awx resource so any value that you wish to over ride using the awx resource, put in the vars file. For example, if you wish to use your own image, version and pull policy, you can specify it like below: ```yaml @@ -114,14 +116,13 @@ image_version: $COMPOSE_TAG Run the installer: -``` -$ ansible-playbook run.yml -e @vars.yml -v +```sh +ansible-playbook run.yml -e @vars.yml -v ``` Grab the URL and admin password: -``` -$ minikube service awx-service --url -n awx -$ minikube kubectl get secret awx-admin-password -- -o jsonpath="{.data.password}" | base64 --decode +```sh +$ kubectl get secret awx-admin-password -- -o jsonpath="{.data.password}" | base64 --decode ; echo LU6lTfvnkjUvDwL240kXKy1sNhjakZmT ``` diff --git a/docs/uninstall/uninstall.md b/docs/uninstall/uninstall.md index 1422b1019..fe2d79036 100644 --- a/docs/uninstall/uninstall.md +++ b/docs/uninstall/uninstall.md @@ -1,12 +1,13 @@ -### Uninstall ### +# Uninstall To uninstall an AWX deployment instance, you basically need to remove the AWX kind related to that instance. 
For example, to delete an AWX instance named awx-demo, you would do: -``` +```sh $ kubectl delete awx awx-demo awx.awx.ansible.com "awx-demo" deleted ``` Deleting an AWX instance will remove all related deployments and statefulsets, however, persistent volumes and secrets will remain. To enforce secrets also getting removed, you can use `garbage_collect_secrets: true`. -**Note**: If you ever intend to recover an AWX from an existing database you will need a copy of the secrets in order to perform a successful recovery. +!!! note + If you ever intend to recover an AWX from an existing database you will need a copy of the secrets in order to perform a successful recovery. diff --git a/docs/upgrade/upgrading.md b/docs/upgrade/upgrading.md index cde0e7918..924531e07 100644 --- a/docs/upgrade/upgrading.md +++ b/docs/upgrade/upgrading.md @@ -1,4 +1,4 @@ -### Upgrading +# Upgrading To upgrade AWX, it is recommended to upgrade the awx-operator to the version that maps to the desired version of AWX. To find the version of AWX that will be installed by the awx-operator by default, check the version specified in the `DEFAULT_AWX_VERSION` variable for that particular release. You can do so by running the following command @@ -7,9 +7,9 @@ AWX_OPERATOR_VERSION=2.8.0 docker run --entrypoint="" quay.io/ansible/awx-operator:$AWX_OPERATOR_VERSION bash -c "env | grep DEFAULT_AWX_VERSION" ``` -Apply the awx-operator.yml for that release to upgrade the operator, and in turn also upgrade your AWX deployment. +Make sure you have a backup before upgrading, then upgrade operator by invoking `make deploy` on the desired tag or by applying the `kustomization.yaml` that contains desired version of the operator, and in turn also upgrade your AWX deployment. -#### Backup +## Backup The first part of any upgrade should be a backup. Note, there are secrets in the pod which work in conjunction with the database. 
Having just a database backup without the required secrets will not be sufficient for recovering from an issue when upgrading to a new version. See the [backup role documentation](https://github.com/ansible/awx-operator/tree/devel/roles/backup) for information on how to backup your database and secrets. @@ -20,7 +20,7 @@ In the event you need to recover the backup see the [restore role documentation] **Note**: Do not delete the namespace/project, as that will delete the backup and the backup's PVC as well. -#### PostgreSQL Upgrade Considerations +## PostgreSQL Upgrade Considerations If there is a PostgreSQL major version upgrade, after the data directory on the PVC is migrated to the new version, the old PVC is kept by default. This provides the ability to roll back if needed, but can take up extra storage space in your cluster unnecessarily. You can configure it to be deleted automatically after a successful upgrade by setting the following variable on the AWX spec. @@ -30,28 +30,27 @@ spec: postgres_keep_pvc_after_upgrade: False ``` -#### v0.14.0 +## Caveats for upgrading to v0.14.0 -##### Cluster-scope to Namespace-scope considerations +### Cluster-scope to Namespace-scope considerations -Starting with awx-operator 0.14.0, AWX can only be deployed in the namespace that the operator exists in. This is called a namespace-scoped operator. If you are upgrading from an earlier version, you will want to -delete your existing `awx-operator` service account, role and role binding. +Starting with awx-operator 0.14.0, AWX can only be deployed in the namespace that the operator exists in. This is called a namespace-scoped operator. If you are upgrading from an earlier version, you will want to delete your existing `awx-operator` service account, role and role binding. -##### Project is now based on v1.x of the operator-sdk project +### Project is now based on v1.x of the operator-sdk project Starting with awx-operator 0.14.0, the project is now based on operator-sdk 1.x. 
You may need to manually delete your old operator Deployment to avoid issues. -##### Steps to upgrade +### Steps to upgrade to v0.14.0 Delete your old AWX Operator and existing `awx-operator` service account, role and role binding in `default` namespace first: -``` -$ kubectl -n default delete deployment awx-operator -$ kubectl -n default delete serviceaccount awx-operator -$ kubectl -n default delete clusterrolebinding awx-operator -$ kubectl -n default delete clusterrole awx-operator +```sh +kubectl -n default delete deployment awx-operator +kubectl -n default delete serviceaccount awx-operator +kubectl -n default delete clusterrolebinding awx-operator +kubectl -n default delete clusterrole awx-operator ``` -Then install the new AWX Operator by following the instructions in [Basic Install](#basic-install-on-existing-cluster). The `NAMESPACE` environment variable have to be the name of the namespace in which your old AWX instance resides. +Then install the new AWX Operator by following the instructions in [Basic Install](../installation/basic-install.md). The `NAMESPACE` environment variable has to be the name of the namespace in which your old AWX instance resides. Once the new AWX Operator is up and running, your AWX deployment will also be upgraded. diff --git a/docs/user-guide/admin-user-account-configuration.md b/docs/user-guide/admin-user-account-configuration.md index 90dda8139..2a8da32a0 100644 --- a/docs/user-guide/admin-user-account-configuration.md +++ b/docs/user-guide/admin-user-account-configuration.md @@ -1,15 +1,15 @@ -### Admin user account configuration +# Admin user account configuration There are three variables that are customizable for the admin user account creation. 
-| Name | Description | Default | -| --------------------- | -------------------------------------------- | ---------------- | -| admin_user | Name of the admin user | admin | -| admin_email | Email of the admin user | test@example.com | -| admin_password_secret | Secret that contains the admin user password | Empty string | +| Name | Description | Default | +| --------------------- | -------------------------------------------- | ------------------ | +| admin_user | Name of the admin user | `admin` | +| admin_email | Email of the admin user | `test@example.com` | +| admin_password_secret | Secret that contains the admin user password | Empty string | - -> :warning: **admin_password_secret must be a Kubernetes secret and not your text clear password**. +!!! warning + `admin_password_secret` must be a Kubernetes secret and not your text clear password. If `admin_password_secret` is not provided, the operator will look for a secret named `-admin-password` for the admin password. If it is not present, the operator will generate a password and create a Secret from it named `-admin-password`. @@ -28,16 +28,16 @@ stringData: password: mysuperlongpassword ``` -### Secret Key Configuration +## Secret Key Configuration This key is used to encrypt sensitive data in the database. | Name | Description | Default | | ----------------- | ----------------------------------------------------- | ---------------- | -| secret_key_secret | Secret that contains the symmetric key for encryption | Generated | - +| secret_key_secret | Secret that contains the symmetric key for encryption | Generated | -> :warning: **secret_key_secret must be a Kubernetes secret and not your text clear secret value**. +!!! warning + `secret_key_secret` must be a Kubernetes secret and not your text clear secret value. If `secret_key_secret` is not provided, the operator will look for a secret named `-secret-key` for the secret key. 
If it is not present, the operator will generate a password and create a Secret from it named `-secret-key`. It is important to not delete this secret as it will be needed for upgrades and if the pods get scaled down at any point. If you are using a GitOps flow, you will want to pass a secret key secret. diff --git a/docs/user-guide/advanced-configuration/adding-execution-nodes.md b/docs/user-guide/advanced-configuration/adding-execution-nodes.md deleted file mode 100644 index ba29640eb..000000000 --- a/docs/user-guide/advanced-configuration/adding-execution-nodes.md +++ /dev/null @@ -1,4 +0,0 @@ -### Adding Execution Nodes -Starting with AWX Operator v0.30.0 and AWX v21.7.0, standalone execution nodes can be added to your deployments. -See [Managing Capacity With Instances](https://ansible.readthedocs.io/projects/awx/en/latest/administration/instances.html) chapter of the AWX Administration Guide for information about this feature. - diff --git a/docs/user-guide/advanced-configuration/assigning-awx-pods-to-specific-nodes.md b/docs/user-guide/advanced-configuration/assigning-awx-pods-to-specific-nodes.md index 941986266..c0e33bea5 100644 --- a/docs/user-guide/advanced-configuration/assigning-awx-pods-to-specific-nodes.md +++ b/docs/user-guide/advanced-configuration/assigning-awx-pods-to-specific-nodes.md @@ -1,4 +1,4 @@ -#### Assigning AWX pods to specific nodes +# Assigning AWX pods to specific nodes You can constrain the AWX pods created by the operator to run on a certain subset of nodes. `node_selector` and `postgres_selector` constrains the AWX pods to run only on the nodes that match all the specified key/value pairs. `tolerations` and `postgres_tolerations` allow the AWX @@ -6,8 +6,8 @@ pods to be scheduled onto nodes with matching taints. The ability to specify topologySpreadConstraints is also allowed through `topology_spread_constraints` If you want to use affinity rules for your AWX pod you can use the `affinity` option. 
-If you want to constrain the web and task pods individually, you can do so by specificying the deployment type before the specific setting. For -example, specifying `task_tolerations` will allow the AWX task pod to be scheduled onto nodes with matching taints. +If you want to constrain the web and task pods individually, you can do so by specifying the deployment type before the specific setting. For +example, specifying `task_tolerations` will allow the AWX task pod to be scheduled onto nodes with matching taints. | Name | Description | Default | | -------------------------------- | ---------------------------------------- | -------------------------------- | @@ -89,7 +89,7 @@ spec: topologyKey: topology.kubernetes.io/zone ``` -#### Special Note on DB-Migration Job Scheduling +## Special Note on DB-Migration Job Scheduling For the **db-migration job**, which applies database migrations at cluster startup, you can specify scheduling settings using the `task_*` configurations such as `task_node_selector`, `task_tolerations`, etc. If these task-specific settings are not defined, the job will automatically use the global AWX configurations like `node_selector` and `tolerations`. diff --git a/docs/user-guide/advanced-configuration/auto-upgrade.md b/docs/user-guide/advanced-configuration/auto-upgrade.md index 298ff0911..3d75de0fb 100644 --- a/docs/user-guide/advanced-configuration/auto-upgrade.md +++ b/docs/user-guide/advanced-configuration/auto-upgrade.md @@ -1,10 +1,10 @@ -#### Auto upgrade +# Auto upgrade + With this parameter you can influence the behavior during an operator upgrade. If set to `true`, the operator will upgrade the specific instance directly. When the value is set to `false`, and we have a running deployment, the operator will not update the AWX instance. This can be useful when you have multiple AWX instances which you want to upgrade step by step instead of all at once. 
- | Name | Description | Default | | -------------| ---------------------------------- | ------- | | auto_upgrade | Automatic upgrade of AWX instances | true | @@ -12,11 +12,11 @@ This can be useful when you have multiple AWX instances which you want to upgrad Example configuration of `auto_upgrade` parameter ```yaml - spec: - auto_upgrade: true +spec: + auto_upgrade: true ``` -##### Upgrade of instances without auto upgrade +## Upgrade of instances without auto upgrade There are two ways to upgrade instances which are marked with the 'auto_upgrade: false' flag. @@ -28,8 +28,10 @@ Changing flags: Delete the deployment: -- delete the deployment object of your AWX instance -``` -$ kubectl -n awx delete deployment -``` -- wait until the instance gets redeployed +- delete the deployment object of your AWX instance + + ```sh + kubectl -n awx delete deployment + ``` + +- wait until the instance gets redeployed diff --git a/docs/user-guide/advanced-configuration/container-probes.md b/docs/user-guide/advanced-configuration/container-probes.md index 5d437d6f6..bbe6184fe 100644 --- a/docs/user-guide/advanced-configuration/container-probes.md +++ b/docs/user-guide/advanced-configuration/container-probes.md @@ -1,19 +1,20 @@ -#### Container Probes +# Container Probes + These parameters control the usage of liveness and readiness container probes for the web and task containers. -> [!ALERT] -> All of probes are disabled by default for now, to enable it, set the *_period parameters. For example: - -``` +!!! tip + All of probes are disabled by default for now, to enable it, set the `*_period` parameters. 
For example: -web_liveness_period: 15 -web_readiness_period: 15 -task_liveness_period: 15 -task_readiness_period: 15 -``` + ```yaml + spec: + web_liveness_period: 15 + web_readiness_period: 15 + task_liveness_period: 15 + task_readiness_period: 15 + ``` -#### Web / Task Container Liveness Check +## Web / Task Container Liveness Check The liveness probe queries the status of the supervisor daemon of the container. The probe will fail if it detects one of the services in a state other than "RUNNING". @@ -29,7 +30,7 @@ detects one of the services in a state other than "RUNNING". | task_liveness_failure_threshold| Number of consecutive failure events to identify failure of container | 3 | | task_liveness_timeout | Number of seconds to wait for a probe response from container | 1 | -#### Web Container Readiness Check +## Web Container Readiness Check This is an HTTP check against the status endpoint to confirm the system is still able to respond to web requests. @@ -40,7 +41,7 @@ This is an HTTP check against the status endpoint to confirm the system is still | web_readiness_failure_threshold| Number of consecutive failure events to identify failure of container | 3 | | web_readiness_timeout | Number of seconds to wait for a probe response from container | 1 | -#### Task Container Readiness Check +## Task Container Readiness Check This is a command probe using the builtin check command of the awx-manage utility. diff --git a/docs/user-guide/advanced-configuration/containers-resource-requirements.md b/docs/user-guide/advanced-configuration/containers-resource-requirements.md index a014c3bf4..72eac0d9b 100644 --- a/docs/user-guide/advanced-configuration/containers-resource-requirements.md +++ b/docs/user-guide/advanced-configuration/containers-resource-requirements.md @@ -1,24 +1,4 @@ -#### Containers HostAliases Requirements - -Sometimes you might need to use [HostAliases](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) in web/task containers. 
- -| Name | Description | Default | -| ------------ | --------------------- | ------- | -| host_aliases | A list of HostAliases | None | - -Example of customization could be: - -```yaml ---- -spec: - ... - host_aliases: - - ip: - hostnames: - - -``` - -#### Containers Resource Requirements +# Containers Resource Requirements The resource requirements for both, the task and the web containers are configurable - both the lower end (requests) and the upper end (limits). @@ -26,13 +6,12 @@ The resource requirements for both, the task and the web containers are configur | ------------------------------------ | ------------------------------------------------------------ | ------------------------------------ | | web_resource_requirements | Web container resource requirements | requests: {cpu: 100m, memory: 128Mi} | | task_resource_requirements | Task container resource requirements | requests: {cpu: 100m, memory: 128Mi} | -| ee_resource_requirements | EE control plane container resource requirements | requests: {cpu: 50m, memory: 64Mi} | +| ee_resource_requirements | EE control plane container resource requirements | requests: {cpu: 50m, memory: 64Mi} | | redis_resource_requirements | Redis container resource requirements | requests: {cpu: 100m, memory: 128Mi} | | postgres_resource_requirements | Postgres container (and initContainer) resource requirements | requests: {cpu: 10m, memory: 64Mi} | | rsyslog_resource_requirements | Rsyslog container resource requirements | requests: {cpu: 100m, memory: 128Mi} | | init_container_resource_requirements | Init Container resource requirements | requests: {cpu: 100m, memory: 128Mi} | - Example of customization could be: ```yaml @@ -86,8 +65,7 @@ spec: memory: 2Gi ``` - -#### Limits and ResourceQuotas +## Limits and ResourceQuotas If the cluster you are deploying in has a ResoruceQuota, you will need to configure resource limits for all of the pods deployed in that cluster. 
This can be done for AWX pods on the AWX spec in the manner shown above. diff --git a/docs/user-guide/advanced-configuration/csrf-cookie-secure-setting.md b/docs/user-guide/advanced-configuration/csrf-cookie-secure-setting.md index a0f0e4079..411bc1952 100644 --- a/docs/user-guide/advanced-configuration/csrf-cookie-secure-setting.md +++ b/docs/user-guide/advanced-configuration/csrf-cookie-secure-setting.md @@ -1,4 +1,4 @@ -#### CSRF Cookie Secure Setting +# CSRF Cookie Secure Setting With `csrf_cookie_secure`, you can pass the value for `CSRF_COOKIE_SECURE` to `/etc/tower/settings.py` @@ -9,6 +9,6 @@ With `csrf_cookie_secure`, you can pass the value for `CSRF_COOKIE_SECURE` to `/ Example configuration of the `csrf_cookie_secure` setting: ```yaml - spec: - csrf_cookie_secure: 'False' +spec: + csrf_cookie_secure: 'False' ``` diff --git a/docs/user-guide/advanced-configuration/custom-receptor-certs.md b/docs/user-guide/advanced-configuration/custom-receptor-certs.md index e0c8dfe07..039243f4b 100644 --- a/docs/user-guide/advanced-configuration/custom-receptor-certs.md +++ b/docs/user-guide/advanced-configuration/custom-receptor-certs.md @@ -1,5 +1,4 @@ - -### Custom Receptor CA +# Custom Receptor CA The control nodes on the K8S cluster will communicate with execution nodes via mutual TLS TCP connections, running via Receptor. Execution nodes will verify incoming connections by ensuring the x509 certificate was issued by a trusted Certificate Authority (CA). @@ -21,4 +20,5 @@ If this secret is created after AWX is deployed, run the following to restart th kubectl rollout restart deployment awx-demo ``` -**Important Note**, changing the receptor CA will break connections to any existing execution nodes. These nodes will enter an `unavailable` state, and jobs will not be able to run on them. Users will need to download and re-run the install bundle for each execution node. This will replace the TLS certificate files with those signed by the new CA. 
The execution nodes should then appear in a `ready` state after a few minutes. \ No newline at end of file +!!! warning + Changing the receptor CA will break connections to any existing execution nodes. These nodes will enter an `unavailable` state, and jobs will not be able to run on them. Users will need to download and re-run the install bundle for each execution node. This will replace the TLS certificate files with those signed by the new CA. The execution nodes should then appear in a `ready` state after a few minutes. diff --git a/docs/user-guide/advanced-configuration/default-execution-environments-from-private-registries.md b/docs/user-guide/advanced-configuration/default-execution-environments-from-private-registries.md deleted file mode 100644 index e0427e86d..000000000 --- a/docs/user-guide/advanced-configuration/default-execution-environments-from-private-registries.md +++ /dev/null @@ -1,43 +0,0 @@ -#### Default execution environments from private registries - -In order to register default execution environments from private registries, the Custom Resource needs to know about the pull credentials. Those credentials should be stored as a secret and either specified as `ee_pull_credentials_secret` at the CR spec level, or simply be present on the namespace under the name `-ee-pull-credentials` . Instance initialization will register a `Container registry` type credential on the deployed instance and assign it to the registered default execution environments. - -The secret should be formatted as follows: - -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: -ee-pull-credentials - namespace: -stringData: - url: - username: - password: - ssl_verify: -type: Opaque -``` - -##### Control plane ee from private registry -The images listed in "ee_images" will be added as globally available Execution Environments. The "control_plane_ee_image" will be used to run project updates. 
In order to use a private image for any of these you'll need to use `image_pull_secrets` to provide a list of k8s pull secrets to access it. Currently the same secret is used for any of these images supplied at install time. - -You can create `image_pull_secret` -``` -kubectl create secret -cp-pull-credentials regcred --docker-server= --docker-username= --docker-password= --docker-email= -``` -If you need more control (for example, to set a namespace or a label on the new secret) then you can customize the Secret before storing it - -Example spec file extra-config - -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: -cp-pull-credentials - namespace: -data: - .dockerconfigjson: -type: kubernetes.io/dockerconfigjson -``` diff --git a/docs/user-guide/advanced-configuration/deploying-a-specific-version-of-awx.md b/docs/user-guide/advanced-configuration/deploying-a-specific-version-of-awx.md index 91e60d69f..a4fcad0a9 100644 --- a/docs/user-guide/advanced-configuration/deploying-a-specific-version-of-awx.md +++ b/docs/user-guide/advanced-configuration/deploying-a-specific-version-of-awx.md @@ -1,20 +1,23 @@ -#### Deploying a specific version of AWX - -There are a few variables that are customizable for awx the image management. 
- -| Name | Description | Default | -| ----------------------------- | ------------------------- | ------------------------------------------ | -| image | Path of the image to pull | quay.io/ansible/awx | -| image_version | Image version to pull | value of DEFAULT_AWX_VERSION or latest | -| image_pull_policy | The pull policy to adopt | IfNotPresent | -| image_pull_secrets | The pull secrets to use | None | -| ee_images | A list of EEs to register | quay.io/ansible/awx-ee:DEFAULT_AWX_VERSION | -| redis_image | Path of the image to pull | docker.io/redis | -| redis_image_version | Image version to pull | latest | -| control_plane_ee_image | Image version to pull | quay.io/ansible/awx-ee:DEFAULT_AWX_VERSION | -| init_container_image | Path of the image to pull | quay.io/ansible/awx-ee | -| init_container_image_version | Image version to pull | value of DEFAULT_AWX_VERSION or latest | -| init_projects_container_image | Image version to pull | quay.io/centos/centos:stream9 | +# Using images from private registries + +## Available variables to use images from private registries + +There are variables that are customizable for awx the image management. 
+ +| Name | Description | Default | +| ----------------------------- | ----------------------------- | ------------------------------------------ | +| image | Path of the image to pull | quay.io/ansible/awx | +| image_version | Image version to pull | value of DEFAULT_AWX_VERSION or latest | +| image_pull_policy | The pull policy to adopt | IfNotPresent | +| image_pull_secrets | The pull secrets to use | None | +| ee_images | A list of EEs to register | quay.io/ansible/awx-ee:DEFAULT_AWX_VERSION | +| ee_pull_credentials_secret | The pull secret for ee_images | None | +| redis_image | Path of the image to pull | docker.io/redis | +| redis_image_version | Image version to pull | latest | +| control_plane_ee_image | Image version to pull | quay.io/ansible/awx-ee:DEFAULT_AWX_VERSION | +| init_container_image | Path of the image to pull | quay.io/ansible/awx-ee | +| init_container_image_version | Image version to pull | value of DEFAULT_AWX_VERSION or latest | +| init_projects_container_image | Image version to pull | quay.io/centos/centos:stream9 | Example of customization could be: @@ -36,4 +39,52 @@ spec: init_projects_container_image: myorg/my-mirrored-centos:stream9 ``` -**Note**: The `image` and `image_version` are intended for local mirroring scenarios. Please note that using a version of AWX other than the one bundled with the `awx-operator` is **not** supported. For the default values, check the [main.yml](https://github.com/ansible/awx-operator/blob/devel/roles/installer/defaults/main.yml) file. +!!! warning + The `image` and `image_version` are intended for local mirroring scenarios. Please note that using a version of AWX other than the one bundled with the `awx-operator` is **not** supported. For the default values, check the [main.yml](https://github.com/ansible/awx-operator/blob/devel/roles/installer/defaults/main.yml) file. 
+ +## Default execution environments from private registries + +In order to register default execution environments from private registries, the Custom Resource needs to know about the pull credentials. Those credentials should be stored as a secret and either specified as `ee_pull_credentials_secret` at the CR spec level, or simply be present on the namespace under the name `-ee-pull-credentials` . Instance initialization will register a `Container registry` type credential on the deployed instance and assign it to the registered default execution environments. + +The secret should be formatted as follows: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: -ee-pull-credentials + namespace: +stringData: + url: + username: + password: + ssl_verify: +type: Opaque +``` + +## Control plane ee from private registry + +The images listed in `ee_images` will be added as globally available Execution Environments. The `control_plane_ee_image` will be used to run project updates. In order to use a private image for any of these you'll need to use `image_pull_secrets` to provide a list of k8s pull secrets to access it. Currently the same secret is used for any of these images supplied at install time. 
+ +You can create `image_pull_secret` + +```sh +kubectl create secret -cp-pull-credentials regcred --docker-server= --docker-username= --docker-password= --docker-email= +``` + +If you need more control (for example, to set a namespace or a label on the new secret) then you can customize the Secret before storing it + +Example spec file extra-config + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: -cp-pull-credentials + namespace: +data: + .dockerconfigjson: +type: kubernetes.io/dockerconfigjson +``` diff --git a/docs/user-guide/advanced-configuration/disable-ipv6.md b/docs/user-guide/advanced-configuration/disable-ipv6.md index b87aff3bd..d42006015 100644 --- a/docs/user-guide/advanced-configuration/disable-ipv6.md +++ b/docs/user-guide/advanced-configuration/disable-ipv6.md @@ -1,12 +1,13 @@ -### Disable IPV6 -Starting with AWX Operator release 0.24.0,[IPV6 was enabled in ngnix configuration](https://github.com/ansible/awx-operator/pull/950) which causes +# Disable IPv6 + +Starting with AWX Operator release 0.24.0, [IPv6 was enabled in ngnix configuration](https://github.com/ansible/awx-operator/pull/950) which causes upgrades and installs to fail in environments where IPv6 is not allowed. Starting in 1.1.1 release, you can set the `ipv6_disabled` flag on the AWX spec. If you need to use an AWX operator version between 0.24.0 and 1.1.1 in an IPv6 disabled environment, it is suggested to enabled ipv6 on worker nodes. In order to disable ipv6 on ngnix configuration (awx-web container), add following to the AWX spec. 
-The following variables are customizable +The following variables are customizable: | Name | Description | Default | | ------------- | ---------------------- | ------- | diff --git a/docs/user-guide/advanced-configuration/enabling-ldap-integration-at-awx-bootstrap.md b/docs/user-guide/advanced-configuration/enabling-ldap-integration-at-awx-bootstrap.md index ce0a3cbdf..9fbccf7fa 100644 --- a/docs/user-guide/advanced-configuration/enabling-ldap-integration-at-awx-bootstrap.md +++ b/docs/user-guide/advanced-configuration/enabling-ldap-integration-at-awx-bootstrap.md @@ -1,10 +1,97 @@ -#### Enabling LDAP Integration at AWX bootstrap +# Enabling LDAP Integration at AWX bootstrap -A sample of extra settings can be found as below. All possible options can be found here: https://django-auth-ldap.readthedocs.io/en/latest/reference.html#settings +A sample of extra settings can be found as below. All possible options can be found here: -> **NOTE:** These values are inserted into a Python file, so pay close attention to which values need quotes and which do not. +Refer to the [Extra Settings](./extra-settings.md) page for more information on how to configure extra settings. + +!!! tip + To trust a custom Certificate Authority for your LDAP server, or to specify password LDAP bind DN, refer to the [Trusting a Custom Certificate Authority](./trusting-a-custom-certificate-authority.md) page. + +## Configure LDAP integration via `extra_settings_files` + +Create a Python file with arbitrary name, e.g. 
`custom_ldap_settings.py`, and add the following content for example: + +```python title="custom_ldap_settings.py" +AUTH_LDAP_SERVER_URI = "ldaps://ad01.abc.com:636 ldaps://ad02.abc.com:636" +AUTH_LDAP_BIND_DN = "CN=LDAP User,OU=Service Accounts,DC=abc,DC=com" +AUTH_LDAP_USER_SEARCH = LDAPSearch( + "DC=abc,DC=com", + ldap.SCOPE_SUBTREE, + "(sAMAccountName=%(user)s)", +) +AUTH_LDAP_GROUP_SEARCH = LDAPSearch( + "OU=Groups,DC=abc,DC=com", + ldap.SCOPE_SUBTREE, + "(objectClass=group)", +) +AUTH_LDAP_GROUP_TYPE = GroupOfNamesType() +AUTH_LDAP_USER_ATTR_MAP = { + "first_name": "givenName", + "last_name": "sn", + "email": "mail", +} +AUTH_LDAP_REQUIRE_GROUP = "CN=operators,OU=Groups,DC=abc,DC=com" +AUTH_LDAP_USER_FLAGS_BY_GROUP = { + "is_superuser": ["CN=admin,OU=Groups,DC=abc,DC=com"], +} +AUTH_LDAP_ORGANIZATION_MAP = { + "abc": { + "admins": "CN=admin,OU=Groups,DC=abc,DC=com", + "remove_admins": False, + "remove_users": False, + "users": True, + } +} +AUTH_LDAP_TEAM_MAP = { + "admin": { + "organization": "abc", + "remove": True, + "users": "CN=admin,OU=Groups,DC=abc,DC=com", + } +} +``` + +Create a `ConfigMap` with the content of the above Python file. + +```bash +kubectl create configmap custom-ldap-settings \ + --from-file /PATH/TO/YOUR/custom_ldap_settings.py +``` + +Then specify this ConfigMap to the `extra_settings_files` parameter. ```yaml +spec: + extra_settings_files: + configmaps: + - name: custom-ldap-settings + key: custom_ldap_settings.py +``` + +!!! note + If you have embedded some sensitive information like passwords in the Python file, you can create and pass a `Secret` instead of a `ConfigMap`. + + ```bash + kubectl create secret generic custom-ldap-settings \ + --from-file /PATH/TO/YOUR/custom_ldap_settings.py + ``` + + ```yaml + spec: + extra_settings_files: + secrets: + - name: custom-ldap-settings + key: custom_ldap_settings.py + ``` + +## Configure LDAP integration via `extra_settings` + +!!! 
note + These values are inserted into a Python file, so pay close attention to which values need quotes and which do not. + +```yaml +spec: + extra_settings: - setting: AUTH_LDAP_SERVER_URI value: >- "ldaps://ad01.abc.com:636 ldaps://ad02.abc.com:636" @@ -35,7 +122,6 @@ A sample of extra settings can be found as below. All possible options can be fo ] } - - setting: AUTH_LDAP_ORGANIZATION_MAP value: { "abc": { diff --git a/docs/user-guide/advanced-configuration/exporting-environment-variables-to-containers.md b/docs/user-guide/advanced-configuration/exporting-environment-variables-to-containers.md index 25dbcbb42..5f047c8ab 100644 --- a/docs/user-guide/advanced-configuration/exporting-environment-variables-to-containers.md +++ b/docs/user-guide/advanced-configuration/exporting-environment-variables-to-containers.md @@ -1,4 +1,4 @@ -#### Exporting Environment Variables to Containers +# Exporting Environment Variables to Containers If you need to export custom environment variables to your containers. @@ -9,7 +9,8 @@ If you need to export custom environment variables to your containers. | rsyslog_extra_env | Environment variables to be added to Rsyslog container | '' | | ee_extra_env | Environment variables to be added to EE container | '' | -> :warning: The `ee_extra_env` will only take effect to the globally available Execution Environments. For custom `ee`, please [customize the Pod spec](https://docs.ansible.com/ansible-tower/latest/html/administration/external_execution_envs.html#customize-the-pod-spec). +!!! warning + The `ee_extra_env` will only take effect to the globally available Execution Environments. For custom `ee`, please [customize the Pod spec](https://docs.ansible.com/ansible-tower/latest/html/administration/external_execution_envs.html#customize-the-pod-spec). 
Example configuration of environment variables diff --git a/docs/user-guide/advanced-configuration/horizontal-pod-autoscaler.md b/docs/user-guide/advanced-configuration/horizontal-pod-autoscaler.md index e44527e8b..9057ad5ab 100644 --- a/docs/user-guide/advanced-configuration/horizontal-pod-autoscaler.md +++ b/docs/user-guide/advanced-configuration/horizontal-pod-autoscaler.md @@ -1,4 +1,4 @@ -### Horizontal Pod Autoscaler (HPA) +# Horizontal Pod Autoscaler (HPA) Horizontal Pod Autoscaler allows Kubernetes to scale the number of replicas of deployments in response to configured metrics. @@ -10,15 +10,12 @@ The use of the settings below will tell the operator to not manage the replicas field on the identified deployments even if a replicas count has been set for those properties in the operator resource. -| Name | Description | Default | -| -----------------------| ----------------------------------------- | ------- | -| web_manage_replicas | Indicates operator should control the | true | -| | replicas count for the web deployment. | | -| | | | -| task_manage_replicas | Indicates operator should control the | true | -| | replicas count for the task deployment. | | +| Name | Description | Default | +| ---------------------- | ----------------------------------------------------------------------------- | ------- | +| web_manage_replicas | Indicates operator should control the replicas count for the web deployment. | true | +| task_manage_replicas | Indicates operator should control the replicas count for the task deployment. | true | -#### Recommended Settings for HPA +## Recommended Settings for HPA Please see the Kubernetes documentation on how to configure the horizontal pod autoscaler. 
diff --git a/docs/user-guide/advanced-configuration/host-aliases.md b/docs/user-guide/advanced-configuration/host-aliases.md new file mode 100644 index 000000000..4d40f1d16 --- /dev/null +++ b/docs/user-guide/advanced-configuration/host-aliases.md @@ -0,0 +1,19 @@ +# HostAliases + +Sometimes you might need to use [HostAliases](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) in web/task containers. + +| Name | Description | Default | +| ------------ | --------------------- | ------- | +| host_aliases | A list of HostAliases | None | + +Example of customization could be: + +```yaml +--- +spec: + ... + host_aliases: + - ip: + hostnames: + - +``` diff --git a/docs/user-guide/advanced-configuration/mesh-ingress-instance-listener-address-on-awx-ui.png b/docs/user-guide/advanced-configuration/images/mesh-ingress-instance-listener-address-on-awx-ui.png similarity index 100% rename from docs/user-guide/advanced-configuration/mesh-ingress-instance-listener-address-on-awx-ui.png rename to docs/user-guide/advanced-configuration/images/mesh-ingress-instance-listener-address-on-awx-ui.png diff --git a/docs/user-guide/advanced-configuration/mesh-ingress-instance-on-awx-ui.png b/docs/user-guide/advanced-configuration/images/mesh-ingress-instance-on-awx-ui.png similarity index 100% rename from docs/user-guide/advanced-configuration/mesh-ingress-instance-on-awx-ui.png rename to docs/user-guide/advanced-configuration/images/mesh-ingress-instance-on-awx-ui.png diff --git a/docs/user-guide/advanced-configuration/peering-to-mesh-ingress-on-awx-ui.png b/docs/user-guide/advanced-configuration/images/peering-to-mesh-ingress-on-awx-ui.png similarity index 100% rename from docs/user-guide/advanced-configuration/peering-to-mesh-ingress-on-awx-ui.png rename to docs/user-guide/advanced-configuration/images/peering-to-mesh-ingress-on-awx-ui.png diff --git a/docs/user-guide/advanced-configuration/labeling-operator-managed-objects.md 
b/docs/user-guide/advanced-configuration/labeling-operator-managed-objects.md index 7846e8124..40da85119 100644 --- a/docs/user-guide/advanced-configuration/labeling-operator-managed-objects.md +++ b/docs/user-guide/advanced-configuration/labeling-operator-managed-objects.md @@ -1,15 +1,13 @@ -#### Labeling operator managed objects +# Labeling operator managed objects -In certain situations labeling of Kubernetes objects managed by the operator -might be desired (e.g. for owner identification purposes). For that -`additional_labels` parameter could be used +In certain situations labeling of Kubernetes objects managed by the operator might be desired (e.g. for owner identification purposes). +For that `additional_labels` parameter could be used: | Name | Description | Default | | --------------------------- | ---------------------------------------------------------------------------------------- | ------- | | additional_labels | Additional labels defined on the resource, which should be propagated to child resources | [] | -Example configuration where only `my/team` and `my/service` labels will be -propagated to child objects (`Deployment`, `Secret`s, `ServiceAccount`, etc): +Example configuration where only `my/team` and `my/service` labels will be propagated to child objects (`Deployment`, `Secret`s, `ServiceAccount`, etc): ```yaml apiVersion: awx.ansible.com/v1beta1 @@ -22,7 +20,7 @@ metadata: my/do-not-inherit: "yes" spec: additional_labels: - - my/team - - my/service + - my/team + - my/service ... 
``` diff --git a/docs/user-guide/advanced-configuration/mesh-ingress.md b/docs/user-guide/advanced-configuration/mesh-ingress.md index 88332a5c9..ab368ec23 100644 --- a/docs/user-guide/advanced-configuration/mesh-ingress.md +++ b/docs/user-guide/advanced-configuration/mesh-ingress.md @@ -168,7 +168,7 @@ spec: After AWXMeshIngress has been successfully created, a new Instance with the same name will be registered to AWX and will be visible on the Instance UI page -![mesh ingress instance on AWX UI](mesh-ingress-instance-on-awx-ui.png) +![mesh ingress instance on AWX UI](./images/mesh-ingress-instance-on-awx-ui.png) The Instance should have at least 2 listener addresses. @@ -177,10 +177,10 @@ In this example, the mesh ingress has two listener addresses: - one for internal, that is used for peering to by all control nodes (top) - one for external, that is exposed to a route so external execution nodes can peer into it (bottom)) -![mesh ingress instance listener address on awx ui](mesh-ingress-instance-listener-address-on-awx-ui.png) +![mesh ingress instance listener address on awx ui](./images/mesh-ingress-instance-listener-address-on-awx-ui.png) When selecting peer for new instance the mesh ingress instance should now be present as a option. -![peering to mesh ingress on awx ui](peering-to-mesh-ingress-on-awx-ui.png) +![peering to mesh ingress on awx ui](./images/peering-to-mesh-ingress-on-awx-ui.png) For more information about how to create external remote execution and hop nodes and configuring the mesh, see AWX Documentation on [Add a instance](https://ansible.readthedocs.io/projects/awx/en/latest/administration/instances.html#add-an-instance). 
diff --git a/docs/user-guide/advanced-configuration/no-log.md b/docs/user-guide/advanced-configuration/no-log.md index 0b8862529..270e556b3 100644 --- a/docs/user-guide/advanced-configuration/no-log.md +++ b/docs/user-guide/advanced-configuration/no-log.md @@ -1,4 +1,5 @@ -#### No Log +# No Log + Configure no_log for tasks with no_log | Name | Description | Default | @@ -8,6 +9,6 @@ Configure no_log for tasks with no_log Example configuration of `no_log` parameter ```yaml - spec: - no_log: true +spec: + no_log: true ``` diff --git a/docs/user-guide/advanced-configuration/persisting-projects-directory.md b/docs/user-guide/advanced-configuration/persisting-projects-directory.md index 6fa1c9597..2bea30bcf 100644 --- a/docs/user-guide/advanced-configuration/persisting-projects-directory.md +++ b/docs/user-guide/advanced-configuration/persisting-projects-directory.md @@ -1,4 +1,4 @@ -#### Persisting Projects Directory +# Persisting Projects Directory In cases which you want to persist the `/var/lib/projects` directory, there are few variables that are customizable for the `awx-operator`. diff --git a/docs/user-guide/advanced-configuration/pods-termination-grace-period.md b/docs/user-guide/advanced-configuration/pods-termination-grace-period.md index e922e4a53..1616617e3 100644 --- a/docs/user-guide/advanced-configuration/pods-termination-grace-period.md +++ b/docs/user-guide/advanced-configuration/pods-termination-grace-period.md @@ -1,38 +1,17 @@ -#### Pods termination grace period +# Pods termination grace period -During deployment restarts or new rollouts, when old ReplicaSet Pods are being -terminated, the corresponding jobs which are managed (executed or controlled) -by old AWX Pods may end up in `Error` state as there is no mechanism to -transfer them to the newly spawned AWX Pods. 
To work around the problem one -could set `termination_grace_period_seconds` in AWX spec, which does the -following: +During deployment restarts or new rollouts, when old ReplicaSet Pods are being terminated, the corresponding jobs which are managed (executed or controlled) by old AWX Pods may end up in `Error` state as there is no mechanism to transfer them to the newly spawned AWX Pods. +To work around the problem one could set `termination_grace_period_seconds` in AWX spec, which does the following: -* It sets the corresponding - [`terminationGracePeriodSeconds`](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) - Pod spec of the AWX Deployment to the value provided +- It sets the corresponding [`terminationGracePeriodSeconds`](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) Pod spec of the AWX Deployment to the value provided + - The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal - > The grace period is the duration in seconds after the processes running in - > the pod are sent a termination signal and the time when the processes are - > forcibly halted with a kill signal - -* It adds a - [`PreStop`](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) - hook script, which will keep AWX Pods in terminating state until it finished, - up to `terminationGracePeriodSeconds`. - - > This grace period applies to the total time it takes for both the PreStop - > hook to execute and for the Container to stop normally - - While the hook script just waits until the corresponding AWX Pod (instance) - no longer has any managed jobs, in which case it finishes with success and - hands over the overall Pod termination process to normal AWX processes. 
-
-One may want to set this value to the maximum duration they accept to wait for
-the affected Jobs to finish. Keeping in mind that such finishing jobs may
-increase Pods termination time in such situations as `kubectl rollout restart`,
-AWX upgrade by the operator, or Kubernetes [API-initiated
-evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/).
+- It adds a [`PreStop`](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) hook script, which will keep AWX Pods in terminating state until it finishes, up to `terminationGracePeriodSeconds`.
+    - This grace period applies to the total time it takes for both the PreStop hook to execute and for the Container to stop normally
+    - While the hook script just waits until the corresponding AWX Pod (instance) no longer has any managed jobs, in which case it finishes with success and hands over the overall Pod termination process to normal AWX processes.
+One may want to set this value to the maximum duration they accept to wait for the affected Jobs to finish.
+Keep in mind that such finishing jobs may increase Pods termination time in such situations as `kubectl rollout restart`, AWX upgrade by the operator, or Kubernetes [API-initiated evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/).
| Name | Description | Default | | -------------------------------- | --------------------------------------------------------------- | ------- | diff --git a/docs/user-guide/advanced-configuration/priority-classes.md b/docs/user-guide/advanced-configuration/priority-classes.md index acecf9476..7a832a01d 100644 --- a/docs/user-guide/advanced-configuration/priority-classes.md +++ b/docs/user-guide/advanced-configuration/priority-classes.md @@ -1,15 +1,10 @@ -#### Priority Classes +# Priority Classes The AWX and Postgres pods can be assigned a custom PriorityClass to rank their importance compared to other pods in your cluster, which determines which pods get evicted first if resources are running low. First, [create your PriorityClass](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass) if needed. Then set the name of your priority class to the control plane and postgres pods as shown below. ```yaml ---- -apiVersion: awx.ansible.com/v1beta1 -kind: AWX -metadata: - name: awx-demo spec: ... control_plane_priority_class: awx-demo-high-priority diff --git a/docs/user-guide/advanced-configuration/privileged-tasks.md b/docs/user-guide/advanced-configuration/privileged-tasks.md index c09d6d476..e4532829a 100644 --- a/docs/user-guide/advanced-configuration/privileged-tasks.md +++ b/docs/user-guide/advanced-configuration/privileged-tasks.md @@ -1,4 +1,4 @@ -#### Privileged Tasks +# Privileged Tasks Depending on the type of tasks that you'll be running, you may find that you need the task pod to run as `privileged`. This can open yourself up to a variety of security concerns, so you should be aware (and verify that you have the privileges) to do this if necessary. 
In order to toggle this feature, you can add the following to your custom resource: @@ -11,8 +11,8 @@ spec: If you are attempting to do this on an OpenShift cluster, you will need to grant the `awx` ServiceAccount the `privileged` SCC, which can be done with: -``` -$ oc adm policy add-scc-to-user privileged -z awx +```sh +oc adm policy add-scc-to-user privileged -z awx ``` Again, this is the most relaxed SCC that is provided by OpenShift, so be sure to familiarize yourself with the security concerns that accompany this action. diff --git a/docs/user-guide/advanced-configuration/redis-container-capabilities.md b/docs/user-guide/advanced-configuration/redis-container-capabilities.md index 1b5c59c8b..9bd8e400b 100644 --- a/docs/user-guide/advanced-configuration/redis-container-capabilities.md +++ b/docs/user-guide/advanced-configuration/redis-container-capabilities.md @@ -1,4 +1,4 @@ -#### Redis container capabilities +# Redis container capabilities Depending on your kubernetes cluster and settings you might need to grant some capabilities to the redis container so it can start. Set the `redis_capabilities` option so the capabilities are added in the deployment. diff --git a/docs/user-guide/advanced-configuration/scaling-the-web-and-task-pods-independently.md b/docs/user-guide/advanced-configuration/scaling-the-web-and-task-pods-independently.md index 9f1ed01d9..a89b10579 100644 --- a/docs/user-guide/advanced-configuration/scaling-the-web-and-task-pods-independently.md +++ b/docs/user-guide/advanced-configuration/scaling-the-web-and-task-pods-independently.md @@ -1,4 +1,4 @@ -#### Scaling the Web and Task Pods independently +# Scaling the Web and Task Pods independently You can scale replicas up or down for each deployment by using the `web_replicas` or `task_replicas` respectively. You can scale all pods across both deployments by using `replicas` as well. 
The logic behind these CRD keys acts as such: @@ -7,7 +7,7 @@ You can scale replicas up or down for each deployment by using the `web_replicas These new replicas can be constrained in a similar manner to previous single deployments by appending the particular deployment name in front of the constraint used. More about those new constraints can be found in the [Assigning AWX pods to specific nodes](./assigning-awx-pods-to-specific-nodes.md) page. -##### Horizontal Pod Autoscaling +## Horizontal Pod Autoscaling -The operator is capable of working with Kubernete's HPA capabilities. See [Horizontal Pod Autoscaler](./horizontal-pod-autoscaler.md) +The operator is capable of working with Kubernetes' HPA capabilities. See [Horizontal Pod Autoscaler](./horizontal-pod-autoscaler.md) documentation for more information. diff --git a/docs/user-guide/advanced-configuration/security-context.md b/docs/user-guide/advanced-configuration/security-context.md index 9ab68ffcf..ba77b1b13 100644 --- a/docs/user-guide/advanced-configuration/security-context.md +++ b/docs/user-guide/advanced-configuration/security-context.md @@ -1,12 +1,11 @@ -#### Service Account +# Security Context It is possible to modify some `SecurityContext` proprieties of the various deployments and stateful sets if needed. 
| Name | Description | Default | | ---------------------------------- | -------------------------------------------- | ------- | | security_context_settings | SecurityContext for Task and Web deployments | {} | -| postgres_security_context_settings | SecurityContext for Task and Web deployments | {} | - +| postgres_security_context_settings | SecurityContext for PostgreSQL container | {} | Example configuration securityContext for the Task and Web deployments: @@ -17,11 +16,6 @@ spec: capabilities: drop: - ALL -``` - - -```yaml -spec: postgres_security_context_settings: runAsNonRoot: true ``` diff --git a/docs/user-guide/advanced-configuration/service-account.md b/docs/user-guide/advanced-configuration/service-account.md index 232f151ef..7fa5fbc33 100644 --- a/docs/user-guide/advanced-configuration/service-account.md +++ b/docs/user-guide/advanced-configuration/service-account.md @@ -1,4 +1,4 @@ -#### Service Account +# Service Account If you need to modify some `ServiceAccount` proprieties @@ -9,7 +9,7 @@ If you need to modify some `ServiceAccount` proprieties Example configuration of environment variables ```yaml - spec: - service_account_annotations: | - eks.amazonaws.com/role-arn: arn:aws:iam:::role/ +spec: + service_account_annotations: | + eks.amazonaws.com/role-arn: arn:aws:iam:::role/ ``` diff --git a/docs/user-guide/advanced-configuration/session-cookie-secure-setting.md b/docs/user-guide/advanced-configuration/session-cookie-secure-setting.md index 781e027bc..a64ec3f80 100644 --- a/docs/user-guide/advanced-configuration/session-cookie-secure-setting.md +++ b/docs/user-guide/advanced-configuration/session-cookie-secure-setting.md @@ -1,4 +1,4 @@ -#### Session Cookie Secure Setting +# Session Cookie Secure Setting With `session_cookie_secure`, you can pass the value for `SESSION_COOKIE_SECURE` to `/etc/tower/settings.py` diff --git a/docs/user-guide/advanced-configuration/trusting-a-custom-certificate-authority.md 
b/docs/user-guide/advanced-configuration/trusting-a-custom-certificate-authority.md index 9d07e5827..72404df2b 100644 --- a/docs/user-guide/advanced-configuration/trusting-a-custom-certificate-authority.md +++ b/docs/user-guide/advanced-configuration/trusting-a-custom-certificate-authority.md @@ -1,15 +1,15 @@ -#### Trusting a Custom Certificate Authority +# Trusting a Custom Certificate Authority In cases which you need to trust a custom Certificate Authority, there are few variables you can customize for the `awx-operator`. Trusting a custom Certificate Authority allows the AWX to access network services configured with SSL certificates issued locally, such as cloning a project from from an internal Git server via HTTPS. It is common for these scenarios, experiencing the error [unable to verify the first certificate](https://github.com/ansible/awx-operator/issues/376). - | Name | Description | Default | | -------------------------------- | ---------------------------------------- | --------| | ldap_cacert_secret | LDAP Certificate Authority secret name | '' | | ldap_password_secret | LDAP BIND DN Password secret name | '' | | bundle_cacert_secret | Certificate Authority secret name | '' | + Please note the `awx-operator` will look for the data field `ldap-ca.crt` in the specified secret when using the `ldap_cacert_secret`, whereas the data field `bundle-ca.crt` is required for `bundle_cacert_secret` parameter. Example of customization could be: @@ -26,15 +26,13 @@ spec: Create the secret with `kustomization.yaml` file: ```yaml -.... - +... secretGenerator: - name: -custom-certs files: - bundle-ca.crt= options: disableNameSuffixHash: true - ... 
``` @@ -42,15 +40,15 @@ Create the secret with CLI: * Certificate Authority secret -``` -# kubectl create secret generic -custom-certs \ - --from-file=ldap-ca.crt= \ - --from-file=bundle-ca.crt= -``` + ```sh + kubectl create secret generic -custom-certs \ + --from-file=ldap-ca.crt= \ + --from-file=bundle-ca.crt= + ``` * LDAP BIND DN Password secret -``` -# kubectl create secret generic -ldap-password \ - --from-literal=ldap-password= -``` + ```sh + kubectl create secret generic -ldap-password \ + --from-literal=ldap-password= + ``` diff --git a/docs/user-guide/database-configuration.md b/docs/user-guide/database-configuration.md index af5714d7b..6168e3ebe 100644 --- a/docs/user-guide/database-configuration.md +++ b/docs/user-guide/database-configuration.md @@ -1,16 +1,15 @@ -### Database Configuration +# Database Configuration -#### PostgreSQL Version +## PostgreSQL Version The default PostgreSQL version for the version of AWX bundled with the latest version of the awx-operator is PostgreSQL 15. You can find this default for a given version by at the default value for [supported_pg_version](https://github.com/ansible/awx-operator/blob/ffba1b4712a0b03f1faedfa70e3a9ef0d443e4a6/roles/installer/vars/main.yml#L7). We only have coverage for the default version of PostgreSQL. Newer versions of PostgreSQL will likely work, but should only be configured as an external database. If your database is managed by the awx-operator (default if you don't specify a `postgres_configuration_secret`), then you should not override the default version as this may cause issues when awx-operator tries to upgrade your postgresql pod. -#### External PostgreSQL Service +## External PostgreSQL Service To configure AWX to use an external database, the Custom Resource needs to know about the connection details. To do this, create a k8s secret with those connection details and specify the name of the secret as `postgres_configuration_secret` at the CR spec level. 
- The secret should be formatted as follows: ```yaml @@ -32,13 +31,16 @@ stringData: type: Opaque ``` -> Please ensure that the value for the variable `password` should _not_ contain single or double quotes (`'`, `"`) or backslashes (`\`) to avoid any issues during deployment, [backup](https://github.com/ansible/awx-operator/tree/devel/roles/backup) or [restoration](https://github.com/ansible/awx-operator/tree/devel/roles/restore). +!!! warning + Please ensure that the value for the variable `password` should _not_ contain single or double quotes (`'`, `"`) or backslashes (`\`) to avoid any issues during deployment, [backup](https://github.com/ansible/awx-operator/tree/devel/roles/backup) or [restoration](https://github.com/ansible/awx-operator/tree/devel/roles/restore). -> It is possible to set a specific username, password, port, or database, but still have the database managed by the operator. In this case, when creating the postgres-configuration secret, the `type: managed` field should be added. +!!! tip + It is possible to set a specific username, password, port, or database, but still have the database managed by the operator. In this case, when creating the postgres-configuration secret, the `type: managed` field should be added. -**Note**: The variable `sslmode` is valid for `external` databases only. The allowed values are: `prefer`, `disable`, `allow`, `require`, `verify-ca`, `verify-full`. +!!! note + The variable `sslmode` is valid for `external` databases only. The allowed values are: `prefer`, `disable`, `allow`, `require`, `verify-ca`, `verify-full`. -**Note**: The variable `target_session_attrs` is only useful for `clustered external` databases. The allowed values are: `any` (default), `read-write`, `read-only`, `primary`, `standby` and `prefer-standby`, whereby only `read-write` and `primary` really make sense in AWX use, as you want to connect to a database node that offers write support. 
+ The variable `target_session_attrs` is only useful for `clustered external` databases. The allowed values are: `any` (default), `read-write`, `read-only`, `primary`, `standby` and `prefer-standby`, whereby only `read-write` and `primary` really make sense in AWX use, as you want to connect to a database node that offers write support. Once the secret is created, you can specify it on your spec: @@ -49,11 +51,11 @@ spec: postgres_configuration_secret: ``` -#### Migrating data from an old AWX instance +## Migrating data from an old AWX instance For instructions on how to migrate from an older version of AWX, see [migration.md](../migration/migration.md). -#### Managed PostgreSQL Service +## Managed PostgreSQL Service If you don't have access to an external PostgreSQL service, the AWX operator can deploy one for you along side the AWX instance itself. @@ -92,9 +94,10 @@ spec: - 'max_connections=1000' ``` -**Note**: If `postgres_storage_class` is not defined, PostgreSQL will store it's data on a volume using the default storage class for your cluster. +!!! note + If `postgres_storage_class` is not defined, PostgreSQL will store its data on a volume using the default storage class for your cluster. -#### Note about overriding the postgres image +## Note about overriding the postgres image We recommend you use the default image sclorg image. If you are coming from a deployment using the old postgres image from dockerhub (postgres:13), upgrading from awx-operator version 2.12.2 and below to 2.15.0+ will handle migrating your data to the new postgresql image (postgresql-15-c9s). @@ -102,7 +105,7 @@ You can no longer configure a custom `postgres_data_path` because it is hardcoded If you override the postgres image to use a custom postgres image like postgres:15 for example, the default data directory path may be different. These images cannot be used interchangeably. 
-#### Initialize Postgres data volume +## Initialize Postgres data volume When using a hostPath backed PVC and some other storage classes like longhorn storagfe, the postgres data directory needs to be accessible by the user in the postgres pod (UID 26). diff --git a/docs/user-guide/network-and-tls-configuration.md b/docs/user-guide/network-and-tls-configuration.md index de81e08aa..974db31a7 100644 --- a/docs/user-guide/network-and-tls-configuration.md +++ b/docs/user-guide/network-and-tls-configuration.md @@ -1,6 +1,6 @@ -### Network and TLS Configuration +# Network and TLS Configuration -#### Service Type +## Service Type If the `service_type` is not specified, the `ClusterIP` service will be used for your AWX Tower service. @@ -24,7 +24,7 @@ spec: environment: testing ``` - * LoadBalancer +### LoadBalancer The following variables are customizable only when `service_type=LoadBalancer` @@ -54,7 +54,7 @@ When setting up a Load Balancer for HTTPS you will be required to set the `loadb The HTTPS Load Balancer also uses SSL termination at the Load Balancer level and will offload traffic to AWX over HTTP. - * NodePort +### NodePort The following variables are customizable only when `service_type=NodePort` @@ -69,7 +69,8 @@ spec: service_type: NodePort nodeport_port: 30080 ``` -#### Ingress Type + +## Ingress Type By default, the AWX operator is not opinionated and won't force a specific ingress type on you. So, when the `ingress_type` is not specified, it will default to `none` and nothing ingress-wise will be created. @@ -84,7 +85,7 @@ spec: ingress_type: none ``` - * Generic Ingress Controller +### Generic Ingress Controller The following variables are customizable when `ingress_type=ingress`. The `ingress` type creates an Ingress resource as [documented](https://kubernetes.io/docs/concepts/services-networking/ingress/) which can be shared with many other Ingress Controllers as [listed](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/). 
@@ -112,7 +113,7 @@ spec: environment: testing ``` -##### Specialized Ingress Controller configuration +### Specialized Ingress Controller configuration Some Ingress Controllers need a special configuration to fully support AWX, add the following value with the `ingress_controller` variable, if you are using one of these: @@ -132,7 +133,7 @@ spec: ingress_controller: contour ``` - * Route +### Route The following variables are customizable when `ingress_type=route` diff --git a/mkdocs.yml b/mkdocs.yml index f8c7f8a9a..96e6e864a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -38,43 +38,37 @@ theme: name: Switch to light mode nav: - - index.md - - Contributors Guide: - - contributors-guide/contributing.md - - contributors-guide/release-process.md - - contributors-guide/author.md - - contributors-guide/code-of-conduct.md - - contributors-guide/get-involved.md + - Home: index.md - Installation: - - Installation: installation/index.md - installation/basic-install.md - - installation/creating-a-minikube-cluster-for-testing.md - installation/helm-install-on-existing-cluster.md + - installation/kind-install.md + - installation/creating-a-minikube-cluster-for-testing.md - Migrate: - migration/migration.md + - Upgrade: + - upgrade/upgrading.md - Uninstall: - uninstall/uninstall.md - User Guide: - user-guide/admin-user-account-configuration.md - user-guide/network-and-tls-configuration.md - user-guide/database-configuration.md - - Upgrade: - - upgrade/upgrading.md - Advanced Configuration: - user-guide/advanced-configuration/deploying-a-specific-version-of-awx.md - user-guide/advanced-configuration/redis-container-capabilities.md - user-guide/advanced-configuration/privileged-tasks.md + - user-guide/advanced-configuration/host-aliases.md - user-guide/advanced-configuration/containers-resource-requirements.md - user-guide/advanced-configuration/priority-classes.md - - user-guide/advanced-configuration/adding-execution-nodes.md - 
user-guide/advanced-configuration/scaling-the-web-and-task-pods-independently.md + - user-guide/advanced-configuration/horizontal-pod-autoscaler.md - user-guide/advanced-configuration/assigning-awx-pods-to-specific-nodes.md - user-guide/advanced-configuration/trusting-a-custom-certificate-authority.md - user-guide/advanced-configuration/custom-receptor-certs.md - user-guide/advanced-configuration/enabling-ldap-integration-at-awx-bootstrap.md - user-guide/advanced-configuration/persisting-projects-directory.md - user-guide/advanced-configuration/custom-volume-and-volume-mount-options.md - - user-guide/advanced-configuration/default-execution-environments-from-private-registries.md - user-guide/advanced-configuration/exporting-environment-variables-to-containers.md - user-guide/advanced-configuration/csrf-cookie-secure-setting.md - user-guide/advanced-configuration/session-cookie-secure-setting.md @@ -84,10 +78,18 @@ nav: - user-guide/advanced-configuration/service-account.md - user-guide/advanced-configuration/labeling-operator-managed-objects.md - user-guide/advanced-configuration/pods-termination-grace-period.md + - user-guide/advanced-configuration/security-context.md + - user-guide/advanced-configuration/container-probes.md - user-guide/advanced-configuration/disable-ipv6.md - user-guide/advanced-configuration/mesh-ingress.md - Troubleshooting: - troubleshooting/debugging.md + - Contributors Guide: + - contributors-guide/contributing.md + - contributors-guide/release-process.md + - contributors-guide/author.md + - contributors-guide/code-of-conduct.md + - contributors-guide/get-involved.md exclude_docs: README.md