From 5bb3a9b0ded568c7ed118ffb234c8cc941643136 Mon Sep 17 00:00:00 2001 From: Manjunath Hegde Date: Wed, 19 Oct 2022 10:34:26 +0000 Subject: [PATCH 1/4] idm_22.4.1_release --- .../templates/host-nginx-ingress-nonssl.yaml | 234 +++++ .../templates/nginx-ingress-nonssl.yaml | 123 +++ .../templates/nginx-ingress-ssl.yaml | 129 +++ .../templates/nginx-ingress.yaml | 181 ---- .../charts/ingress-per-domain/values.yaml | 17 +- .../kubernetes/charts/traefik/values.yaml | 7 +- .../charts/weblogic-operator/Chart.yaml | 4 +- .../templates/_operator-cm.tpl | 12 + .../templates/_operator-dep.tpl | 8 +- .../templates/_operator-external-svc.tpl | 2 + .../templates/_operator-internal-svc.tpl | 1 + .../charts/weblogic-operator/values.yaml | 15 +- .../kubernetes/common/utility.sh | 60 +- .../kubernetes/common/validate.sh | 29 +- .../kubernetes/common/wdt-and-wit-utility.sh | 4 +- .../common/oamconfig.properties | 4 +- .../common/oamconfig_modify.sh | 6 - .../domain-home-on-pv/common/readme.txt | 2 +- .../start-db-service.sh | 39 +- .../stop-db-service.sh | 7 +- .../kubernetes/create-rcu-schema/README.md | 12 +- .../common/createRepository.sh | 4 +- .../common/dropRepository.sh | 4 +- .../create-rcu-schema/create-rcu-schema.sh | 4 +- .../create-rcu-schema/drop-rcu-schema.sh | 4 +- .../kubernetes/domain-lifecycle/helper.sh | 140 +-- .../elasticsearch-and-kibana/README.md | 3 +- .../elasticsearch_and_kibana.yaml | 12 +- .../kubernetes/monitoring-service/README.md | 171 +++- .../weblogic-server-dashboard-import.json | 46 +- .../config/weblogic-server-dashboard.json | 46 +- .../monitoring-service/delete-monitoring.sh | 4 +- ...ometheus-roleBinding-domain-namespace.yaml | 2 +- ...metheus-roleSpecific-domain-namespace.yaml | 2 +- .../wls-exporter-ServiceMonitor.yaml | 13 +- .../monitoring-service/monitoring-inputs.yaml | 16 +- .../deploy-weblogic-monitoring-exporter.py | 10 +- ...eploy-weblogic-server-grafana-dashboard.sh | 30 + .../undeploy-weblogic-monitoring-exporter.py | 10 +- .../monitoring-service/scripts/utils.sh | 16 +- .../monitoring-service/setup-monitoring.sh | 23 +- .../kubernetes/monitoring-service/values.yaml | 13 + .../kubernetes/scaling/scalingAction.sh | 10 +- .../templates/host-nginx-ingress-nonssl.yaml | 346 ++++++++ .../templates/nginx-ingress-nonssl.yaml | 191 +++++ ...ss-k8s1.19.yaml => nginx-ingress-ssl.yaml} | 54 +- .../templates/nginx-ingress.yaml | 130 --- .../templates/traefik-ingress-k8s1.19.yaml | 92 -- .../templates/traefik-ingress.yaml | 74 -- .../charts/ingress-per-domain/values.yaml | 27 +- .../kubernetes/charts/traefik/values.yaml | 7 +- .../charts/weblogic-operator/Chart.yaml | 4 +- .../templates/_operator-cm.tpl | 12 + .../templates/_operator-dep.tpl | 8 +- .../templates/_operator-external-svc.tpl | 2 + .../templates/_operator-internal-svc.tpl | 1 + .../charts/weblogic-operator/values.yaml | 15 +- .../kubernetes/common/utility.sh | 60 +- .../kubernetes/common/validate.sh | 33 +- .../kubernetes/common/wdt-and-wit-utility.sh | 4 +- .../start-db-service.sh | 39 +- .../stop-db-service.sh | 7 +- .../kubernetes/create-rcu-schema/README.md | 12 +- .../common/createRepository.sh | 4 +- .../common/dropRepository.sh | 4 +- .../create-rcu-schema/create-rcu-schema.sh | 4 +- .../create-rcu-schema/drop-rcu-schema.sh | 4 +- .../design-console-ingress/README.md | 24 +- .../kubernetes/domain-lifecycle/helper.sh | 140 +-- .../domain-lifecycle/patch_oig_domain.sh | 407 +++++++++ .../domain-lifecycle/wl-pod-wait.sh | 423 ++++++++++ .../elasticsearch-and-kibana/README.md | 3 +- 
.../elasticsearch_and_kibana.yaml | 12 +- .../kubernetes/monitoring-service/README.md | 171 +++- .../weblogic-server-dashboard-import.json | 46 +- .../config/weblogic-server-dashboard.json | 47 +- .../monitoring-service/delete-monitoring.sh | 4 +- .../wls-exporter-ServiceMonitor.yaml | 9 +- .../monitoring-service/monitoring-inputs.yaml | 8 +- .../deploy-weblogic-monitoring-exporter.py | 4 +- ...eploy-weblogic-server-grafana-dashboard.sh | 30 + .../undeploy-weblogic-monitoring-exporter.py | 4 +- .../monitoring-service/scripts/utils.sh | 4 +- .../monitoring-service/setup-monitoring.sh | 21 +- .../kubernetes/monitoring-service/values.yaml | 13 + .../kubernetes/scaling/scalingAction.sh | 10 +- OracleUnifiedDirectory/kubernetes/README.md | 797 ------------------ .../templates/elk_logstash-configMap.yaml | 20 +- .../oud-ds-rs/templates/elk_logstash.yaml | 47 +- .../kubernetes/helm/oud-ds-rs/values.yaml | 124 +-- .../kubernetes/samples/oud-dir-pod.yaml | 82 -- .../kubernetes/samples/oud-dir-svc.yaml | 222 ----- .../oud-ds-plus-rs-remote-oud-svcs.yaml | 558 ------------ .../samples/oud-ds-plus-rs-svc.yaml | 313 ------- .../kubernetes/samples/oud-ds_proxy-svc.yaml | 122 --- .../kubernetes/samples/oud-ds_rs_ds-svc.yaml | 406 --------- .../kubernetes/samples/oudns.yaml | 11 - .../kubernetes/samples/persistent-volume.yaml | 39 - .../kubernetes/samples/secrets.yaml | 15 - .../samples/stress-oud-dir-svc.yaml | 360 -------- .../samples/stress-oud-ds-plus-rs-svc.yaml | 431 ---------- OracleUnifiedDirectorySM/kubernetes/README.md | 418 --------- .../templates/elk_logstash-configMap.yaml | 26 +- .../helm/oudsm/templates/elk_logstash.yaml | 48 +- .../kubernetes/helm/oudsm/values.yaml | 139 +-- .../kubernetes/samples/oudsm-deployment.yaml | 88 -- .../kubernetes/samples/oudsm-pod.yaml | 78 -- .../kubernetes/samples/oudsmns.yaml | 11 - .../kubernetes/samples/persistent-volume.yaml | 39 - .../kubernetes/samples/secrets.yaml | 16 - docs-source/content/oam/_index.md | 6 +- .../content/oam/configure-ingress/_index.md | 71 +- .../content/oam/create-oam-domains/_index.md | 26 +- .../logging-and-visualization.md | 686 +++++++++------ .../monitoring-oam-domains.md | 32 +- .../wlst-admin-operations.md | 22 +- .../content/oam/patch-and-upgrade/_index.md | 8 +- .../{patch_an_image.md => patch-an-image.md} | 2 +- .../patch-and-upgrade/upgrade-an-ingress.md | 179 ++++ ...ease.md => upgrade-an-operator-release.md} | 4 +- .../oam/patch-and-upgrade/upgrade-elk.md | 30 + .../content/oam/post-install-config/_index.md | 9 +- .../oam/prepare-your-environment/_index.md | 76 +- .../content/oam/prerequisites/_index.md | 2 +- docs-source/content/oam/release-notes.md | 12 +- .../oam/validate-domain-urls/_index.md | 2 +- docs-source/content/oig/_index.md | 6 +- ...sing-the-design-console-with-nginx-ssl.md} | 6 +- ...-the-design-console-with-nginx-non-ssl.md} | 6 +- ...x-setup-for-oig-domain-setup-on-K8S-ssl.md | 111 +-- ...nginx-setup-for-oig-domain-setup-on-K8S.md | 87 +- .../content/oig/create-oig-domains/_index.md | 22 +- .../logging-and-visualization.md | 691 +++++++++------ .../monitoring-oim-domains.md | 39 +- .../wlst-admin-operations.md | 12 +- .../content/oig/patch-and-upgrade/_index.md | 6 +- .../oig/patch-and-upgrade/patch-an-image.md | 120 +++ .../oig/patch-and-upgrade/patch_an_image.md | 212 ----- .../patch-and-upgrade/upgrade-an-ingress.md | 173 ++++ ...ease.md => upgrade-an-operator-release.md} | 4 +- .../oig/patch-and-upgrade/upgrade-elk.md | 32 + .../set_oimfronendurl_using_mbeans.md | 3 +- 
.../oig/prepare-your-environment/_index.md | 83 +- .../content/oig/prerequisites/_index.md | 2 +- docs-source/content/oig/release-notes.md | 11 + .../content/oig/troubleshooting/_index.md | 60 +- docs-source/content/oud/_index.md | 3 +- .../content/oud/configure-ingress/_index.md | 2 +- .../oud/create-oud-instances/_index.md | 84 +- .../logging-and-visualization.md | 409 ++++++--- .../monitoring-oud-instance.md | 4 +- .../content/oud/patch-and-upgrade/index.md | 108 ++- .../oud/prepare-your-environment/_index.md | 14 +- docs-source/content/oud/release-notes.md | 3 + .../content/oud/troubleshooting/_index.md | 53 +- docs-source/content/oudsm/_index.md | 3 +- .../content/oudsm/configure-ingress/_index.md | 10 +- .../oudsm/create-oudsm-instances/_index.md | 16 +- .../logging-and-visualization.md | 374 ++++---- .../monitoring-oudsm-instance.md | 6 +- .../content/oudsm/patch-and-upgrade/_index.md | 15 + .../{index.md => patch-an-oudsm-image.md} | 14 +- .../oudsm/patch-and-upgrade/upgrade-elk.md | 79 ++ .../oudsm/prepare-your-environment/_index.md | 9 +- docs-source/content/oudsm/release-notes.md | 7 + .../content/oudsm/troubleshooting/_index.md | 6 +- 166 files changed, 5854 insertions(+), 6621 deletions(-) create mode 100755 OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml create mode 100755 OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml create mode 100755 OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml delete mode 100755 OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml create mode 100755 OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh create mode 100755 OracleAccessManagement/kubernetes/monitoring-service/values.yaml create mode 100755 OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml create mode 100755 OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml rename OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/{nginx-ingress-k8s1.19.yaml => nginx-ingress-ssl.yaml} (90%) delete mode 100755 OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml delete mode 100755 OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml delete mode 100755 OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml create mode 100755 OracleIdentityGovernance/kubernetes/domain-lifecycle/patch_oig_domain.sh create mode 100755 OracleIdentityGovernance/kubernetes/domain-lifecycle/wl-pod-wait.sh create mode 100755 OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh create mode 100755 OracleIdentityGovernance/kubernetes/monitoring-service/values.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/README.md delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/oud-dir-pod.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/oud-dir-svc.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-remote-oud-svcs.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-svc.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/oud-ds_proxy-svc.yaml delete mode 100755 
OracleUnifiedDirectory/kubernetes/samples/oud-ds_rs_ds-svc.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/oudns.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/persistent-volume.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/secrets.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/stress-oud-dir-svc.yaml delete mode 100755 OracleUnifiedDirectory/kubernetes/samples/stress-oud-ds-plus-rs-svc.yaml delete mode 100755 OracleUnifiedDirectorySM/kubernetes/README.md delete mode 100755 OracleUnifiedDirectorySM/kubernetes/samples/oudsm-deployment.yaml delete mode 100755 OracleUnifiedDirectorySM/kubernetes/samples/oudsm-pod.yaml delete mode 100755 OracleUnifiedDirectorySM/kubernetes/samples/oudsmns.yaml delete mode 100755 OracleUnifiedDirectorySM/kubernetes/samples/persistent-volume.yaml delete mode 100755 OracleUnifiedDirectorySM/kubernetes/samples/secrets.yaml rename docs-source/content/oam/patch-and-upgrade/{patch_an_image.md => patch-an-image.md} (98%) create mode 100644 docs-source/content/oam/patch-and-upgrade/upgrade-an-ingress.md rename docs-source/content/oam/patch-and-upgrade/{upgrade_an_operator_release.md => upgrade-an-operator-release.md} (97%) create mode 100644 docs-source/content/oam/patch-and-upgrade/upgrade-elk.md rename docs-source/content/oig/configure-design-console/{Using the design console with NGINX (SSL).md => Using-the-design-console-with-nginx-ssl.md} (98%) rename docs-source/content/oig/configure-design-console/{Using the design console with NGINX (non-SSL).md => using-the-design-console-with-nginx-non-ssl.md} (98%) create mode 100644 docs-source/content/oig/patch-and-upgrade/patch-an-image.md delete mode 100644 docs-source/content/oig/patch-and-upgrade/patch_an_image.md create mode 100644 docs-source/content/oig/patch-and-upgrade/upgrade-an-ingress.md rename docs-source/content/oig/patch-and-upgrade/{upgrade_an_operator_release.md => upgrade-an-operator-release.md} (97%) create mode 100644 docs-source/content/oig/patch-and-upgrade/upgrade-elk.md create mode 100644 docs-source/content/oudsm/patch-and-upgrade/_index.md rename docs-source/content/oudsm/patch-and-upgrade/{index.md => patch-an-oudsm-image.md} (95%) create mode 100644 docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml new file mode 100755 index 000000000..27deb536b --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml @@ -0,0 +1,234 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +{{- if eq .Values.type "NGINX" }} +{{- if (eq .Values.sslType "NONSSL") }} +{{- if .Values.hostName.enabled }} +{{- if .Values.hostName.admin }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oamadmin-ingress + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" + nginx.ingress.kubernetes.io/enable-access-log: "false" + nginx.ingress.kubernetes.io/ingress.allow-http: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" +spec: + ingressClassName: nginx + rules: + - host: '{{ .Values.hostName.admin }}' + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /consolehelp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /rreg/rreg + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oamconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /dms + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/config + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/admin/api + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/diag + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | 
replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest/11.1.2.0.0 + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest/ssa + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.policyManagedServerPort }} + +{{- end }} + +{{- if .Values.hostName.runtime }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oamruntime-ingress + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" + nginx.ingress.kubernetes.io/enable-access-log: "false" +spec: + ingressClassName: nginx + rules: + - host: '{{ .Values.hostName.runtime }}' + http: + paths: + - path: /ms_oauth + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam/services/rest/auth + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam/services/rest/access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oamfed + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /otpfp/ + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /.well-known/openid-configuration + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /.well-known/oidc-configuration + pathType: ImplementationSpecific + backend: + service: + name: '{{ 
.Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /CustomConsent + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /iam/access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + +{{- end }} +{{- end }} +{{- end }} +{{- end }} + + diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml new file mode 100755 index 000000000..ddfd5d09b --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml @@ -0,0 +1,123 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +{{- if eq .Values.type "NGINX" }} +{{- if (eq .Values.sslType "NONSSL") }} +{{- if not .Values.hostName.enabled }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/enable-access-log: 'false' + kubernetes.io/ingress.class: 'nginx' +spec: + rules: + - host: + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /consolehelp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /rreg/rreg + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oamconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /dms + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/config + pathType: ImplementationSpecific + backend: + service: + name: '{{ 
.Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/diag + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam/admin/api + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest/access/api + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.policyManagedServerPort }} + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml new file mode 100755 index 000000000..49f68c158 --- /dev/null +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml @@ -0,0 +1,129 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# +{{- if eq .Values.type "NGINX" }} +{{- if (eq .Values.sslType "SSL") }} +{{- if not .Values.hostName.enabled }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/enable-access-log: 'false' + kubernetes.io/ingress.class: 'nginx' + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/configuration-snippet: | + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/ingress.allow-http: 'false' +spec: + rules: + - host: + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /consolehelp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /rreg/rreg + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oamconsole + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /dms + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/config + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/admin/diag + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /iam/access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /oam/admin/api + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oam/services/rest/access/api + pathType: 
ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + - path: /access + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.policyManagedServerPort }} + - path: / + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oamManagedServerPort }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml deleted file mode 100755 index 956ac0acb..000000000 --- a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. - -{{- if eq .Values.type "NGINX" }} ---- -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: access-ingress - namespace: {{ .Release.Namespace }} - labels: - weblogic.resourceVersion: domain-v2 -{{- if eq .Values.sslType "SSL" }} - annotations: - nginx.ingress.kubernetes.io/proxy-buffer-size: "2000k" - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/enable-access-log: "false" - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_input_headers "X-Forwarded-Proto: https"; - more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; - more_set_input_headers "WL-Proxy-SSL: true"; - nginx.ingress.kubernetes.io/ingress.allow-http: "false" -{{- end }} -spec: - rules: - {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} - - http: - paths: - - path: /console - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /rreg/rreg - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /oamconsole - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /dms - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest - pathType: ImplementationSpecific - backend: - 
service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/config - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/diag - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/access - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /oam/admin/api - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest/access/api - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /access - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.policyManagedServerPort }} - - path: / - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.oamManagedServerPort }} -{{- else }} - - http: - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /rreg/rreg - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oamconsole - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /dms - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/config - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/admin/diag - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ 
.Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /iam/access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /oam/admin/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /oam/services/rest/access/api - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - - path: /access - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.policyClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.policyManagedServerPort }} - - path: / - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oamClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oamManagedServerPort }} - -{{- end }} -{{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml index 2ecd64f08..683ebd4a6 100755 --- a/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml +++ b/OracleAccessManagement/kubernetes/charts/ingress-per-domain/values.yaml @@ -6,12 +6,16 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. # -# Load balancer type. Supported values are: NGINX + +# Load balancer type. Supported values are: NGINX type: NGINX -# Type of Configuration Supported Values are : NONSSL and SSL +# SSL configuration Type. Supported Values are : NONSSL,SSL sslType: SSL +# domainType. Supported values are: oam +domainType: oam + #WLS domain as backend to the load balancer wlsDomain: domainUID: accessinfra @@ -25,3 +29,12 @@ wlsDomain: policyManagedServerPort: 15100 policyManagedServerSSLPort: + +# Host specific values +hostName: + enabled: false + admin: + runtime: + + + diff --git a/OracleAccessManagement/kubernetes/charts/traefik/values.yaml b/OracleAccessManagement/kubernetes/charts/traefik/values.yaml index e94bf24f2..f680d34e3 100755 --- a/OracleAccessManagement/kubernetes/charts/traefik/values.yaml +++ b/OracleAccessManagement/kubernetes/charts/traefik/values.yaml @@ -1,9 +1,9 @@ -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # image: name: traefik - tag: 2.2.8 + tag: 2.6.0 pullPolicy: IfNotPresent ingressRoute: dashboard: @@ -49,4 +49,7 @@ ports: # The port protocol (TCP/UDP) protocol: TCP nodePort: 30443 +additionalArguments: + - "--log.level=INFO" + diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml b/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml index b5cac770e..5814294bf 100755 --- a/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/Chart.yaml @@ -6,5 +6,5 @@ name: weblogic-operator description: Helm chart for configuring the WebLogic operator. 
type: application -version: 3.3.0 -appVersion: 3.3.0 +version: 3.4.2 +appVersion: 3.4.2 diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl index dd6594de2..8f7f2ff51 100755 --- a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl @@ -49,6 +49,18 @@ data: {{- if .tokenReviewAuthentication }} tokenReviewAuthentication: {{ .tokenReviewAuthentication | quote }} {{- end }} + {{- if (hasKey . "istioLocalhostBindingsEnabled") }} + istioLocalhostBindingsEnabled: {{ .istioLocalhostBindingsEnabled | quote }} + {{- end }} + {{- if .kubernetesPlatform }} + kubernetesPlatform: {{ .kubernetesPlatform | quote }} + {{- end }} + {{- if .domainPresenceFailureRetryMaxCount }} + domainPresenceFailureRetryMaxCount: {{ .domainPresenceFailureRetryMaxCount | quote }} + {{- end }} + {{- if .domainPresenceFailureRetrySeconds }} + domainPresenceFailureRetrySeconds: {{ .domainPresenceFailureRetrySeconds | quote }} + {{- end }} kind: "ConfigMap" metadata: labels: diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl index 3fadac7dc..6faacc095 100755 --- a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl @@ -33,6 +33,10 @@ spec: {{- end }} spec: serviceAccountName: {{ .serviceAccount | quote }} + {{- if .runAsUser }} + securityContext: + runAsUser: {{ .runAsUser }} + {{- end }} {{- with .nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} @@ -64,6 +68,8 @@ spec: value: "false" - name: "JAVA_LOGGING_LEVEL" value: {{ .javaLoggingLevel | quote }} + - name: "KUBERNETES_PLATFORM" + value: {{ .kubernetesPlatform | quote }} - name: "JAVA_LOGGING_MAXSIZE" value: {{ .javaLoggingFileSizeLimit | default 20000000 | quote }} - name: "JAVA_LOGGING_COUNT" @@ -112,7 +118,7 @@ spec: command: - "bash" - "/operator/livenessProbe.sh" - initialDelaySeconds: 20 + initialDelaySeconds: 40 periodSeconds: 5 readinessProbe: exec: diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl index 44bfc1191..18b0876a9 100755 --- a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl @@ -19,11 +19,13 @@ spec: {{- if .externalRestEnabled }} - name: "rest" port: 8081 + appProtocol: https nodePort: {{ .externalRestHttpsPort }} {{- end }} {{- if .remoteDebugNodePortEnabled }} - name: "debug" port: {{ .internalDebugHttpPort }} + appProtocol: http nodePort: {{ .externalDebugHttpPort }} {{- end }} {{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl index 0108738de..b03aa8aee 100755 --- a/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl @@ -17,4 +17,5 @@ spec: ports: - port: 8082 name: "rest" + appProtocol: https {{- end }} diff --git a/OracleAccessManagement/kubernetes/charts/weblogic-operator/values.yaml b/OracleAccessManagement/kubernetes/charts/weblogic-operator/values.yaml index dac9a5382..fd151bff1 100755 --- a/OracleAccessManagement/kubernetes/charts/weblogic-operator/values.yaml +++ b/OracleAccessManagement/kubernetes/charts/weblogic-operator/values.yaml @@ -63,7 +63,7 @@ domainNamespaces: enableClusterRoleBinding: false # image specifies the container image containing the operator. -image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0" +image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.4.2" # imagePullPolicy specifies the image pull policy for the operator's container image. imagePullPolicy: IfNotPresent @@ -104,7 +104,7 @@ elkIntegrationEnabled: false # logStashImage specifies the container image containing logstash. # This parameter is ignored if 'elkIntegrationEnabled' is false. -logStashImage: "logstash:6.6.0" +logStashImage: "logstash:6.8.23" # elasticSearchHost specifies the hostname of where elasticsearch is running. # This parameter is ignored if 'elkIntegrationEnabled' is false. @@ -222,3 +222,14 @@ clusterSizePaddingValidationEnabled: true # to the Domain resource so that it is done using the caller's privileges. # The default value is false. #tokenReviewAuthentication: false + +# domainPresenceFailureRetryMaxCount and domainPresenceFailureRetrySeconds specify the number of introspector job +# retries for a Domain and the interval in seconds between these retries, respectively. +# Defaults to 5 retries and 10 seconds between each retry. +# domainPresenceFailureRetryMaxCount: 5 +# domainPresenceFailureRetrySeconds: 10 + +# runAsUser specifies the UID to run the operator container process. 
If not specified, +# it defaults to the user specified in the operator's container image. +#runAsUser: 1000 + diff --git a/OracleAccessManagement/kubernetes/common/utility.sh b/OracleAccessManagement/kubernetes/common/utility.sh index aafc57021..005677e62 100755 --- a/OracleAccessManagement/kubernetes/common/utility.sh +++ b/OracleAccessManagement/kubernetes/common/utility.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Copyright (c) 2018, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # @@ -185,6 +185,23 @@ function checkPvState { fi } +# +# Check the state of a persistent volume claim. +# $1 - name of volume claim +# $2 - expected state of volume claim +function checkPvcState { + echo "Checking if the persistent volume claim ${1:?} is ${2:?}" + local end_secs=$((SECONDS + 30)) + local pvc_state=`kubectl get pvc $1 -o jsonpath='{.status.phase}'` + while [ ! "$pvc_state" = "$2" ] && [ $SECONDS -le $end_secs ]; do + sleep 1 + pvc_state=`kubectl get pvc $1 -o jsonpath='{.status.phase}'` + done + if [ "$pvc_state" != "$2" ]; then + fail "The persistent volume state should be $2 but is $pvc_state" + fi +} + # # Function to check if a persistent volume exists # $1 - name of volume @@ -926,3 +943,44 @@ function checkService(){ done echo "Service [$svc] found" } + +# Get pod name when pod available in a given namespace +function getPodName(){ + + local max=$((SECONDS + 120)) + + local pod=$1 + local ns=$2 + + local pname="" + while [ $SECONDS -le $max ] ; do + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + [ -z "${pname}" ] || break + sleep 1 + done + + if [ -z "${pname}" ] ; then + echo "[ERROR] Could not find Pod [$pod] after $max seconds"; + exit 1 + fi + + echo "${pname}" +} + +# Checks if a pod is available in a given namespace +function detectPod() { + ns=$1 + startSecs=$SECONDS + maxWaitSecs=10 + while [ -z "`kubectl get pod -n ${ns} -o jsonpath={.items[0].metadata.name}`" ]; do + if [ $((SECONDS - startSecs)) -lt $maxWaitSecs ]; then + echo "Pod not found after $((SECONDS - startSecs)) seconds, retrying ..." + sleep 2 + else + echo "[Error] Could not find Pod after $((SECONDS - startSecs)) seconds" + exit 1 + fi + done + retVal=`kubectl get pod -n ${ns} -o jsonpath={.items[0].metadata.name}` + echo "$retVal" +} diff --git a/OracleAccessManagement/kubernetes/common/validate.sh b/OracleAccessManagement/kubernetes/common/validate.sh index 1a407a99a..b6f06337c 100755 --- a/OracleAccessManagement/kubernetes/common/validate.sh +++ b/OracleAccessManagement/kubernetes/common/validate.sh @@ -82,6 +82,28 @@ function validateLowerCase { fi } +# +# Function to check if a value is a valid WLS domain name. +# must include only alphanumeric characters, hyphens (-) +# or underscore characters (_) and contain at least one letter +# but must start with an alphanumeric or underscore character. +# +# $1 - name of object being checked +# $2 - value to check +validateWlsDomainName() { + echo "validateWlsDomainName called with $2" + if ! [[ "$2" =~ ^[a-z_][a-z0-9_.-]*$ ]] ; then + validationError "$1 with value of $2 is not a valid WebLogic domain name. "\ + "A valid WebLogic domain name must include only alphanumeric characters, hyphens (-) "\ + "or underscore characters (_) but must start with an alphanumeric or underscore character." + else + if ! 
[[ "$2" =~ ^.*[a-z0-9].*$ ]] ; then + validationError "$1 with value of $2 is not a valid WebLogic domain name. "\ + "A valid WebLogic domain name must contain at least one alphanumeric character." + fi + fi +} + # # Function to check if a value is lowercase and legal DNS name # $1 - name of object being checked @@ -112,10 +134,13 @@ function validateVersion { # # Function to ensure the domain uid is a legal DNS name +# Because the domain uid is also used as a WebLogic domain +# name, it must also be a valid WebLogic domain name. # function validateDomainUid { - validateLowerCase "domainUID" ${domainUID} - validateDNS1123LegalName domainUID ${domainUID} + validateLowerCase "domainUID" "${domainUID}" + validateDNS1123LegalName "domainUID" "${domainUID}" + validateWlsDomainName "domainUID" "${domainUID}" } # diff --git a/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh b/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh index aa9cc691c..4ecf53f68 100755 --- a/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh +++ b/OracleAccessManagement/kubernetes/common/wdt-and-wit-utility.sh @@ -209,10 +209,12 @@ function run_wdt { cd $WDT_DIR || return 1 + mkdir ${action} + cmd=" $wdt_bin_dir/extractDomainResource.sh -oracle_home $oracle_home - -domain_resource_file domain${action}.yaml + -output_dir ./${action} -domain_home $domain_home_dir -model_file $model_final -variable_file $inputs_final diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties index d002e86d5..ca68138c3 100755 --- a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig.properties @@ -1,4 +1,4 @@ -# Copyright (c) 2020, Oracle Corporation and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. #Below are only the sample values, please modify them as per your setup @@ -6,7 +6,7 @@ # The name space where OAM servers are created OAM_NAMESPACE='accessns' -# Define the INGRESS CONTROLLER used. typical values are voyager/nginx +# Define the INGRESS CONTROLLER used. typical value is nginx INGRESS="nginx" # Define the INGRESS CONTROLLER name used during installation. 
diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh index 1e786f0f8..bfb40b392 100755 --- a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/oamconfig_modify.sh @@ -42,8 +42,6 @@ echo "INGRESS_NAME: $INGRESS_NAME" if [ $INGRESS == "nginx" ]; then ING_TYPE=`kubectl --namespace $OAM_NAMESPACE get services $INGRESS_NAME-ingress-nginx-controller -o jsonpath="{.spec.type}"` -elif [ $INGRESS == "voyager" ]; then - ING_TYPE=`kubectl --namespace $OAM_NAMESPACE get services voyager-$domainUID-voyager -o jsonpath="{.spec.type}"` else echo "Error: Invalid INGRESS : $INGRESS" exit 1 @@ -54,8 +52,6 @@ echo "ING_TYPE : $ING_TYPE " if [ $ING_TYPE == "NodePort" ]; then if [ $INGRESS == "nginx" ]; then LBR_PORT=`kubectl --namespace $OAM_NAMESPACE get services -o jsonpath="{.spec.ports[1].nodePort}" $INGRESS_NAME-ingress-nginx-controller` - elif [ $INGRESS == "voyager" ]; then - LBR_PORT=`kubectl --namespace $OAM_NAMESPACE get services -o jsonpath="{.spec.ports[0].nodePort}" voyager-$domainUID-voyager` else echo "Error: Invalid INGRESS : $INGRESS" exit 1 @@ -70,8 +66,6 @@ fi if [ $ING_TYPE == "LoadBalancer" ]; then if [ $INGRESS == "nginx" ]; then LBR_HOST=`kubectl --namespace $OAM_NAMESPACE get service $INGRESS_NAME-ingress-nginx-controller | grep controller | awk '{ print $4 }' | tr -d '\n'` - elif [ $INGRESS == "voyager" ]; then - LBR_HOST=`kubectl --namespace $OAM_NAMESPACE get services voyager-$domainUID-voyager |grep voyager-$domainUID-voyager | awk '{ print $4 }' | tr -d '\n'` else echo "Error: Invalid INGRESS : $INGRESS" exit 1 diff --git a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/readme.txt b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/readme.txt index d04367cd9..8b1c4c048 100755 --- a/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/readme.txt +++ b/OracleAccessManagement/kubernetes/create-access-domain/domain-home-on-pv/common/readme.txt @@ -9,7 +9,7 @@ Sample oamconfig.properties # The name space where OAM servers are created OAM_NAMESPACE=accessns -# Define the INGRESS CONTROLLER used. typical values are voyager/nginx +# Define the INGRESS CONTROLLER used. typical value is nginx INGRESS=nginx # Define the INGRESS CONTROLLER name used during installation. diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh b/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh index 9a522d4eb..452860272 100755 --- a/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/start-db-service.sh @@ -1,14 +1,14 @@ #!/bin/bash -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
-# + # Bring up Oracle DB Instance in [default] NameSpace with a NodePort Service script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" source ${scriptDir}/../common/utility.sh -function usage { +usage() { echo "usage: ${script} -p -i -s -n [-h]" echo " -i Oracle DB Image (optional)" echo " (default: container-registry.oracle.com/database/enterprise:12.2.0.1-slim)" @@ -67,28 +67,39 @@ fi echo "NodePort[$nodeport] ImagePullSecret[$pullsecret] Image[${dbimage}] NameSpace[${namespace}]" +#create unique db yaml file if does not exists +dbYaml=${scriptDir}/common/oracle.db.${namespace}.yaml +if [ ! -f "$dbYaml" ]; then + echo "$dbYaml does not exist." + cp ${scriptDir}/common/oracle.db.yaml ${dbYaml} +fi + # Modify ImagePullSecret and DatabaseImage based on input -sed -i -e '$d' ${scriptDir}/common/oracle.db.yaml -echo ' - name: docker-store' >> ${scriptDir}/common/oracle.db.yaml -sed -i -e "s?name: docker-store?name: ${pullsecret}?g" ${scriptDir}/common/oracle.db.yaml -sed -i -e "s?image:.*?image: ${dbimage}?g" ${scriptDir}/common/oracle.db.yaml -sed -i -e "s?namespace:.*?namespace: ${namespace}?g" ${scriptDir}/common/oracle.db.yaml +sed -i -e '$d' ${dbYaml} +echo ' - name: docker-store' >> ${dbYaml} +sed -i -e "s?name: docker-store?name: ${pullsecret}?g" ${dbYaml} +sed -i -e "s?image:.*?image: ${dbimage}?g" ${dbYaml} +sed -i -e "s?namespace:.*?namespace: ${namespace}?g" ${dbYaml} # Modify the NodePort based on input if [ "${nodeport}" = "none" ]; then - sed -i -e "s? nodePort:? #nodePort:?g" ${scriptDir}/common/oracle.db.yaml - sed -i -e "s? type:.*NodePort? #type: NodePort?g" ${scriptDir}/common/oracle.db.yaml + sed -i -e "s? nodePort:? #nodePort:?g" ${dbYaml} + sed -i -e "s? type:.*NodePort? #type: NodePort?g" ${dbYaml} else - sed -i -e "s?[#]*nodePort:.*?nodePort: ${nodeport}?g" ${scriptDir}/common/oracle.db.yaml - sed -i -e "s?[#]*type:.*NodePort?type: NodePort?g" ${scriptDir}/common/oracle.db.yaml # default type is ClusterIP + sed -i -e "s?[#]*nodePort:.*?nodePort: ${nodeport}?g" ${dbYaml} + sed -i -e "s?[#]*type:.*NodePort?type: NodePort?g" ${dbYaml} # default type is ClusterIP fi kubectl delete service oracle-db -n ${namespace} --ignore-not-found -kubectl apply -f ${scriptDir}/common/oracle.db.yaml +kubectl apply -f ${dbYaml} -dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` +detectPod ${namespace} +dbpod=${retVal} +echo "Is going to check dbpod: ${dbpod} in the namespace: ${namespace} " checkPod ${dbpod} ${namespace} + +echo " checking pod state for pod ${dbpod} running in ${namespace}" checkPodState ${dbpod} ${namespace} "1/1" checkService oracle-db ${namespace} diff --git a/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh b/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh index 7ab14928c..a99af10b4 100755 --- a/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh +++ b/OracleAccessManagement/kubernetes/create-oracle-db-service/stop-db-service.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # Drop the DB Service created by start-db-service.sh @@ -8,7 +8,7 @@ script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" source ${scriptDir}/../common/utility.sh -function usage { +usage() { echo "usage: ${script} -n namespace [-h]" echo " -n Kubernetes NameSpace for Oracle DB Service to be Stopped (optional)" echo " (default: default) " @@ -34,7 +34,8 @@ fi dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` -kubectl delete -f ${scriptDir}/common/oracle.db.yaml --ignore-not-found +kubectl delete -f ${scriptDir}/common/oracle.db.${namespace}.yaml --ignore-not-found +rm ${scriptDir}/common/oracle.db.${namespace}.yaml --force if [ -z ${dbpod} ]; then echo "Couldn't find oracle-db pod in [${namespace}] namesapce" diff --git a/OracleAccessManagement/kubernetes/create-rcu-schema/README.md b/OracleAccessManagement/kubernetes/create-rcu-schema/README.md index e17cd596e..c9d9c8015 100755 --- a/OracleAccessManagement/kubernetes/create-rcu-schema/README.md +++ b/OracleAccessManagement/kubernetes/create-rcu-schema/README.md @@ -23,7 +23,7 @@ The script assumes that either the image, `oracle/oam:12.2.1.4.0`, is available ``` $ ./create-rcu-schema.sh -h -usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-h] +usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-l] [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) (supported values: oam) @@ -44,11 +44,13 @@ usage: ./create-rcu-schema.sh -s -t -d -i ` with appropriate values based on your environment: + +``` +$ cd ${WORKDIR}/monitoring-service/scripts +$ kubectl cp wls-exporter-deploy /:/u01/oracle +$ kubectl cp deploy-weblogic-monitoring-exporter.py /:/u01/oracle/wls-exporter-deploy +$ kubectl exec -it -n -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py \ +-domainName -adminServerName -adminURL \ +-oamClusterName -wlsMonitoringExporterTooamCluster \ +-policyClusterName -wlsMonitoringExporterTopolicyCluster \ +-username -password +``` + +For example: + +``` +$ cd ${WORKDIR}/monitoring-service/scripts +$ kubectl cp wls-exporter-deploy accessns/accessinfra-adminserver:/u01/oracle +$ kubectl cp deploy-weblogic-monitoring-exporter.py accessns/accessinfra-adminserver:/u01/oracle/wls-exporter-deploy +$ kubectl exec -it -n accessns accessinfra-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py \ +-domainName accessinfra -adminServerName AdminServer -adminURL accessinfra-adminserver:7001 \ +-oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true \ +-policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true \ +-username weblogic -password Welcome1 +``` + +### Configure Prometheus Operator + +Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service. + +The service monitor deployment YAML configuration file is available at `${WORKDIR}/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template`. Copy the file as `wls-exporter-ServiceMonitor.yaml` to update with appropriate values as detailed below. + +The exporting of metrics from `wls-exporter` requires `basicAuth`, so a Kubernetes `Secret` is created with the user name and password that are base64 encoded. 
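+
+For reference, the base64 encoded values for this `Secret` can also be generated by letting `kubectl` render the `Secret` client-side. This is only a sketch and not part of the documented steps; it assumes the example credentials `weblogic`/`Welcome1` used elsewhere in this guide:
+
+```
+$ kubectl create secret generic basic-auth \
+  --from-literal=user=weblogic \
+  --from-literal=password=Welcome1 \
+  --dry-run=client -o yaml
+```
+
+The `data` fields in the output are the base64 encoded values that go into the copied `wls-exporter-ServiceMonitor.yaml`.
+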
This `Secret` is used in the `ServiceMonitor` deployment. The `wls-exporter-ServiceMonitor.yaml` has namespace as `accessns` and has `basicAuth` with credentials as `username: %USERNAME%` and `password: %PASSWORD%`. Update `%USERNAME%` and `%PASSWORD% ` in base64 encoded and all occurences of `accessns` based on your environment. + +Use the following example for base64 encoded: + +``` +$ echo -n "Welcome1" | base64 +V2VsY29tZTE= +``` + +You need to add `RoleBinding` and `Role` for the namespace (accessns) under which the WebLogic Servers pods are running in the Kubernetes cluster. These are required for Prometheus to access the endpoints provided by the WebLogic Monitoring Exporters. The YAML configuration files for accessns namespace are provided in "${WORKDIR}/monitoring-service/manifests/". + +If you are using namespace other than `accessns`, update the namespace details in `prometheus-roleBinding-domain-namespace.yaml` and `prometheus-roleSpecific-domain-namespace.yaml`. + +Perform the below steps for enabling Prometheus to collect the metrics from the WebLogic Monitoring Exporter: + +``` +$ cd ${WORKDIR}/monitoring-service/manifests +$ kubectl apply -f . +``` + +### Verify the service discovery of WebLogic Monitoring Exporter + +After the deployment of the service monitor, Prometheus should be able to discover wls-exporter and collect the metrics. + +1. Access the Prometheus dashboard at `http://mycompany.com:32101/` + +1. Navigate to **Status** to see the **Service Discovery** details. + +1. Verify that `wls-exporter` is listed in the discovered Services. + + +### Deploy Grafana Dashboard + +You can access the Grafana dashboard at `http://mycompany.com:32100/`. + +1. Log in to Grafana dashboard with username: admin and password: admin`. + +1. Navigate to + (Create) -> Import -> Upload the `weblogic-server-dashboard-import.json` file (provided at `${WORKDIR}/monitoring-service/config/weblogic-server-dashboard-import.json`). + + +## Set up using `setup-monitoring.sh` + +Alternatively, you can run the helper script `setup-monitoring.sh` available at `${WORKDIR}/monitoring-service` to setup the monitoring for OracleAccessManagement domain. + +This script creates kube-prometheus-stack(Prometheus, Grafana and Alertmanager), WebLogic Monitoring Exporter and imports `weblogic-server-dashboard.json` into Grafana for WebLogic Server Dashboard. + ### Prepare to use the setup monitoring script The sample scripts for setup monitoring for OracleAccessManagement domain are available at `${WORKDIR}/monitoring-service`. @@ -36,7 +196,7 @@ The following parameters can be provided in the inputs file. | `prometheusNodePort` | Port number of the Prometheus outside the Kubernetes cluster. | `32101` | | `grafanaNodePort` | Port number of the Grafana outside the Kubernetes cluster. | `32100` | | `alertmanagerNodePort` | Port number of the Alertmanager outside the Kubernetes cluster. | `32102` | -| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server’s user name and password. | `accessinfra-domain-credentials` | +| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server's user name and password. | `accessinfra-domain-credentials` | Note that the values specified in the `monitoring-inputs.yaml` file will be used to install kube-prometheus-stack (Prometheus, Grafana and Alertmanager) and deploying WebLogic Monitoring Exporter into the OracleAccessManagement domain. 
Hence make the domain specific values to be same as that used during domain creation. @@ -54,13 +214,11 @@ The script will perform the following steps: - Helm install `prometheus-community/kube-prometheus-stack` of version "16.5.0" if `setupKubePrometheusStack` is set to `true`. - Deploys WebLogic Monitoring Exporter to Administration Server. - Deploys WebLogic Monitoring Exporter to `oamCluster` if `wlsMonitoringExporterTooamCluster` is set to `true`. -- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. -- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. -- Deploys WebLogic Monitoring Exporter to Administration Server. - Deploys WebLogic Monitoring Exporter to `policyCluster` if `wlsMonitoringExporterTopolicyCluster` is set to `true`. - Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`. - Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`. + ### Verify the results The setup monitoring script will report failure if there was any error. However, verify that required resources were created by the script. @@ -116,5 +274,4 @@ $ cd ${WORKDIR}/monitoring-service $ ./delete-monitoring.sh \ -i monitoring-inputs.yaml ``` - diff --git a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json index 0b8444e35..c2fa9e2eb 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json +++ b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json @@ -125,7 +125,7 @@ "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -750,7 +750,7 @@ "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " Heap Free ({{weblogic_serverName}})", + "legendFormat": " Heap Free ()", "refId": "B" }, { @@ -758,7 +758,7 @@ "format": "time_series", "instant": false, "intervalFactor": 1, - "legendFormat": "Heap Size ({{weblogic_serverName}})", + "legendFormat": "Heap Size ()", "refId": "A" }, { @@ -766,7 +766,7 @@ "format": "time_series", "hide": true, "intervalFactor": 1, - "legendFormat": "Heap Max ({{weblogic_serverName}})", + "legendFormat": "Heap Max ()", "refId": "C" } ], @@ -859,7 +859,7 @@ "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"} * 100", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{weblogic_serverName}}", + "legendFormat": " ", "refId": "B" } ], @@ -947,14 +947,14 @@ "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Total Threads 
({{weblogic_serverName}})", + "legendFormat": "Total Threads ()", "refId": "A" }, { "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Stuck Threads ({{weblogic_serverName}})", + "legendFormat": "Stuck Threads ()", "refId": "D" }, { @@ -1338,7 +1338,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" }, { @@ -1434,7 +1434,7 @@ "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1526,7 +1526,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1616,7 +1616,7 @@ "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1888,7 +1888,7 @@ "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -1977,7 +1977,7 @@ "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2065,7 +2065,7 @@ "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2153,7 +2153,7 @@ "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2256,7 +2256,7 @@ "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2345,7 +2345,7 @@ "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2599,14 +2599,14 @@ "expr": 
"sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2694,14 +2694,14 @@ "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2788,7 +2788,7 @@ "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], @@ -2875,7 +2875,7 @@ "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], diff --git a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json index 23961d230..cf6d5f776 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json +++ b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json @@ -126,7 +126,7 @@ "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -751,7 +751,7 @@ "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " Heap Free ({{weblogic_serverName}})", + "legendFormat": " Heap Free ()", "refId": "B" }, { @@ -759,7 +759,7 @@ "format": "time_series", "instant": false, "intervalFactor": 1, - "legendFormat": "Heap Size ({{weblogic_serverName}})", + "legendFormat": "Heap Size ()", "refId": "A" }, { @@ -767,7 +767,7 @@ "format": "time_series", "hide": true, "intervalFactor": 1, - "legendFormat": "Heap Max ({{weblogic_serverName}})", + "legendFormat": "Heap Max ()", "refId": "C" } ], @@ -860,7 +860,7 @@ "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", 
weblogic_serverName=~\"${serverName:regex}\"} * 100", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{weblogic_serverName}}", + "legendFormat": " ", "refId": "B" } ], @@ -948,14 +948,14 @@ "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Total Threads ({{weblogic_serverName}})", + "legendFormat": "Total Threads ()", "refId": "A" }, { "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Stuck Threads ({{weblogic_serverName}})", + "legendFormat": "Stuck Threads ()", "refId": "D" }, { @@ -1339,7 +1339,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" }, { @@ -1435,7 +1435,7 @@ "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1527,7 +1527,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1617,7 +1617,7 @@ "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1889,7 +1889,7 @@ "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -1978,7 +1978,7 @@ "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2066,7 +2066,7 @@ "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2154,7 +2154,7 @@ "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2257,7 +2257,7 @@ "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", 
+ "legendFormat": "", "refId": "A" } ], @@ -2346,7 +2346,7 @@ "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2600,14 +2600,14 @@ "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2695,14 +2695,14 @@ "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2789,7 +2789,7 @@ "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], @@ -2876,7 +2876,7 @@ "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], diff --git a/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh b/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh index b676e9b40..eee881c77 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh +++ b/OracleAccessManagement/kubernetes/monitoring-service/delete-monitoring.sh @@ -62,7 +62,7 @@ function usage { } -function deleteKubePrometheusStack { +function deletePrometheusGrafana { helm delete ${monitoringNamespace} --namespace ${monitoringNamespace} } @@ -115,7 +115,7 @@ fi if [ "${setupKubePrometheusStack}" = "true" ]; then echo "Deleting Prometheus and grafana started" - deleteKubePrometheusStack + deletePrometheusGrafana echo "Deleting Prometheus and grafana completed" fi cd $OLD_PWD diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml index e37b9830f..7f00f9b0c 100755 --- 
a/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml @@ -7,7 +7,7 @@ items: kind: RoleBinding metadata: name: prometheus-k8s - namespace: oamns + namespace: accessns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml index a881c8647..22a8ef1bd 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml @@ -7,7 +7,7 @@ items: kind: Role metadata: name: prometheus-k8s - namespace: oamns + namespace: accessns rules: - apiGroups: - "" diff --git a/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml b/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml index be289f234..670796b65 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml +++ b/OracleAccessManagement/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml @@ -5,27 +5,26 @@ apiVersion: v1 kind: Secret metadata: name: basic-auth - namespace: oamns + namespace: monitoring data: - password: d2VsY29tZTE= - user: d2VibG9naWM= + password: V2VsY29tZTE= # Welcome1 i.e.'WebLogic password' + user: d2VibG9naWM= # weblogic i.e. 'WebLogic username' type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter - namespace: oamns + namespace: monitoring labels: k8s-app: wls-exporter - release: monitoring spec: namespaceSelector: matchNames: - - oamns + - accessns selector: matchLabels: - weblogic.domainName: accessdomain + weblogic.domainName: accessinfra endpoints: - basicAuth: password: diff --git a/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml b/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml index dd2386588..39dfcdd6c 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml +++ b/OracleAccessManagement/kubernetes/monitoring-service/monitoring-inputs.yaml @@ -2,14 +2,14 @@ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # The version of this inputs file. Do not modify. -version: create-accessdomain-monitoring-inputs-v1 +version: create-accessinfra-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscope ("_"), and must be lowercase and unique across all domains in a Kubernetes cluster. 
-domainUID: accessdomain +domainUID: accessinfra # Name of the domain namespace -domainNamespace: oamns +domainNamespace: accessns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true @@ -36,7 +36,7 @@ oamClusterName: oam_cluster oamManagedServerPort: 14100 # WebLogic Monitoring Exporter to Cluster -wlsMonitoringExporterTooamCluster: true +wlsMonitoringExporterTooamCluster: false # Cluster name policyClusterName: policy_cluster @@ -45,11 +45,11 @@ policyClusterName: policy_cluster policyManagedServerPort: 15100 # WebLogic Monitoring Exporter to Cluster -wlsMonitoringExporterTopolicyCluster: true +wlsMonitoringExporterTopolicyCluster: false -# Boolean to indicate if the adminNodePort will be exposed -exposeMonitoringNodePort: true +# Boolean to indicate if the Monitoring Services NodePort will be exposed +exposeMonitoringNodePort: false # NodePort to expose Prometheus prometheusNodePort: 32101 @@ -61,5 +61,5 @@ grafanaNodePort: 32100 alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password -weblogicCredentialsSecretName: accessdomain-domain-credentials +weblogicCredentialsSecretName: accessinfra-domain-credentials diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py index 24f9f8334..4d45f189e 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py @@ -29,32 +29,32 @@ def usage(): usage() # domainName will be passed by command line parameter -domainName. -domainName = "accessdomain" +domainName = "accessinfra" # adminServerName will be passed by command line parameter -adminServerName adminServerName = "AdminServer" # adminURL will be passed by command line parameter -adminURL -adminURL = "accessdomain-adminserver:7001" +adminURL = "accessinfra-adminserver:7001" # oamClusterName will be passed by command line parameter -oamClusterName oamClusterName = "oam_cluster" # wlsMonitoringExporterTooamCluster will be passed by command line parameter -wlsMonitoringExporterTooamCluster -wlsMonitoringExporterTooamCluster = "true" +wlsMonitoringExporterTooamCluster = "false" # policyClusterName will be passed by command line parameter -policyClusterName policyClusterName = "policy_cluster" # wlsMonitoringExporterTopolicyCluster will be passed by command line parameter -wlsMonitoringExporterTopolicyCluster -wlsMonitoringExporterTopolicyCluster = "true" +wlsMonitoringExporterTopolicyCluster = "false" # username will be passed by command line parameter -username username = "weblogic" # password will be passed by command line parameter -password -password = "welcome1" +password = "Welcome1" i=1 while i < len(sys.argv): diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh new file mode 100755 index 000000000..3da4f6b7d --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
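+#
+# deploy-weblogic-server-grafana-dashboard.sh
+#
+# Resolves the Grafana endpoint from the kube-prometheus-stack Grafana service
+# endpoints, copies the WebLogic Server dashboard JSON into the Administration
+# Server pod, and POSTs it to the Grafana /api/dashboards/db API with curl
+# (using the default admin/admin Grafana credentials set by setup-monitoring.sh).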
+# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +warDir=$PWD +source ${scriptDir}/utils.sh + +# Setting default values +initialize +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" + +grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") +grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") +grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" +kubectl cp $scriptDir/../config/weblogic-server-dashboard.json ${domainNamespace}/${adminServerPodName}:/tmp/weblogic-server-dashboard.json +EXEC_DEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- curl --noproxy \"*\" -X POST -H \"Content-Type: application/json\" -d @/tmp/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db" +echo "Deploying WebLogic Server Grafana Dashboard in progress...." +eval ${EXEC_DEPLOY} + diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py b/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py index b06988469..3ad88bd3a 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py @@ -28,30 +28,30 @@ def usage(): usage() # domainName will be passed by command line parameter -domainName. 
-domainName = "accessdomain" +domainName = "accessinfra" # adminServerName will be passed by command line parameter -adminServerName adminServerName = "AdminServer" # adminURL will be passed by command line parameter -adminURL -adminURL = "accessdomain-adminserver:7001" +adminURL = "accessinfra-adminserver:7001" # oamClusterName will be passed by command line parameter -oamClusterName oamClusterName = "oam_cluster" # wlsMonitoringExporterTooamCluster will be passed by command line parameter -wlsMonitoringExporterTooamCluster -wlsMonitoringExporterTooamCluster = "true" +wlsMonitoringExporterTooamCluster = "false" # policyClusterName will be passed by command line parameter -policyClusterName policyClusterName = "policy_cluster" # wlsMonitoringExporterTopolicyCluster will be passed by command line parameter -wlsMonitoringExporterTopolicyCluster -wlsMonitoringExporterTopolicyCluster = "true" +wlsMonitoringExporterTopolicyCluster = "false" # username will be passed by command line parameter -username username = "weblogic" # password will be passed by command line parameter -password -password = "welcome1" +password = "Welcome1" i=1 diff --git a/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh b/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh index b3799563b..4721b6666 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh +++ b/OracleAccessManagement/kubernetes/monitoring-service/scripts/utils.sh @@ -5,18 +5,18 @@ function initialize { if [ -z ${domainNamespace} ]; then - echo "domainNamespace is empty, setting to default oamns" - domainNamespace="oamns" + echo "domainNamespace is empty, setting to default accessns" + domainNamespace="accessns" fi if [ -z ${domainUID} ]; then - echo "domainUID is empty, setting to default accessdomain" - domainUID="accessdomain" + echo "domainUID is empty, setting to default accessinfra" + domainUID="accessinfra" fi if [ -z ${weblogicCredentialsSecretName} ]; then - echo "weblogicCredentialsSecretName is empty, setting to default \"accessdomain-domain-credentials\"" - weblogicCredentialsSecretName="accessdomain-domain-credentials" + echo "weblogicCredentialsSecretName is empty, setting to default \"accessinfra-domain-credentials\"" + weblogicCredentialsSecretName="accessinfra-domain-credentials" fi if [ -z ${adminServerName} ]; then @@ -41,7 +41,7 @@ function initialize { if [ -z ${wlsMonitoringExporterTooamCluster} ]; then echo "wlsMonitoringExporterTooamCluster is empty, setting to default \"false\"" - wlsMonitoringExporterTooamCluster="true" + wlsMonitoringExporterTooamCluster="false" fi if [ -z ${policyClusterName} ]; then echo "policyClusterName is empty, setting to default \"policy_cluster\"" @@ -55,7 +55,7 @@ function initialize { if [ -z ${wlsMonitoringExporterTopolicyCluster} ]; then echo "wlsMonitoringExporterTopolicyCluster is empty, setting to default \"false\"" - wlsMonitoringExporterTopolicyCluster="true" + wlsMonitoringExporterTopolicyCluster="false" fi } diff --git a/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh b/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh index c36b4bb82..57e7cba0c 100755 --- a/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh +++ b/OracleAccessManagement/kubernetes/monitoring-service/setup-monitoring.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2021, 2022, Oracle and/or its affiliates. 
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # setup-monitoring.sh @@ -78,21 +78,22 @@ function installKubePrometheusStack { if [ ${exposeMonitoringNodePort} == "true" ]; then helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ - --namespace ${monitoringNamespace} \ + --namespace ${monitoringNamespace} ${additionalParamForKubePrometheusStack} \ --set prometheus.service.type=NodePort --set prometheus.service.nodePort=${prometheusNodePort} \ --set alertmanager.service.type=NodePort --set alertmanager.service.nodePort=${alertmanagerNodePort} \ --set grafana.adminPassword=admin --set grafana.service.type=NodePort --set grafana.service.nodePort=${grafanaNodePort} \ - --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --version "16.5.0" --values ${scriptDir}/values.yaml \ --atomic --wait else helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ - --namespace ${monitoringNamespace} \ + --namespace ${monitoringNamespace} ${additionalParamForKubePrometheusStack} \ --set grafana.adminPassword=admin \ - --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --version "16.5.0" --values ${scriptDir}/values.yaml \ --atomic --wait fi exitIfError $? "ERROR: prometheus-community/kube-prometheus-stack install failed." } + #Parse the inputs while getopts "hi:" opt; do case $opt in @@ -133,7 +134,7 @@ if [ "${setupKubePrometheusStack}" = "true" ]; then echo "The namespace ${monitoringNamespace} for install prometheus-community/kube-prometheus-stack does not exist. Creating the namespace ${monitoringNamespace}" kubectl create namespace ${monitoringNamespace} fi - echo -e "Monitoring setup in ${monitoringNamespace} in progress\n" + echo -e "Monitoring setup in ${monitoringNamespace} in progress.......\n" # Create the namespace and CRDs, and then wait for them to be availble before creating the remaining resources kubectl label nodes --all kubernetes.io/os=linux --overwrite=true @@ -145,8 +146,8 @@ if [ "${setupKubePrometheusStack}" = "true" ]; then echo "Setup prometheus-community/kube-prometheus-stack completed" fi -username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` -password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` +export username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +export password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` # Setting up the WebLogic Monitoring Exporter echo "Deploy WebLogic Monitoring Exporter started" @@ -168,13 +169,11 @@ sed -i -e "$!N;s/matchNames:\n -.*/matchNames:\n - ${domainNamespace}/g;P; kubectl apply -f ${serviceMonitor} + if [ "${setupKubePrometheusStack}" = "true" ]; then # Deploying WebLogic Server Grafana Dashboard echo "Deploying WebLogic Server Grafana Dashboard...." 
- grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") - grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") - grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" - curl --noproxy "*" -X POST -H "Content-Type: application/json" -d @config/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db + sh ${scriptDir}/scripts/deploy-weblogic-server-grafana-dashboard.sh echo "" echo "Deployed WebLogic Server Grafana Dashboard successfully" echo "" diff --git a/OracleAccessManagement/kubernetes/monitoring-service/values.yaml b/OracleAccessManagement/kubernetes/monitoring-service/values.yaml new file mode 100755 index 000000000..18757f394 --- /dev/null +++ b/OracleAccessManagement/kubernetes/monitoring-service/values.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +prometheusOperator: + admissionWebhooks: + patch: + enabled: true + image: + repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen + tag: v1.0 + sha: "f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068" + pullPolicy: IfNotPresent + diff --git a/OracleAccessManagement/kubernetes/scaling/scalingAction.sh b/OracleAccessManagement/kubernetes/scaling/scalingAction.sh index 0da098e68..462031314 100755 --- a/OracleAccessManagement/kubernetes/scaling/scalingAction.sh +++ b/OracleAccessManagement/kubernetes/scaling/scalingAction.sh @@ -98,7 +98,7 @@ cat > cmds-$$.py << INPUT import sys, json for i in json.load(sys.stdin)["spec"]["ports"]: if i["name"] == "rest": - print(i["port"]) + print((i["port"])) INPUT port=$(echo "${STATUS}" | python cmds-$$.py 2>> ${log_file_name}) fi @@ -131,7 +131,7 @@ cat > cmds-$$.py << INPUT import sys, json for i in json.load(sys.stdin)["groups"]: if i["name"] == "weblogic.oracle": - print(i["preferredVersion"]["version"]) + print((i["preferredVersion"]["version"])) INPUT domain_api_version=`echo ${APIS} | python cmds-$$.py 2>> ${log_file_name}` fi @@ -196,7 +196,7 @@ cat > cmds-$$.py << INPUT import sys, json for j in json.load(sys.stdin)["spec"]["clusters"]: if j["clusterName"] == "$wls_cluster_name": - print (j["replicas"]) + print((j["replicas"])) INPUT num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` fi @@ -219,7 +219,7 @@ function get_num_ms_domain_scope() { else cat > cmds-$$.py << INPUT import sys, json -print (json.load(sys.stdin)["spec"]["replicas"]) +print((json.load(sys.stdin)["spec"]["replicas"])) INPUT num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` fi @@ -253,7 +253,7 @@ cat > cmds-$$.py << INPUT import sys, json for j in json.load(sys.stdin)["status"]["clusters"]: if j["clusterName"] == "$clusterName": - print (j["minimumReplicas"]) + print((j["minimumReplicas"])) INPUT minReplicas=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` fi diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml new file mode 100755 index 000000000..7d12d672f --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/host-nginx-ingress-nonssl.yaml @@ -0,0 +1,346 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. 
+# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +{{- if eq .Values.type "NGINX" }} +{{- if (eq .Values.sslType "NONSSL") }} +{{- if .Values.hostName.enabled }} + +{{- if .Values.hostName.admin }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oigadmin-ingress + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + nginx.ingress.kubernetes.io/ssl-redirect: 'false' + nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginx.nginxTimeOut }}' + nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginx.nginxTimeOut }}' +spec: + ingressClassName: nginx + rules: + - host: '{{ .Values.hostName.admin }}' + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /consolehelp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /oim + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /sysadmin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /admin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /dms + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /soa-infra + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /ws_utc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /identity + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /OIGUI + pathType: 
ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /FacadeWebApp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /SchedulerService-web + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + +{{- end }} + +{{- if .Values.hostName.runtime }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oigruntime-ingress + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginx.nginxTimeOut }}' + nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginx.nginxTimeOut }}' +spec: + ingressClassName: nginx + rules: + - host: '{{ .Values.hostName.runtime }}' + http: + paths: + - path: /identity + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /HTTPClnt + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /reqsvc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /FacadeWebApp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /iam + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /OIGUI + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + + +{{- end }} + +{{- if .Values.hostName.internal }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oiginternal-ingress + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + 
nginx.ingress.kubernetes.io/ssl-redirect: 'false' + nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginx.nginxTimeOut }}' + nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginx.nginxTimeOut }}' +spec: + ingressClassName: nginx + rules: + - host: oiginternal-ingress + http: + paths: + - path: /wsm-pm + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /sodcheck + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /role-sod + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /workflowservice + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /callbackResponseService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /spml-xsd + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /spmlws + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /reqsvc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /soa-infra + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /ws_utc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /ucs + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /provisioning-callback + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /CertificationCallbackService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID 
}}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /IdentityAuditCallbackService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /soa/composer + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /integration + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /sdpmessaging/userprefs-ui + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /iam + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + +{{- end }} + +{{- end }} +{{- end }} +{{- end }} + + diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml new file mode 100755 index 000000000..9afaad16f --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml @@ -0,0 +1,191 @@ +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
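+#
+# Path-based (no virtual host) NGINX ingress for the OIG domain: routes the
+# admin, runtime and SOA URI paths to the Administration Server, OIM cluster
+# and SOA cluster services. Rendered only when type is NGINX, sslType is
+# NONSSL and hostName.enabled is false.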
+# +{{- if eq .Values.type "NGINX" }} +{{- if (eq .Values.sslType "NONSSL") }} +{{- if not .Values.hostName.enabled }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Values.wlsDomain.domainUID }}-nginx + namespace: {{ .Release.Namespace }} + annotations: + nginx.ingress.kubernetes.io/affinity: 'cookie' + nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' + nginx.ingress.kubernetes.io/enable-access-log: 'false' + kubernetes.io/ingress.class: 'nginx' + nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginx.nginxTimeOut }}' + nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginx.nginxTimeOut }}' +spec: + rules: + - host: + http: + paths: + - path: /console + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /consolehelp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /em + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} + - path: /ws_utc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /soa + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /integration + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /soa-infra + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} + - path: /identity + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /admin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /oim + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /sysadmin + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: 
/workflowservice + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /callbackResponseService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /spml-xsd + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /HTTPClnt + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /reqsvc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /iam + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /provisioning-callback + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /CertificationCallbackService + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /ucs + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /FacadeWebApp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /OIGUI + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + - path: /weblogic + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.oimManagedServerPort }} + +{{- end }} +{{- end }} +{{- end }} + diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml similarity index 90% rename from OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml rename to 
OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml index 2c076e013..1270f03cd 100755 --- a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml @@ -1,9 +1,9 @@ # Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} {{- if eq .Values.type "NGINX" }} -{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} +{{- if (eq .Values.sslType "SSL") }} +{{- if not .Values.hostName.enabled }} --- apiVersion: networking.k8s.io/v1 kind: Ingress @@ -13,23 +13,21 @@ metadata: annotations: nginx.ingress.kubernetes.io/affinity: 'cookie' nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' - nginx.ingress.kubernetes.io/affinity-mode: persistent + nginx.ingress.kubernetes.io/affinity-mode: 'persistent' nginx.ingress.kubernetes.io/enable-access-log: 'false' kubernetes.io/ingress.class: 'nginx' -{{- if (.Values.nginxTimeOut) }} - nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginxTimeOut }}' - nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginxTimeOut }}' -{{- end }} -{{- if eq .Values.sslType "SSL" }} + nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginx.nginxTimeOut }}' + nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginx.nginxTimeOut }}' nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_input_headers "X-Forwarded-Proto: https"; - more_set_input_headers "WL-Proxy-SSL: true"; + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; nginx.ingress.kubernetes.io/ingress.allow-http: 'false' -{{- end }} spec: rules: - - http: + - host: + http: paths: - path: /console pathType: ImplementationSpecific @@ -38,6 +36,13 @@ spec: name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' port: number: {{ .Values.wlsDomain.adminServerPort }} + - path: /consolehelp + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.adminServerPort }} - path: /em pathType: ImplementationSpecific backend: @@ -45,6 +50,13 @@ spec: name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' port: number: {{ .Values.wlsDomain.adminServerPort }} + - path: /ws_utc + pathType: ImplementationSpecific + backend: + service: + name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' + port: + number: {{ .Values.wlsDomain.soaManagedServerPort }} - path: /soa pathType: ImplementationSpecific backend: @@ -66,7 +78,7 @@ spec: name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' port: number: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /identity + - path: /identity pathType: ImplementationSpecific backend: service: @@ -101,20 +113,6 @@ spec: name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' port: 
number: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /xlWebApp - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /Nexaweb - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.oimManagedServerPort }} - path: /callbackResponseService pathType: ImplementationSpecific backend: @@ -192,7 +190,7 @@ spec: name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' port: number: {{ .Values.wlsDomain.oimManagedServerPort }} - + {{- end }} {{- end }} {{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml deleted file mode 100755 index 1aaa2cba3..000000000 --- a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/nginx-ingress.yaml +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 2020, 2022, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -# -{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} -{{- if eq .Values.type "NGINX" }} -{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-nginx - namespace: {{ .Release.Namespace }} - annotations: - nginx.ingress.kubernetes.io/affinity: 'cookie' - nginx.ingress.kubernetes.io/session-cookie-name: 'sticky' - nginx.ingress.kubernetes.io/affinity-mode: persistent - nginx.ingress.kubernetes.io/enable-access-log: 'false' - kubernetes.io/ingress.class: 'nginx' -{{- if (.Values.nginxTimeOut) }} - nginx.ingress.kubernetes.io/proxy-read-timeout: '{{ .Values.nginxTimeOut }}' - nginx.ingress.kubernetes.io/proxy-send-timeout: '{{ .Values.nginxTimeOut }}' -{{- end }} -{{- if eq .Values.sslType "SSL" }} - nginx.ingress.kubernetes.io/proxy-buffer-size: '2000k' - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_input_headers "X-Forwarded-Proto: https"; - more_set_input_headers "WL-Proxy-SSL: true"; - nginx.ingress.kubernetes.io/ingress.allow-http: 'false' -{{- end }} -spec: - rules: - - http: - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /soa - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /integration - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /soa-infra - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | 
replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /identity - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /admin - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /oim - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /sysadmin - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /workflowservice - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /xlWebApp - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /Nexaweb - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /callbackResponseService - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /spml-xsd - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /HTTPClnt - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /reqsvc - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /iam - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /provisioning-callback - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /CertificationCallbackService - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /ucs - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /FacadeWebApp - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort 
}} - - path: /OIGUI - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - - path: /weblogic - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} - -{{- end }} -{{- end }} -{{- end }} - diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml deleted file mode 100755 index d19b64c93..000000000 --- a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress-k8s1.19.yaml +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -# -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -{{- if eq .Values.type "TRAEFIK" }} -{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-traefik - namespace: {{ .Release.Namespace }} - labels: - weblogic.resourceVersion: domain-v2 - annotations: - kubernetes.io/ingress.class: 'traefik' -spec: - rules: - - host: '{{ .Values.traefik.hostname }}' - http: - paths: - - path: /console - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.adminServerPort }} - - path: /soa - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /integration - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /soa-infra - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: - pathType: ImplementationSpecific - backend: - service: - name: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - port: - number: {{ .Values.wlsDomain.oimManagedServerPort }} -{{- if eq .Values.sslType "SSL" }} - tls: - - hosts: - - '{{ .Values.traefik.hostname }}' - secretName: {{ .Values.secretName }} -{{- end }} ---- -#Create Traefik Middleware custom resource for SSL Termination -{{- if eq .Values.sslType "SSL" }} -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: wls-proxy-ssl - namespace: {{ .Release.Namespace }} -spec: - headers: - customRequestHeaders: - X-Custom-Request-Header: "" - X-Forwarded-For: "" - WL-Proxy-Client-IP: 
"" - WL-Proxy-SSL: "" - WL-Proxy-SSL: "true" - sslRedirect: true -{{- end }} - -{{- end }} -{{- end }} -{{- end }} - diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml deleted file mode 100755 index efb029ee3..000000000 --- a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/templates/traefik-ingress.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. -# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -# -{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion -}} -{{- if eq .Values.type "TRAEFIK" }} -{{- if or (eq .Values.sslType "NONSSL") (eq .Values.sslType "SSL") }} ---- -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ .Values.wlsDomain.domainUID }}-traefik - namespace: {{ .Release.Namespace }} - labels: - weblogic.resourceVersion: domain-v2 - annotations: - kubernetes.io/ingress.class: 'traefik' -spec: - rules: - - host: '{{ .Values.traefik.hostname }}' - http: - paths: - - path: /console - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /em - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-{{ .Values.wlsDomain.adminServerName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.adminServerPort }} - - path: /soa - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /integration - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: /soa-infra - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.soaClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.soaManagedServerPort }} - - path: - backend: - serviceName: '{{ .Values.wlsDomain.domainUID }}-cluster-{{ .Values.wlsDomain.oimClusterName | lower | replace "_" "-" }}' - servicePort: {{ .Values.wlsDomain.oimManagedServerPort }} -{{- if eq .Values.sslType "SSL" }} - tls: - - hosts: - - '{{ .Values.traefik.hostname }}' - secretName: {{ .Values.secretName }} -{{- end }} ---- -#Create Traefik Middleware custom resource for SSL Termination -{{- if eq .Values.sslType "SSL" }} -apiVersion: traefik.containo.us/v1alpha1 -kind: Middleware -metadata: - name: wls-proxy-ssl - namespace: {{ .Release.Namespace }} -spec: - headers: - customRequestHeaders: - X-Custom-Request-Header: "" - X-Forwarded-For: "" - WL-Proxy-Client-IP: "" - WL-Proxy-SSL: "" - WL-Proxy-SSL: "true" - sslRedirect: true -{{- end }} - -{{- end }} -{{- end }} -{{- end }} - diff --git a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml index fe7c89e8b..b210e5a4f 100755 --- a/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/ingress-per-domain/values.yaml @@ -6,24 +6,39 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. # -# Load balancer type. 
Supported values are: TRAEFIK, NGINX + +# Load balancer type. Supported values are: NGINX type: NGINX -# TimeOut value to be set for nginx parameters proxy-read-timeout and proxy-send-timeout -nginxTimeOut: 180 -# Type of Configuration Supported Values are : NONSSL, SSL + +# SSL configuration Type. Supported Values are : NONSSL,SSL sslType: SSL -# TLS secret name if the mode is SSL -secretName: domain1-tls-cert +# domainType. Supported values are: oim +domainType: oim #WLS domain as backend to the load balancer wlsDomain: domainUID: oimcluster adminServerName: AdminServer adminServerPort: 7001 + adminServerSSLPort: soaClusterName: soa_cluster soaManagedServerPort: 8001 + soaManagedServerSSLPort: oimClusterName: oim_cluster oimManagedServerPort: 14000 + oimManagedServerSSLPort: + + +# Host specific values +hostName: + enabled: false + admin: + runtime: + internal: + +# Ngnix specific values +nginx: + nginxTimeOut: 180 diff --git a/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml b/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml index e94bf24f2..f680d34e3 100755 --- a/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/traefik/values.yaml @@ -1,9 +1,9 @@ -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # image: name: traefik - tag: 2.2.8 + tag: 2.6.0 pullPolicy: IfNotPresent ingressRoute: dashboard: @@ -49,4 +49,7 @@ ports: # The port protocol (TCP/UDP) protocol: TCP nodePort: 30443 +additionalArguments: + - "--log.level=INFO" + diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml index b5cac770e..5814294bf 100755 --- a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/Chart.yaml @@ -6,5 +6,5 @@ name: weblogic-operator description: Helm chart for configuring the WebLogic operator. type: application -version: 3.3.0 -appVersion: 3.3.0 +version: 3.4.2 +appVersion: 3.4.2 diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl index dd6594de2..8f7f2ff51 100755 --- a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-cm.tpl @@ -49,6 +49,18 @@ data: {{- if .tokenReviewAuthentication }} tokenReviewAuthentication: {{ .tokenReviewAuthentication | quote }} {{- end }} + {{- if (hasKey . 
"istioLocalhostBindingsEnabled") }} + istioLocalhostBindingsEnabled: {{ .istioLocalhostBindingsEnabled | quote }} + {{- end }} + {{- if .kubernetesPlatform }} + kubernetesPlatform: {{ .kubernetesPlatform | quote }} + {{- end }} + {{- if .domainPresenceFailureRetryMaxCount }} + domainPresenceFailureRetryMaxCount: {{ .domainPresenceFailureRetryMaxCount | quote }} + {{- end }} + {{- if .domainPresenceFailureRetrySeconds }} + domainPresenceFailureRetrySeconds: {{ .domainPresenceFailureRetrySeconds | quote }} + {{- end }} kind: "ConfigMap" metadata: labels: diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl index 3fadac7dc..6faacc095 100755 --- a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-dep.tpl @@ -33,6 +33,10 @@ spec: {{- end }} spec: serviceAccountName: {{ .serviceAccount | quote }} + {{- if .runAsUser }} + securityContext: + runAsUser: {{ .runAsUser }} + {{- end }} {{- with .nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -64,6 +68,8 @@ spec: value: "false" - name: "JAVA_LOGGING_LEVEL" value: {{ .javaLoggingLevel | quote }} + - name: "KUBERNETES_PLATFORM" + value: {{ .kubernetesPlatform | quote }} - name: "JAVA_LOGGING_MAXSIZE" value: {{ .javaLoggingFileSizeLimit | default 20000000 | quote }} - name: "JAVA_LOGGING_COUNT" @@ -112,7 +118,7 @@ spec: command: - "bash" - "/operator/livenessProbe.sh" - initialDelaySeconds: 20 + initialDelaySeconds: 40 periodSeconds: 5 readinessProbe: exec: diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl index 44bfc1191..18b0876a9 100755 --- a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-external-svc.tpl @@ -19,11 +19,13 @@ spec: {{- if .externalRestEnabled }} - name: "rest" port: 8081 + appProtocol: https nodePort: {{ .externalRestHttpsPort }} {{- end }} {{- if .remoteDebugNodePortEnabled }} - name: "debug" port: {{ .internalDebugHttpPort }} + appProtocol: http nodePort: {{ .externalDebugHttpPort }} {{- end }} {{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl index 0108738de..b03aa8aee 100755 --- a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/templates/_operator-internal-svc.tpl @@ -17,4 +17,5 @@ spec: ports: - port: 8082 name: "rest" + appProtocol: https {{- end }} diff --git a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml index dac9a5382..fd151bff1 100755 --- a/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml +++ b/OracleIdentityGovernance/kubernetes/charts/weblogic-operator/values.yaml @@ -63,7 +63,7 @@ domainNamespaces: enableClusterRoleBinding: false # image specifies the container image containing the operator. 
-image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0" +image: "ghcr.io/oracle/weblogic-kubernetes-operator:3.4.2" # imagePullPolicy specifies the image pull policy for the operator's container image. imagePullPolicy: IfNotPresent @@ -104,7 +104,7 @@ elkIntegrationEnabled: false # logStashImage specifies the container image containing logstash. # This parameter is ignored if 'elkIntegrationEnabled' is false. -logStashImage: "logstash:6.6.0" +logStashImage: "logstash:6.8.23" # elasticSearchHost specifies the hostname of where elasticsearch is running. # This parameter is ignored if 'elkIntegrationEnabled' is false. @@ -222,3 +222,14 @@ clusterSizePaddingValidationEnabled: true # to the Domain resource so that it is done using the caller's privileges. # The default value is false. #tokenReviewAuthentication: false + +# domainPresenceFailureRetryMaxCount and domainPresenceFailureRetrySeconds specify the number of introspector job +# retries for a Domain and the interval in seconds between these retries, respectively. +# Defaults to 5 retries and 10 seconds between each retry. +# domainPresenceFailureRetryMaxCount: 5 +# domainPresenceFailureRetrySeconds: 10 + +# runAsUser specifies the UID to run the operator container process. If not specified, +# it defaults to the user specified in the operator's container image. +#runAsUser: 1000 + diff --git a/OracleIdentityGovernance/kubernetes/common/utility.sh b/OracleIdentityGovernance/kubernetes/common/utility.sh index 979207be2..a7ae01450 100755 --- a/OracleIdentityGovernance/kubernetes/common/utility.sh +++ b/OracleIdentityGovernance/kubernetes/common/utility.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) 2018, 2021, Oracle and/or its affiliates. +# Copyright (c) 2018, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # @@ -185,6 +185,23 @@ function checkPvState { fi } +# +# Check the state of a persistent volume claim. +# $1 - name of volume claim +# $2 - expected state of volume claim +function checkPvcState { + echo "Checking if the persistent volume claim ${1:?} is ${2:?}" + local end_secs=$((SECONDS + 30)) + local pvc_state=`kubectl get pvc $1 -o jsonpath='{.status.phase}'` + while [ ! "$pvc_state" = "$2" ] && [ $SECONDS -le $end_secs ]; do + sleep 1 + pvc_state=`kubectl get pvc $1 -o jsonpath='{.status.phase}'` + done + if [ "$pvc_state" != "$2" ]; then + fail "The persistent volume state should be $2 but is $pvc_state" + fi +} + # # Function to check if a persistent volume exists # $1 - name of volume @@ -926,3 +943,44 @@ function checkService(){ done echo "Service [$svc] found" } + +# Get pod name when pod available in a given namespace +function getPodName(){ + + local max=$((SECONDS + 120)) + + local pod=$1 + local ns=$2 + + local pname="" + while [ $SECONDS -le $max ] ; do + pname=`kubectl get po -n ${ns} | grep -w ${pod} | awk '{print $1}'` + [ -z "${pname}" ] || break + sleep 1 + done + + if [ -z "${pname}" ] ; then + echo "[ERROR] Could not find Pod [$pod] after $max seconds"; + exit 1 + fi + + echo "${pname}" +} + +# Checks if a pod is available in a given namespace +function detectPod() { + ns=$1 + startSecs=$SECONDS + maxWaitSecs=10 + while [ -z "`kubectl get pod -n ${ns} -o jsonpath={.items[0].metadata.name}`" ]; do + if [ $((SECONDS - startSecs)) -lt $maxWaitSecs ]; then + echo "Pod not found after $((SECONDS - startSecs)) seconds, retrying ..." 
+ sleep 2 + else + echo "[Error] Could not find Pod after $((SECONDS - startSecs)) seconds" + exit 1 + fi + done + retVal=`kubectl get pod -n ${ns} -o jsonpath={.items[0].metadata.name}` + echo "$retVal" +} diff --git a/OracleIdentityGovernance/kubernetes/common/validate.sh b/OracleIdentityGovernance/kubernetes/common/validate.sh index 4bbbffb71..b6f06337c 100755 --- a/OracleIdentityGovernance/kubernetes/common/validate.sh +++ b/OracleIdentityGovernance/kubernetes/common/validate.sh @@ -82,6 +82,28 @@ function validateLowerCase { fi } +# +# Function to check if a value is a valid WLS domain name. +# must include only alphanumeric characters, hyphens (-) +# or underscore characters (_) and contain at least one letter +# but must start with an alphanumeric or underscore character. +# +# $1 - name of object being checked +# $2 - value to check +validateWlsDomainName() { + echo "validateWlsDomainName called with $2" + if ! [[ "$2" =~ ^[a-z_][a-z0-9_.-]*$ ]] ; then + validationError "$1 with value of $2 is not a valid WebLogic domain name. "\ + "A valid WebLogic domain name must include only alphanumeric characters, hyphens (-) "\ + "or underscore characters (_) but must start with an alphanumeric or underscore character." + else + if ! [[ "$2" =~ ^.*[a-z0-9].*$ ]] ; then + validationError "$1 with value of $2 is not a valid WebLogic domain name. "\ + "A valid WebLogic domain name must contain at least one alphanumeric character." + fi + fi +} + # # Function to check if a value is lowercase and legal DNS name # $1 - name of object being checked @@ -112,10 +134,13 @@ function validateVersion { # # Function to ensure the domain uid is a legal DNS name +# Because the domain uid is also used as a WebLogic domain +# name, it must also be a valid WebLogic domain name. 
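+# For example, "governancedomain" and "oig_domain1" satisfy these rules, while "-oigdomain"
+# (starts with a hyphen) or "___" (contains no alphanumeric character) would be rejected by the
+# validateWlsDomainName check below. (The example names are illustrative only.)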
# function validateDomainUid { - validateLowerCase "domainUID" ${domainUID} - validateDNS1123LegalName domainUID ${domainUID} + validateLowerCase "domainUID" "${domainUID}" + validateDNS1123LegalName "domainUID" "${domainUID}" + validateWlsDomainName "domainUID" "${domainUID}" } # @@ -339,13 +364,13 @@ function validateDomainSecret { # Verify the secret contains a username SECRET=`kubectl get secret ${weblogicCredentialsSecretName} -n ${namespace} -o jsonpath='{.data}' | tr -d '"' | grep username: | wc | awk ' { print $1; }'` if [ "${SECRET}" != "1" ]; then - validationError "The domain secret ${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a username" + validationError "The domain secret ${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a username" fi # Verify the secret contains a password SECRET=`kubectl get secret ${weblogicCredentialsSecretName} -n ${namespace} -o jsonpath='{.data}' | tr -d '"'| grep password: | wc | awk ' { print $1; }'` if [ "${SECRET}" != "1" ]; then - validationError "The domain secret ${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a password" + validationError "The domain secret ${weblogicCredentialsSecretName} in namespace ${namespace} does not contain a password" fi failIfValidationErrors } diff --git a/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh b/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh index aa9cc691c..4ecf53f68 100755 --- a/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh +++ b/OracleIdentityGovernance/kubernetes/common/wdt-and-wit-utility.sh @@ -209,10 +209,12 @@ function run_wdt { cd $WDT_DIR || return 1 + mkdir ${action} + cmd=" $wdt_bin_dir/extractDomainResource.sh -oracle_home $oracle_home - -domain_resource_file domain${action}.yaml + -output_dir ./${action} -domain_home $domain_home_dir -model_file $model_final -variable_file $inputs_final diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh index 9a522d4eb..452860272 100755 --- a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/start-db-service.sh @@ -1,14 +1,14 @@ #!/bin/bash -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. -# + # Bring up Oracle DB Instance in [default] NameSpace with a NodePort Service script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" source ${scriptDir}/../common/utility.sh -function usage { +usage() { echo "usage: ${script} -p -i -s -n [-h]" echo " -i Oracle DB Image (optional)" echo " (default: container-registry.oracle.com/database/enterprise:12.2.0.1-slim)" @@ -67,28 +67,39 @@ fi echo "NodePort[$nodeport] ImagePullSecret[$pullsecret] Image[${dbimage}] NameSpace[${namespace}]" +#create unique db yaml file if it does not exist +dbYaml=${scriptDir}/common/oracle.db.${namespace}.yaml +if [ ! -f "$dbYaml" ]; then + echo "$dbYaml does not exist."
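+  # For example, running "./start-db-service.sh -n oigdb" (namespace value is illustrative)
+  # copies common/oracle.db.yaml to common/oracle.db.oigdb.yaml, so the sed edits below are
+  # applied to the per-namespace copy rather than to the shared template.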
+ cp ${scriptDir}/common/oracle.db.yaml ${dbYaml} +fi + # Modify ImagePullSecret and DatabaseImage based on input -sed -i -e '$d' ${scriptDir}/common/oracle.db.yaml -echo ' - name: docker-store' >> ${scriptDir}/common/oracle.db.yaml -sed -i -e "s?name: docker-store?name: ${pullsecret}?g" ${scriptDir}/common/oracle.db.yaml -sed -i -e "s?image:.*?image: ${dbimage}?g" ${scriptDir}/common/oracle.db.yaml -sed -i -e "s?namespace:.*?namespace: ${namespace}?g" ${scriptDir}/common/oracle.db.yaml +sed -i -e '$d' ${dbYaml} +echo ' - name: docker-store' >> ${dbYaml} +sed -i -e "s?name: docker-store?name: ${pullsecret}?g" ${dbYaml} +sed -i -e "s?image:.*?image: ${dbimage}?g" ${dbYaml} +sed -i -e "s?namespace:.*?namespace: ${namespace}?g" ${dbYaml} # Modify the NodePort based on input if [ "${nodeport}" = "none" ]; then - sed -i -e "s? nodePort:? #nodePort:?g" ${scriptDir}/common/oracle.db.yaml - sed -i -e "s? type:.*NodePort? #type: NodePort?g" ${scriptDir}/common/oracle.db.yaml + sed -i -e "s? nodePort:? #nodePort:?g" ${dbYaml} + sed -i -e "s? type:.*NodePort? #type: NodePort?g" ${dbYaml} else - sed -i -e "s?[#]*nodePort:.*?nodePort: ${nodeport}?g" ${scriptDir}/common/oracle.db.yaml - sed -i -e "s?[#]*type:.*NodePort?type: NodePort?g" ${scriptDir}/common/oracle.db.yaml # default type is ClusterIP + sed -i -e "s?[#]*nodePort:.*?nodePort: ${nodeport}?g" ${dbYaml} + sed -i -e "s?[#]*type:.*NodePort?type: NodePort?g" ${dbYaml} # default type is ClusterIP fi kubectl delete service oracle-db -n ${namespace} --ignore-not-found -kubectl apply -f ${scriptDir}/common/oracle.db.yaml +kubectl apply -f ${dbYaml} -dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` +detectPod ${namespace} +dbpod=${retVal} +echo "Is going to check dbpod: ${dbpod} in the namespace: ${namespace} " checkPod ${dbpod} ${namespace} + +echo " checking pod state for pod ${dbpod} running in ${namespace}" checkPodState ${dbpod} ${namespace} "1/1" checkService oracle-db ${namespace} diff --git a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh index 7ab14928c..a99af10b4 100755 --- a/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh +++ b/OracleIdentityGovernance/kubernetes/create-oracle-db-service/stop-db-service.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2020, 2021, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
# # Drop the DB Service created by start-db-service.sh @@ -8,7 +8,7 @@ script="${BASH_SOURCE[0]}" scriptDir="$( cd "$( dirname "${script}" )" && pwd )" source ${scriptDir}/../common/utility.sh -function usage { +usage() { echo "usage: ${script} -n namespace [-h]" echo " -n Kubernetes NameSpace for Oracle DB Service to be Stopped (optional)" echo " (default: default) " @@ -34,7 +34,8 @@ fi dbpod=`kubectl get po -n ${namespace} | grep oracle-db | cut -f1 -d " " ` -kubectl delete -f ${scriptDir}/common/oracle.db.yaml --ignore-not-found +kubectl delete -f ${scriptDir}/common/oracle.db.${namespace}.yaml --ignore-not-found +rm ${scriptDir}/common/oracle.db.${namespace}.yaml --force if [ -z ${dbpod} ]; then echo "Couldn't find oracle-db pod in [${namespace}] namesapce" diff --git a/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md b/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md index 0d5eee26e..951161d63 100755 --- a/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md +++ b/OracleIdentityGovernance/kubernetes/create-rcu-schema/README.md @@ -23,7 +23,7 @@ The script assumes that either the image, `oracle/oig:12.2.1.4.0`, is available ``` $ ./create-rcu-schema.sh -h -usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-h] +usage: ./create-rcu-schema.sh -s -t -d -i -u -p -n -q -r -o -c [-l] [-h] -s RCU Schema Prefix (required) -t RCU Schema Type (optional) (supported values: oim) @@ -44,11 +44,13 @@ usage: ./create-rcu-schema.sh -s -t -d -i : Mandatory. Kubernetes namespace where OIG domain in running. + There should be only one OIG domain running in given + namespace. for example, oigns + -i : Mandatory. Image tag of the updated image. for example, + 12.2.1.4.0-8-ol7-210721.0748 + -r registry : Optional. Container registry to be used for fetching image. + Default value will be fetched from running domain + definition. for example, + container-registry.oracle.com/middleware/oig_cpu + -l custom_log_dir : Optional. Default will be under script working directory + + -t : Optional. Timeout in seconds. Defaults to 2000s. + -h : This help. +EOF + exit $1 +} + +unset image_registry +unset imagetag +unset LOG_DIR +unset registry + +BASEDIR="$( cd "$( dirname "${script}" )" && pwd )" +# +# Parse the command line options +# +while getopts "hn:i:r:l:t:" opt; do + case $opt in + n) namespace="${OPTARG}" + ;; + i) imagetag="${OPTARG}" + ;; + r) registry="${OPTARG}" + ;; + l) log_dir="${OPTARG}" + ;; + t) timeout="${OPTARG}" + ;; + h) usage 0 + ;; + *) usage 1 + ;; + esac +done + +if [ ! -x "$(command -v kubectl)" ]; then + echo "Can't find kubectl. Please add it to the path." + exit 1 +fi + +if [ ! -x "$(command -v base64)" ]; then + echo "Can't find base64. Please add it to the path." + exit 1 +fi + +if [ -z ${namespace} ]; then + echo "${script}: -n must be specified." + missingRequiredOption="true" +fi + +if [ -z ${imagetag} ]; then + echo "${script}: -i must be specified." + missingRequiredOption="true" +fi + +if [ "${missingRequiredOption}" == "true" ]; then + usage 1 +fi + +#log settings +LOG_TIME="`date +%Y-%m-%d_%H-%M-%S`" +if [ -z $log_dir ]; then + LOG_DIR=$BASEDIR/log/oim_patch_log-$LOG_TIME +else + LOG_DIR=$log_dir/oim_patch_log-$LOG_TIME +fi +mkdir -p $LOG_DIR + +#check if namespace exists +kubectl get ns ${namespace} > $LOG_DIR/check_namespace.log 2>&1 + +if [ $? != 0 ] +then + fail "Namespace ${namespace} doesn't exist. Check the namespace and rerun.." +fi + +#get domainUID. 
assuming only one domain in the namespace +domainUID=`kubectl get domains -n ${namespace} -o jsonpath="{.items..metadata.name}"` +info "Found domain name: $domainUID" + +#fetch registry name +if [ -z ${registry} ]; then + registry=`kubectl get domain ${domainUID} -n ${namespace} -o jsonpath="{..image}" | cut -d ":" -f 1` +fi + +image_registry=$registry +info "Image Registry: $image_registry" + +if [ -z ${timeout} ] +then + timeout=2000 +fi + +current_image_tag=`kubectl get domain ${domainUID} -n ${namespace} -o jsonpath="{..image}" | cut -d ":" -f 2` +current_image_reg=`kubectl get domain ${domainUID} -n ${namespace} -o jsonpath="{..image}" | cut -d ":" -f 1` +info "Domain $domainUID is currently running with image: $current_image_reg:$current_image_tag" + +#fetch no. of current weblogic pod under given domain +##fetch oim and soa replica count from domain config +NO_OF_PODS_ORIG=0 +cluster_name=`kubectl get domains ${domainUID} -n ${namespace} -o jsonpath="{.spec.clusters[0]['clusterName']}"` +if [ $cluster_name == 'soa_cluster' ]; then + NO_OF_SOA_REPLICAS=`kubectl get domains ${domainUID} -n ${namespace} -o jsonpath="{.spec.clusters[0]['replicas']}"` + NO_OF_OIM_REPLICAS=`kubectl get domains ${domainUID} -n ${namespace} -o jsonpath="{.spec.clusters[1]['replicas']}"` +else + NO_OF_OIM_REPLICAS=`kubectl get domains ${domainUID} -n ${namespace} -o jsonpath="{.spec.clusters[0]['replicas']}"` + NO_OF_SOA_REPLICAS=`kubectl get domains ${domainUID} -n ${namespace} -o jsonpath="{.spec.clusters[1]['replicas']}"` +fi + +(( NO_OF_PODS_ORIG = NO_OF_PODS_ORIG + NO_OF_SOA_REPLICAS + NO_OF_OIM_REPLICAS + 1 )) +echo "current no of pods under $domainUID are $NO_OF_PODS_ORIG" + +#make sure if any old helper pod is running or not, if yes then delete it +helper_pod_name="helper" +result=`kubectl get pods ${helper_pod_name} -n ${namespace} --ignore-not-found=true | grep ${helper_pod_name} | wc | awk ' { print $1; }'` +if [ "${result:=Error}" != "0" ]; then + info "The pod ${helper_pod_name} already exists in namespace ${namespace}." + info "Deleting pod ${helper_pod_name}" + kubectl delete pod ${helper_pod_name} -n ${namespace} + sleep 30 +fi + +#fetch imagepullsecrets dynamically from domain. if no imagepullsecret, then exit. No option of passing. +image_pull_secrets=`kubectl get domain ${domainUID} -n ${namespace} -o jsonpath="{.spec.imagePullSecrets[*]['name']}"` +info "Fetched Image Pull Secret: $image_pull_secrets" +if [ ! -z $image_pull_secrets ] +then + is_image_pull_secret=`kubectl get secrets -n ${namespace} | grep -i $image_pull_secrets | head -1` + if [ "$is_image_pull_secret" = "" ] + then + fail "ImagePullSecrets $image_pull_secrets doesn't exist in namespace $namespace. Create it first and rerun this script." + else + info "Creating new helper pod with image: $image_registry:$imagetag" + kubectl run --image=${image_registry}:${imagetag} --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": '\"${image_pull_secrets}\"'}]}}' ${helper_pod_name} -n ${namespace} -- sleep infinity + fi +else + echo "[WARNING] Could not fetch any imagePullSecrets from $domainUID definition. Proceeding with helper pod creation without imagePullSecrets." + kubectl run helper --image ${image_registry}:${imagetag} -n ${namespace} -- sleep infinity +fi + +#create a new helper pod and wait for it to run. 
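+# (check_running is assumed to be defined earlier in this script or in a sourced helper, not shown
+# in this excerpt; it polls the helper pod until it reports Running or the timeout expires. A rough
+# manual equivalent, with an illustrative timeout, would be:
+#   kubectl wait --for=condition=Ready pod/${helper_pod_name} -n ${namespace} --timeout=120s )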
+check_running $namespace $helper_pod_name $timeout $image_registry:$imagetag $domainUID 30 + +#Stopping Admin, SOA and OIM servers +info "Stopping Admin, SOA and OIM servers in domain $domainUID. This may take some time, monitor log $LOG_DIR/stop_servers.log for details" +kubectl patch domain ${domainUID} -n ${namespace} --type merge -p '{"spec":{"serverStartPolicy":"NEVER"}}' > $LOG_DIR/stop_servers.log 2>&1 + +#wait for all pods to be down +sh wl-pod-wait.sh -d ${domainUID} -n ${namespace} -t $timeout -p 0 >> $LOG_DIR/stop_servers.log 2>&1 + +if [ $? != 0 ] +then + fail "All servers under domain ${domainUID} could not be stopped. Check kubectl get pods -n ${namespace} for details" +fi + +NO_OF_PODS=$(kubectl get pods -n ${namespace} -l weblogic.serverName,weblogic.domainUID=${domainUID} -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | wc -l) + +if [ $NO_OF_PODS == 0 ] +then + info "All servers are now stopped successfully. Proceeding with DB Schema changes" +else + fail "All servers under domain ${domainUID} could not be stopped. Check kubectl get pods -n ${namespace} for details" +fi + +#fetch details from job configmap for db schema changes +JOB_CM=`kubectl get cm -n ${namespace} | grep -i fmw-infra-sample-domain-job | awk '{print $1}' | tr -d " "` +rcuSchemaPrefix=`kubectl get cm ${JOB_CM} -n ${namespace} -o template --template {{.data}} | grep "rcuSchemaPrefix:" | cut -d ":" -f 2 | tr -d " "` +rcuCredentialsSecret=`kubectl get cm ${JOB_CM} -n ${namespace} -o template --template {{.data}} | grep "rcuCredentialsSecret:" | cut -d ":" -f 2 | tr -d " "` +rcuDatabaseURL=`kubectl get cm ${JOB_CM} -n ${namespace} -o template --template {{.data}} | grep "rcuDatabaseURL:" | awk -F 'rcuDatabaseURL:' '{print $2}' | tr -d " "` +DB_HOST=`echo $rcuDatabaseURL | cut -d ":" -f 1 | tr -d " "` +DB_PORT=`echo $rcuDatabaseURL | tr ":" "\t" | awk '{print $2}' | tr -d " " | tr "/" "\t" | awk '{print $1}' | tr -d " "` +DB_SERVICE=`echo $rcuDatabaseURL | tr ":" "\t" | awk '{print $2}' | tr -d " " | tr "/" "\t" | awk '{print $2}' | tr -d " "` +RCU_SCHEMA_PWD=`kubectl get secrets ${rcuCredentialsSecret} -n ${namespace} -o yaml | grep "\spassword" | tr -d " " | tr ":" "\t" | awk '{print $2}' | tr -d " " | base64 -d` +SYS_PWD=`kubectl get secrets ${rcuCredentialsSecret} -n ${namespace} -o yaml | grep "\ssys_password" | tr -d " " | tr ":" "\t" | awk '{print $2}' | tr -d " " | base64 -d` + +echo "DB_HOST=$DB_HOST + DB_PORT=$DB_PORT + DB_SERVICE=$DB_SERVICE + RCU_SCHEMA_PREFIX=$rcuSchemaPrefix + RCU_CREDENTIALS_SECRET=$rcuCredentialsSecret + RCUDATABASEURL=$rcuDatabaseURL + JOBCM=$JOB_CM" > $LOG_DIR/db.properties + +#run db schema patch command +info "Patching OIM schemas..." 
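+# The two kubectl exec calls below first stage the SYS and schema passwords in /tmp/pwd.txt inside
+# the helper pod, and then run the OIG ant targets run-patched-sql-files and invoke-metadata-seeding
+# against the ${rcuSchemaPrefix}_MDS and ${rcuSchemaPrefix}_OIM schemas; patch_oim_wls.log is copied
+# back from the pod afterwards so the result can be verified.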
+kubectl exec -it ${helper_pod_name} -n ${namespace} -- bash -c "echo -e ${SYS_PWD}'\n'${RCU_SCHEMA_PWD} > /tmp/pwd.txt" +kubectl exec -it ${helper_pod_name} -n ${namespace} -- /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \ +-f /u01/oracle/idm/server/setup/deploy-files/automation.xml \ +run-patched-sql-files invoke-metadata-seeding \ +-logger org.apache.tools.ant.NoBannerLogger \ +-logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \ +-DmdsDB.user=${rcuSchemaPrefix}_MDS \ +-DmdsDB.password=$RCU_SCHEMA_PWD \ +-DmdsDB.port=$DB_PORT \ +-DmdsDB.serviceName=$DB_SERVICE \ +-DmdsDB.host=$DB_HOST \ +-Dserver.dir=/u01/oracle/idm/server \ +-Dmw_home=/u01/oracle \ +-Djava_home=/u01/jdk \ +-DoperationsDB.host=$DB_HOST \ +-DoperationsDB.port=$DB_PORT \ +-DoperationsDB.serviceName=$DB_SERVICE \ +-DoperationsDB.user=${rcuSchemaPrefix}_OIM \ +-DOIM.DBPassword=$RCU_SCHEMA_PWD \ +-Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar > $LOG_DIR/patch_schema.log 2>&1 + +if [ $? -gt 0 ]; then + kubectl cp $namespace/$helper_pod_name:/u01/oracle/idm/server/bin/patch_oim_wls.log $LOG_DIR/patch_oim_wls.log > /dev/null + fail "OIM schema update failed. Check log $LOG_DIR/patch_oim_wls.log for details" +fi + +if [ $? -eq 0 ]; then + kubectl cp $namespace/$helper_pod_name:/u01/oracle/idm/server/bin/patch_oim_wls.log $LOG_DIR/patch_oim_wls.log > /dev/null + grep -q "BUILD SUCCESSFUL" $LOG_DIR/patch_oim_wls.log + if [ $? = 0 ] + then + info "OIM schema update successful. Check log $LOG_DIR/patch_oim_wls.log for details" + else + fail "OIM schema update failed. Check log $LOG_DIR/patch_oim_wls.log for details" + fi +fi + +#cleanup /tmp/pwd.txt +kubectl exec -it $helper_pod_name -n $namespace -- rm -rf /tmp/pwd.txt + +info "Starting Admin, SOA and OIM servers with new image $image_registry:$imagetag" +kubectl patch domain ${domainUID} -n ${namespace} --type merge -p '{"spec":{"image":'\"${image_registry}':'${imagetag}\"', "serverStartPolicy":"IF_NEEDED"}}' > $LOG_DIR/patch_domain.log 2>&1 +if [ $? -eq 1 ]; then + fail "Domain update failed.." +fi + +#wait for pod to be ready with latest image +info "Waiting for $NO_OF_PODS_ORIG weblogic pods to be ready..This may take several minutes, do not close the window. Check log $LOG_DIR/monitor_weblogic_pods.log for progress " +sh wl-pod-wait.sh -d ${domainUID} -n ${namespace} -t $timeout -p $NO_OF_PODS_ORIG > $LOG_DIR/monitor_weblogic_pods.log 2>&1 + +if [ $? != 0 ] +then + fail "All pods under $domainUID are not in ready state. Check logs and run kubectl get pods -n $namespace for details" +fi + +grep -q "Success!" $LOG_DIR/monitor_weblogic_pods.log + +if [ $? != 0 ] +then + fail "All pods under $domainUID are not in ready state. Check logs and run kubectl get pods -n $namespace for details" +else + echo "[SUCCESS] All servers under $domainUID are now in ready state with new image: $image_registry:$imagetag" + exit 0 +fi + + + + + + diff --git a/OracleIdentityGovernance/kubernetes/domain-lifecycle/wl-pod-wait.sh b/OracleIdentityGovernance/kubernetes/domain-lifecycle/wl-pod-wait.sh new file mode 100755 index 000000000..afc98ef50 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/domain-lifecycle/wl-pod-wait.sh @@ -0,0 +1,423 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
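+# Example invocation (all values illustrative): wait up to 1200 seconds for the 5 WebLogic Server
+# pods of domain 'governancedomain' in namespace 'oigns' to become ready on the current domain
+# resource image, restartVersion and introspectVersion:
+#   ./wl-pod-wait.sh -n oigns -d governancedomain -p 5 -t 1200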
+ +set -eu +set -o pipefail + +DOMAIN_UID="sample-domain1" +DOMAIN_NAMESPACE="sample-domain1-ns" +timeout_secs=1000 + +usage() { + + cat << EOF + Usage: + $(basename $0) [-n mynamespace] [-d mydomainuid] \\ + [-p expected_pod_count] [-t timeout_secs] [-q] + Description: + This utility script exits successfully when the designated number of + WebLogic Server pods in the given WebLogic Kubernetes Operator domain + reach a 'ready' state and have 'restartVersion', 'introspectVersion', + 'spec.image', and 'spec.serverPod.auxiliaryImages.image' values that match + their corresponding values in their domain resource. + If the designated number of pods is zero, then this script exits + successfully when all pods for the given domain have exited. + This script exits non-zero if a configurable timeout is reached + before the target pod count is reached (default $timeout_secs + seconds). It also exists non-zero if the specified domain + cannot be found and the target pod count is at least one. + Parameters: + -d : WKO Domain UID. Defaults to '$DOMAIN_UID'. + -n : Kubernetes namespace. + Defaults to '$DOMAIN_NAMESPACE'. + -p 0 : Wait until there are no running WebLogic Server pods + for a domain. The default. + -p : Wait until all of the following are true + for exactly 'pod_count' WebLogic Server pods + in the domain: + - ready + - same 'weblogic.domainRestartVersion' label value as + the domain resource's 'spec.restartVersion' + - same 'weblogic.introspectVersion' label value as + the domain resource's 'spec.introspectVersion' + - same image as the domain resource's 'spec.image' + - same image(s) as specified in the domain resource's + optional 'spec.serverPod.auxiliaryImages.image' + -t : Timeout in seconds. Defaults to '$timeout_secs'. + -q : Quiet mode. Show only a count of wl pods that + have reached the desired criteria. + -? : This help. +EOF +} + +expected=0 +syntax_error=false +verbose=true +report_interval=120 + +while [ ! "${1:-}" = "" ]; do + if [ ! "$1" = "-?" ] && [ ! "$1" = "-q" ] && [ "${2:-}" = "" ]; then + syntax_error=true + break + fi + case "$1" in + -n) DOMAIN_NAMESPACE="${2}" + ;; + -d) DOMAIN_UID="${2}" + ;; + -t) timeout_secs="$2" + case "$2" in + ''|*[!0-9]*) syntax_error=true ;; + esac + ;; + -p) expected="$2" + case "$2" in + ''|*[!0-9]*) syntax_error=true ;; + esac + ;; + -q) verbose=false + report_interval=30 + shift + continue + ;; + -?) usage + exit 0 + ;; + *) syntax_error=true + break + ;; + esac + shift + shift +done + +if [ "$syntax_error" = "true" ]; then + echo "@@ Error: Syntax error when calling $(basename $0). Pass '-?' for usage." 
+ exit 1 +fi + +timestamp() { + date --utc '+%Y-%m-%dT%H:%M:%S' +} + +tempfile() { + mktemp /tmp/$(basename "$0").$PPID.$(timestamp).XXXXXX +} + +sortlist() { + # sort a comma or space separated list + # - stdin input, stdout output + # - spaces replaced with commas + # - blank fields ignored + # - output removes any trailing comma + # - examples: ""->"" "c,b"->"b,c" "c,b,"->"b,c" "c b"->"b,c" + tr ' ' '\n' | \ + tr ',' '\n' | \ + sort -V | \ + xargs echo -n | \ + tr ' ' ',' +} +sortAIImages() { + # sort "aiimages=;im2,im1;" field assuming comma or space sep list + # - stdin input, stdout output + # - spaces replaced with commas + # - input ignores trailing comma, output removes any trailing comma + # - examples: see sortAIImagesUnitTest() + while read line + do + echo -n "$line" | sed 's/\(^.*aiimages=;\).*/\1/' + echo -n "$line" | sed 's/.*aiimages=;\([^;]*\).*/\1/' | sortlist + echo "$line" | sed 's/.*aiimages=;[^;]*\(;.*\)/\1/' + done +} +_sortAIImagesUnitTest() { + local res=$(echo "$1" | sortAIImages) + if [ ! "$res" = "$2" ]; then + echo "unit test fail" + echo " input ='$1'" + echo " expect='$2'" + echo " actual='$res'" + exit 1 + fi +} +sortAIImagesUnitTest() { + _sortAIImagesUnitTest "foo=;bar; aiimages=;c,b; bar=;foo;" "foo=;bar; aiimages=;b,c; bar=;foo;" + _sortAIImagesUnitTest "foo=;bar; aiimages=; c,b,; bar=;foo;" "foo=;bar; aiimages=;b,c; bar=;foo;" + _sortAIImagesUnitTest "foo=;bar; aiimages=;; bar=;foo;" "foo=;bar; aiimages=;; bar=;foo;" + _sortAIImagesUnitTest "foo=;bar; aiimages=;a ; bar=;foo;" "foo=;bar; aiimages=;a; bar=;foo;" + _sortAIImagesUnitTest "aiimages=;c b; bar=;foo; foo=;bar;" "aiimages=;b,c; bar=;foo; foo=;bar;" + _sortAIImagesUnitTest "bar=;foo; foo=;bar; aiimages=; c b ;" "bar=;foo; foo=;bar; aiimages=;b,c;" + _sortAIImagesUnitTest "aiimages=;;" "aiimages=;;" + _sortAIImagesUnitTest "aiimages=; ;" "aiimages=;;" + _sortAIImagesUnitTest "aiimages=;,,;" "aiimages=;;" + return 0 +} +sortAIImagesUnitTest + + +getDomainValue() { + # get domain value specified by $1 and put in env var named by $2 + # - if get fails, and global expected is >0, then echo an Error and exit script non-zero + # - example: getDomainValue '.spec.introspectVersion' DOM_VERSION + local attvalue + local ljpath="{$1}" + local __retvar=$2 + set +e + attvalue=$(kubectl -n ${DOMAIN_NAMESPACE} get domain ${DOMAIN_UID} -o=jsonpath="$ljpath" 2>&1) + if [ $? -ne 0 ]; then + if [ $expected -ne 0 ]; then + echo "@@ Error: Could not obtain '$1' from '${DOMAIN_UID}' in namespace '${DOMAIN_NAMESPACE}'. Is your domain resource deployed? Err='$attvalue'" + exit 1 + else + # We're waiting for 0 pods - domain might have been deleted, and it doesn't matter what the value is + attvalue='': + fi + fi + eval "$__retvar='$attvalue'" + set -e +} + +getDomainAIImages() { + # get list of domain auxiliary images (if any) and place result in the env var named by $1 + # - if expected>0 and get fails, then echo an Error and exit script non-zero + # - result is a sorted comma separated list + local attvalue + local __retvar=$1 + set +e + attvalue=$( + kubectl \ + get domain ${DOMAIN_UID} \ + -n ${DOMAIN_NAMESPACE} \ + -o=jsonpath="{range .spec.configuration.model.auxiliaryImages[*]}{.image}{','}{end}" \ + 2>&1 + ) + if [ $? -ne 0 ]; then + if [ $expected -ne 0 ]; then + echo "@@ Error: Could not obtain '.spec.serverPod' from '${DOMAIN_UID}' in namespace '${DOMAIN_NAMESPACE}'. Is your domain resource deployed? 
Err='$attvalue'" + exit 1 + else + # We're waiting for 0 pods - it doesn't matter what the value is + attvalue='': + fi + fi + set -e + attvalue=$(echo "$attvalue" | sortlist) + eval "$__retvar='$attvalue'" +} + +tmpfileorig=$(tempfile) +tmpfilecur=$(tempfile) + +#trap "rm -f $tmpfileorig $tmpfilecur" EXIT + +cur_pods=0 +reported=0 +last_pod_count_secs=$SECONDS +goal_RV_orig="--not-known--" +goal_IV_orig="--not-known--" +goal_image_orig="--not-known--" +goal_aiimages_orig="--not-known--" + +# col_headers must line up with the jpath +col_headers1="NAME RVER IVER IMAGE AIIMAGES READY PHASE" +col_headers2="---- ---- ---- ----- -------- ----- -----" + +# be careful! if changing jpath, then it must +# correspond with the regex below and col_headers above + +jpath='' +jpath+='{range .items[*]}' + jpath+='{" name="}' + jpath+='{";"}{.metadata.name}{";"}' + jpath+='{" domainRestartVersion="}' + jpath+='{";"}{.metadata.labels.weblogic\.domainRestartVersion}{";"}' + jpath+='{" introspectVersion="}' + jpath+='{";"}{.metadata.labels.weblogic\.introspectVersion}{";"}' + jpath+='{" image="}' + jpath+='{";"}{.spec.containers[?(@.name=="weblogic-server")].image}{";"}' + jpath+='{" aiimages="}' + jpath+='{";"}{.spec.initContainers[?(@.command[0]=="/weblogic-operator/scripts/auxImage.sh")].image}{";"}' + jpath+='{" ready="}' + jpath+='{";"}{.status.containerStatuses[?(@.name=="weblogic-server")].ready}{";"}' + jpath+='{" phase="}' + jpath+='{";"}{.status.phase}{";"}' + jpath+='{"\n"}' +jpath+='{end}' + +# Loop until we reach the desired pod count for pods at the desired restart version, +# introspect version, and image -- or until we reach the timeout. + +while [ 1 -eq 1 ]; do + + # + # Get the current domain resource's spec.restartVersion, spec.introspectVersion, + # spec.image, and ai images. If any of these fail then these functions + # fail we assume that domain resource was not found and "exit 1" if goal pods != 0, + # or return "" if goal pods == 0. + # + + getDomainValue ".spec.restartVersion" goal_RV_current + getDomainValue ".spec.introspectVersion" goal_IV_current + getDomainValue ".spec.image" goal_image_current + getDomainAIImages goal_aiimages_current + + ret="${goal_RV_current} +${goal_IV_current} +${goal_image_current} +${goal_aiimages_current}^M" + if [ ! "${ret/Error:/}" = "${ret}" ]; then + echo $ret + exit 1 + fi + + # + # Force new reporting for the rare case where domain resource RV, IV, or + # image changed since we last reported. + # + + if [ ! "$goal_RV_orig" = "$goal_RV_current" ] \ + || [ ! "$goal_IV_orig" = "$goal_IV_current" ] \ + || [ ! "$goal_image_orig" = "$goal_image_current" ] \ + || [ ! "$goal_aiimages_orig" = "$goal_aiimages_current" ] + then + [ "$reported" = "1" ] && echo + reported=0 + goal_IV_orig="$goal_IV_current" + goal_RV_orig="$goal_RV_current" + goal_image_orig="$goal_image_current" + goal_aiimages_orig="$goal_aiimages_current" + fi + + # + # If 'expected' = 0, get the current number of pods regardless of their + # restart version, introspect version, image, or ready state. + # + # If "expected != 0" get the number of ready pods with the current domain + # resource restart version and image. + # + # (Note that grep returns non-zero if it doesn't find anything (sigh), + # so we disable error checking and cross-fingers...) 
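+  # (For illustration: each line produced by the jsonpath query above has the form
+  #  " name=;<pod>; domainRestartVersion=;<v>; introspectVersion=;<v>; image=;<img>; aiimages=;<imgs>; ready=;<true|false>; phase=;<phase>;"
+  #  so the regex built below only counts pods whose values match the domain resource's current goal values.)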
+ # + + if [ "$expected" = "0" ]; then + + cur_pods=$( kubectl -n ${DOMAIN_NAMESPACE} get pods \ + -l weblogic.serverName,weblogic.domainUID="${DOMAIN_UID}" \ + -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' \ + | wc -l ) + + lead_string="Waiting up to $timeout_secs seconds for there to be no (0) WebLogic Server pods that match the following criteria:" + criteria="namespace='$DOMAIN_NAMESPACE' domainUID='$DOMAIN_UID'" + + else + + regex="domainRestartVersion=;$goal_RV_current;" + regex+=" introspectVersion=;$goal_IV_current;" + regex+=" image=;$goal_image_current;" + regex+=" aiimages=;$goal_aiimages_current;" + regex+=" ready=;true;" + + set +e # disable error checks as grep returns non-zero when it finds nothing (sigh) + cur_pods=$( kubectl -n ${DOMAIN_NAMESPACE} get pods \ + -l weblogic.serverName,weblogic.domainUID="${DOMAIN_UID}" \ + -o=jsonpath="$jpath" \ + | sortAIImages \ + | grep "$regex" | wc -l ) + set -e + + lead_string="Waiting up to $timeout_secs seconds for exactly '$expected' WebLogic Server pods to reach the following criteria:" + criteria="ready='true'" + criteria+=" image='$goal_image_current'" + criteria+=" auxiliaryImages='$goal_aiimages_current'" + criteria+=" domainRestartVersion='$goal_RV_current'" + criteria+=" introspectVersion='$goal_IV_current'" + criteria+=" namespace='$DOMAIN_NAMESPACE'" + criteria+=" domainUID='$DOMAIN_UID'" + + fi + + # + # Report the current state to stdout. Exit 0 if we've reached our + # goal, exit non-zero if we've reached our time-out. + # + + + if [ "$verbose" = "false" ]; then + if [ $reported -eq 0 ]; then + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: $lead_string" + for criterion in $criteria; do + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: $criterion" + done + echo -n "@@ [$(timestamp)][seconds=$SECONDS] Info: Current pods that match the above criteria =" + echo -n " $cur_pods" + reported=1 + last_pod_count_secs=$SECONDS + + elif [ $((SECONDS - last_pod_count_secs)) -gt $report_interval ] \ + || [ $cur_pods -eq $expected ]; then + echo -n " $cur_pods" + last_pod_count_secs=$SECONDS + + fi + else + + kubectl -n ${DOMAIN_NAMESPACE} get pods \ + -l weblogic.domainUID="${DOMAIN_UID}" \ + -o=jsonpath="$jpath" | sortAIImages > $tmpfilecur + + set +e + diff -q $tmpfilecur $tmpfileorig 2>&1 > /dev/null + diff_res=$? + set -e + if [ ! $diff_res -eq 0 ] \ + || [ $((SECONDS - last_pod_count_secs)) -gt $report_interval ] \ + || [ $cur_pods -eq $expected ]; then + + if [ $reported -eq 0 ]; then + echo + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: $lead_string" + for criterion in $criteria; do + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: $criterion" + done + echo + reported=1 + fi + + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: '$cur_pods' WebLogic Server pods currently match all criteria, expecting '$expected'." + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: Introspector and WebLogic Server pods with same namespace and domain-uid:" + echo + + # print results as a table + # - first strip out the var= and replace with "val". + # - note that the quotes are necessary so that 'print_table' + # doesn't get confused by col entries that are missing values + ( + echo $col_headers1 + echo $col_headers2 + cat $tmpfilecur | sed "s|[^ ]*=;\([^;]*\);|'\1'|g" + ) | column -t + echo + + cp $tmpfilecur $tmpfileorig + last_pod_count_secs=$SECONDS + fi + fi + + if [ $cur_pods -eq $expected ]; then + if [ ! "$verbose" = "true" ]; then + echo ". " + else + echo + fi + echo "@@ [$(timestamp)][seconds=$SECONDS] Info: Success!" 
+ exit 0 + fi + + if [ $SECONDS -ge $timeout_secs ]; then + echo + echo "@@ [$(timestamp)][seconds=$SECONDS] Error: Timeout after waiting more than $timeout_secs seconds." + exit 1 + fi + + sleep 1 +done diff --git a/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md index 1ac06a610..a5b96bb44 100755 --- a/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md +++ b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/README.md @@ -19,7 +19,7 @@ To control Elasticsearch memory parameters (Heap allocation and Enabling/Disabli * ES_JAVA_OPTS: value may contain for example -Xms512m -Xmx512m to lower the default memory usage (please be aware that this value is only applicable for demo purpose and it is not the one recommended by Elasticsearch itself) * bootstrap.memory_lock: value may contain true (enables the usage of mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out) or false (disables the usage of mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out). -* imagePullSecrets: It has been added to resolve the issue with Docker Hub Rate Limiting. One needs to create a secret using your docker hub credentials to work around the error and replace the value dockercred. + To install Elasticsearch and Kibana, use: ```shell $ kubectl apply -f kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml @@ -29,3 +29,4 @@ To remove them, use: ```shell $ kubectl delete -f kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml ``` + diff --git a/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml index 84b7e8bf1..c1c61d78c 100755 --- a/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml +++ b/OracleIdentityGovernance/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml @@ -1,5 +1,6 @@ -# Copyright (c) 2018, 2022, Oracle and/or its affiliates. +# Copyright (c) 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
+# # When a user installs the WebLogic operator Helm chart, the user can set # elkIntegrationEnabled to true in their values.yaml to tell the operator to send the @@ -27,7 +28,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - namespace: "oigns" + namespace: "oimcluster" name: "elasticsearch" labels: app: "elasticsearch" @@ -67,7 +68,7 @@ spec: kind: "Service" apiVersion: "v1" metadata: - namespace: "oigns" + namespace: "oimcluster" name: "elasticsearch" spec: ports: @@ -86,7 +87,7 @@ spec: apiVersion: "apps/v1" kind: "Deployment" metadata: - namespace: "oigns" + namespace: "oimcluster" name: "kibana" labels: app: "kibana" @@ -112,7 +113,7 @@ spec: apiVersion: "v1" kind: "Service" metadata: - namespace: "oigns" + namespace: "oimcluster" name: "kibana" labels: app: "kibana" @@ -122,3 +123,4 @@ spec: - port: 5601 selector: app: "kibana" + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/README.md b/OracleIdentityGovernance/kubernetes/monitoring-service/README.md index 8a43c25bb..8fd3a03a1 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/README.md +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/README.md @@ -1,12 +1,172 @@ -## Monitor the OracleIdentityGovernance instance using Prometheus and Grafana +# Monitor the OracleIdentityGovernance instance using Prometheus and Grafana Using the `WebLogic Monitoring Exporter` you can scrape runtime information from a running OracleIdentityGovernance instance and monitor them using Prometheus and Grafana. -### Prerequisites +## Prerequisites - Have Docker and a Kubernetes cluster running and have `kubectl` installed and configured. - Have Helm installed. - An OracleIdentityGovernance domain deployed by `weblogic-operator` is running in the Kubernetes cluster. +## Set up monitoring for OracleIdentityGovernance domain + +Set up the WebLogic Monitoring Exporter that will collect WebLogic Server metrics and monitor OracleIdentityGovernance domain. + +**Note**: Either of the following methods can be used to set up monitoring for OracleIdentityGovernance domain. Using `setup-monitoring.sh` does the set up in an automated way. + +1. [Set up manually](#set-up-manually) +1. [Set up using `setup-monitoring.sh`](#set-up-using-setup-monitoringsh) + +## Set up manually + +### Deploy Prometheus and Grafana + +Refer to the compatibility matrix of [Kube Prometheus](https://github.com/coreos/kube-prometheus#kubernetes-compatibility-matrix) and clone the [release](https://github.com/coreos/kube-prometheus/releases) version of the `kube-prometheus` repository according to the Kubernetes version of your cluster. + +1. Clone the `kube-prometheus` repository: + ``` + $ git clone https://github.com/coreos/kube-prometheus.git + ``` + +1. Change to folder `kube-prometheus` and enter the following commands to create the namespace and CRDs, and then wait for their availability before creating the remaining resources: + + ``` + $ cd kube-prometheus + $ kubectl create -f manifests/setup + $ until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done + $ kubectl create -f manifests/ + ``` + +1. `kube-prometheus` requires all nodes in the Kubernetes cluster to be labeled with `kubernetes.io/os=linux`. If any node is not labeled with this, then you need to label it using the following command: + + ``` + $ kubectl label nodes --all kubernetes.io/os=linux + ``` + +1. 
Enter the following commands to provide external access for Grafana, Prometheus, and Alertmanager:
+
+   ```
+   $ kubectl patch svc grafana -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32100 }]'
+
+   $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32101 }]'
+
+   $ kubectl patch svc alertmanager-main -n monitoring --type=json -p '[{"op": "replace", "path": "/spec/type", "value": "NodePort" },{"op": "replace", "path": "/spec/ports/0/nodePort", "value": 32102 }]'
+   ```
+
+   Note:
+   * `32100` is the external port for Grafana
+   * `32101` is the external port for Prometheus
+   * `32102` is the external port for Alertmanager
+
+### Generate the WebLogic Monitoring Exporter Deployment Package
+
+The `wls-exporter.war` package needs to be updated and created for each listening port (Administration Server and Managed Servers) in the domain.
+Set the following environment variables based on your environment and run the script `get-wls-exporter.sh` to generate the required WAR files at `${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy`:
+- adminServerPort
+- wlsMonitoringExporterTosoaCluster
+- soaManagedServerPort
+- wlsMonitoringExporterTooimCluster
+- oimManagedServerPort
+
+For example:
+
+```
+$ cd ${WORKDIR}/monitoring-service/scripts
+$ export adminServerPort=7001
+$ export wlsMonitoringExporterTosoaCluster=true
+$ export soaManagedServerPort=8001
+$ export wlsMonitoringExporterTooimCluster=true
+$ export oimManagedServerPort=14000
+$ sh get-wls-exporter.sh
+```
+
+Verify whether the required WAR files are generated at `${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy`.
+
+```
+$ ls ${WORKDIR}/monitoring-service/scripts/wls-exporter-deploy
+```
+
+### Deploy the WebLogic Monitoring Exporter into the OracleIdentityGovernance domain
+
+Follow these steps to copy and deploy the WebLogic Monitoring Exporter WAR files into the OracleIdentityGovernance domain.
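+
+Before running the commands below, you may want to confirm the Administration Server pod name in your domain namespace. A minimal check, assuming the `oimcluster` namespace and the `AdminServer` server name used in the examples below (the `weblogic.serverName` label is set on server pods by the WebLogic Kubernetes Operator):
+
+```
+$ kubectl get pods -n oimcluster -l weblogic.serverName=AdminServer
+```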
+
+**Note**: Replace the `` with appropriate values based on your environment:
+
+```
+$ cd ${WORKDIR}/monitoring-service/scripts
+$ kubectl cp wls-exporter-deploy /:/u01/oracle
+$ kubectl cp deploy-weblogic-monitoring-exporter.py /:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py \
+-domainName -adminServerName -adminURL \
+-soaClusterName -wlsMonitoringExporterTosoaCluster \
+-oimClusterName -wlsMonitoringExporterTooimCluster \
+-username -password
+```
+
+For example:
+
+```
+$ cd ${WORKDIR}/monitoring-service/scripts
+$ kubectl cp wls-exporter-deploy oimcluster/oimcluster-adminserver:/u01/oracle
+$ kubectl cp deploy-weblogic-monitoring-exporter.py oimcluster/oimcluster-adminserver:/u01/oracle/wls-exporter-deploy
+$ kubectl exec -it -n oimcluster oimcluster-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py \
+-domainName oimcluster -adminServerName AdminServer -adminURL oimcluster-adminserver:7001 \
+-soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true \
+-oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true \
+-username weblogic -password Welcome1
+```
+
+### Configure Prometheus Operator
+
+Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter endpoint discovered as a target, you must create a service monitor pointing to the service.
+
+The service monitor deployment YAML configuration file is available at `${WORKDIR}/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml.template`. Copy the file as `wls-exporter-ServiceMonitor.yaml` and update it with appropriate values as detailed below.
+
+Exporting metrics from `wls-exporter` requires `basicAuth`, so a Kubernetes `Secret` is created with the base64 encoded user name and password. This `Secret` is used in the `ServiceMonitor` deployment. The `wls-exporter-ServiceMonitor.yaml` uses the namespace `oimcluster` and specifies `basicAuth` credentials as `username: %USERNAME%` and `password: %PASSWORD%`. Update `%USERNAME%` and `%PASSWORD%` with the base64 encoded values, and update all occurrences of `oimcluster`, based on your environment.
+
+Use the following example to generate a base64 encoded value:
+
+```
+$ echo -n "Welcome1" | base64
+V2VsY29tZTE=
+```
+
+You need to add `RoleBinding` and `Role` for the namespace (oimcluster) under which the WebLogic Server pods are running in the Kubernetes cluster. These are required for Prometheus to access the endpoints provided by the WebLogic Monitoring Exporters. The YAML configuration files for the `oimcluster` namespace are provided in `${WORKDIR}/monitoring-service/manifests/`.
+
+If you are using a namespace other than `oimcluster`, update the namespace details in `prometheus-roleBinding-domain-namespace.yaml` and `prometheus-roleSpecific-domain-namespace.yaml`.
+
+Perform the following steps to enable Prometheus to collect the metrics from the WebLogic Monitoring Exporter:
+
+```
+$ cd ${WORKDIR}/monitoring-service/manifests
+$ kubectl apply -f .
+```
+
+### Verify the service discovery of WebLogic Monitoring Exporter
+
+After the deployment of the service monitor, Prometheus should be able to discover `wls-exporter` and collect the metrics.
+
+1. Access the Prometheus dashboard at `http://mycompany.com:32101/`
+
+1.
Navigate to **Status** to see the **Service Discovery** details.
+
+1. Verify that `wls-exporter` is listed in the discovered Services.
+
+
+### Deploy Grafana Dashboard
+
+You can access the Grafana dashboard at `http://mycompany.com:32100/`.
+
+1. Log in to the Grafana dashboard with username: `admin` and password: `admin`.
+
+1. Navigate to + (Create) -> Import -> Upload the `weblogic-server-dashboard-import.json` file (provided at `${WORKDIR}/monitoring-service/config/weblogic-server-dashboard-import.json`).
+
+
+## Set up using `setup-monitoring.sh`
+
+Alternatively, you can run the helper script `setup-monitoring.sh` available at `${WORKDIR}/monitoring-service` to set up monitoring for the OracleIdentityGovernance domain.
+
+This script creates the kube-prometheus-stack (Prometheus, Grafana and Alertmanager) and the WebLogic Monitoring Exporter, and imports `weblogic-server-dashboard.json` into Grafana as the WebLogic Server Dashboard.
+
### Prepare to use the setup monitoring script
The sample scripts for setup monitoring for OracleIdentityGovernance domain are available at `${WORKDIR}/monitoring-service`.
@@ -36,7 +196,7 @@ The following parameters can be provided in the inputs file.
| `prometheusNodePort` | Port number of the Prometheus outside the Kubernetes cluster. | `32101` |
| `grafanaNodePort` | Port number of the Grafana outside the Kubernetes cluster. | `32100` |
| `alertmanagerNodePort` | Port number of the Alertmanager outside the Kubernetes cluster. | `32102` |
-| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server’s user name and password. | `oimcluster-domain-credentials` |
+| `weblogicCredentialsSecretName` | Name of the Kubernetes secret which has Administration Server's user name and password. | `oimcluster-domain-credentials` |
Note that the values specified in the `monitoring-inputs.yaml` file will be used to install kube-prometheus-stack (Prometheus, Grafana and Alertmanager) and deploying WebLogic Monitoring Exporter into the OracleIdentityGovernance domain. Hence make the domain specific values to be same as that used during domain creation.
@@ -54,13 +214,11 @@ The script will perform the following steps:
- Helm install `prometheus-community/kube-prometheus-stack` of version "16.5.0" if `setupKubePrometheusStack` is set to `true`.
- Deploys WebLogic Monitoring Exporter to Administration Server.
- Deploys WebLogic Monitoring Exporter to `soaCluster` if `wlsMonitoringExporterTosoaCluster` is set to `true`.
-- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`.
-- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`.
-- Deploys WebLogic Monitoring Exporter to Administration Server.
- Deploys WebLogic Monitoring Exporter to `oimCluster` if `wlsMonitoringExporterTooimCluster` is set to `true`.
- Exposes the Monitoring Services (Prometheus at `32101`, Grafana at `32100` and Alertmanager at `32102`) outside of the Kubernetes cluster if `exposeMonitoringNodePort` is set to `true`.
- Imports the WebLogic Server Grafana Dashboard if `setupKubePrometheusStack` is set to `true`.
+
### Verify the results
The setup monitoring script will report failure if there was any error. However, verify that required resources were created by the script.
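
For example, a quick spot-check of the main resources (a sketch, assuming the `monitoring` namespace and the `wls-exporter` ServiceMonitor name used elsewhere in this document):

```
$ kubectl get pods -n monitoring
$ kubectl get servicemonitors --all-namespaces | grep wls-exporter
```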
@@ -116,5 +274,4 @@ $ cd ${WORKDIR}/monitoring-service $ ./delete-monitoring.sh \ -i monitoring-inputs.yaml ``` - diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json index 0b8444e35..c2fa9e2eb 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json @@ -125,7 +125,7 @@ "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -750,7 +750,7 @@ "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " Heap Free ({{weblogic_serverName}})", + "legendFormat": " Heap Free ()", "refId": "B" }, { @@ -758,7 +758,7 @@ "format": "time_series", "instant": false, "intervalFactor": 1, - "legendFormat": "Heap Size ({{weblogic_serverName}})", + "legendFormat": "Heap Size ()", "refId": "A" }, { @@ -766,7 +766,7 @@ "format": "time_series", "hide": true, "intervalFactor": 1, - "legendFormat": "Heap Max ({{weblogic_serverName}})", + "legendFormat": "Heap Max ()", "refId": "C" } ], @@ -859,7 +859,7 @@ "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"} * 100", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{weblogic_serverName}}", + "legendFormat": " ", "refId": "B" } ], @@ -947,14 +947,14 @@ "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Total Threads ({{weblogic_serverName}})", + "legendFormat": "Total Threads ()", "refId": "A" }, { "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Stuck Threads ({{weblogic_serverName}})", + "legendFormat": "Stuck Threads ()", "refId": "D" }, { @@ -1338,7 +1338,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" }, { @@ -1434,7 +1434,7 @@ "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1526,7 +1526,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1616,7 +1616,7 @@ "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ 
-1888,7 +1888,7 @@ "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -1977,7 +1977,7 @@ "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2065,7 +2065,7 @@ "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2153,7 +2153,7 @@ "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2256,7 +2256,7 @@ "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2345,7 +2345,7 @@ "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2599,14 +2599,14 @@ "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2694,14 +2694,14 @@ "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2788,7 +2788,7 @@ "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", 
weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], @@ -2875,7 +2875,7 @@ "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json index 338eb6397..cf6d5f776 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard.json @@ -126,7 +126,7 @@ "expr": "count(count (wls_jvm_uptime{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\"}) by (name))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -751,7 +751,7 @@ "expr": "wls_jvm_heap_free_current{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " Heap Free ({{weblogic_serverName}})", + "legendFormat": " Heap Free ()", "refId": "B" }, { @@ -759,7 +759,7 @@ "format": "time_series", "instant": false, "intervalFactor": 1, - "legendFormat": "Heap Size ({{weblogic_serverName}})", + "legendFormat": "Heap Size ()", "refId": "A" }, { @@ -767,7 +767,7 @@ "format": "time_series", "hide": true, "intervalFactor": 1, - "legendFormat": "Heap Max ({{weblogic_serverName}})", + "legendFormat": "Heap Max ()", "refId": "C" } ], @@ -860,7 +860,7 @@ "expr": "wls_jvm_process_cpu_load{weblogic_domainUID=~\"$domainName\", weblogic_clusterName=~\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"} * 100", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{weblogic_serverName}}", + "legendFormat": " ", "refId": "B" } ], @@ -948,14 +948,14 @@ "expr": "wls_threadpool_execute_thread_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Total Threads ({{weblogic_serverName}})", + "legendFormat": "Total Threads ()", "refId": "A" }, { "expr": "wls_threadpool_stuck_thread_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Stuck Threads ({{weblogic_serverName}})", + "legendFormat": "Stuck Threads ()", "refId": "D" }, { @@ -1339,7 +1339,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" }, { @@ -1435,7 +1435,7 @@ "expr": " sum(irate(wls_webapp_config_sessions_opened_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1527,7 +1527,7 @@ "format": "time_series", "interval": "", 
"intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1617,7 +1617,7 @@ "expr": "sum(irate(wls_servlet_invocation_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (app)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{app}}", + "legendFormat": "", "refId": "A" } ], @@ -1889,7 +1889,7 @@ "expr": "wls_datasource_active_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -1978,7 +1978,7 @@ "expr": "irate(wls_datasource_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2066,7 +2066,7 @@ "expr": "wls_datasource_waiting_for_connection_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2154,7 +2154,7 @@ "expr": "wls_datasource_connection_delay_time{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}", "format": "time_series", "intervalFactor": 1, - "legendFormat": " {{name}} @ {{weblogic_serverName}}", + "legendFormat": " @ ", "refId": "A" } ], @@ -2257,7 +2257,7 @@ "expr": "sum(wls_jmsruntime_connections_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2346,7 +2346,7 @@ "expr": "sum(irate(wls_jmsruntime_connections_total_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (weblogic_serverName)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{weblogic_serverName}}", + "legendFormat": "", "refId": "A" } ], @@ -2600,14 +2600,14 @@ "expr": "sum(wls_jms_messages_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": "sum(wls_jms_messages_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2695,14 +2695,14 @@ "expr": "sum(wls_jms_bytes_current_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Current ({{jmsserver}})", + "legendFormat": "Current ()", "refId": "A" }, { "expr": 
"sum(wls_jms_bytes_pending_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "Pending ({{jmsserver}})", + "legendFormat": "Pending ()", "refId": "B" } ], @@ -2789,7 +2789,7 @@ "expr": "sum(irate(wls_jms_messages_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], @@ -2876,7 +2876,7 @@ "expr": "sum(irate(wls_jms_bytes_received_count{weblogic_domainUID=\"$domainName\", weblogic_clusterName=\"$clusterName\", weblogic_serverName=~\"${serverName:regex}\"}[5m])) by (jmsserver)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{jmsserver}}", + "legendFormat": "", "refId": "A" } ], @@ -3312,3 +3312,4 @@ "version": 6 } } + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh index b676e9b40..eee881c77 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/delete-monitoring.sh @@ -62,7 +62,7 @@ function usage { } -function deleteKubePrometheusStack { +function deletePrometheusGrafana { helm delete ${monitoringNamespace} --namespace ${monitoringNamespace} } @@ -115,7 +115,7 @@ fi if [ "${setupKubePrometheusStack}" = "true" ]; then echo "Deleting Prometheus and grafana started" - deleteKubePrometheusStack + deletePrometheusGrafana echo "Deleting Prometheus and grafana completed" fi cd $OLD_PWD diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml index 833a05d62..ac49b6330 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml @@ -5,20 +5,19 @@ apiVersion: v1 kind: Secret metadata: name: basic-auth - namespace: oimcluster + namespace: monitoring data: - password: V2VsY29tZTE= - user: d2VibG9naWM= + password: V2VsY29tZTE= # Welcome1 i.e.'WebLogic password' + user: d2VibG9naWM= # weblogic i.e. 
'WebLogic username' type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter - namespace: oimcluster + namespace: monitoring labels: k8s-app: wls-exporter - release: monitoring spec: namespaceSelector: matchNames: diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml index 6dab6efca..f4916fb06 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/monitoring-inputs.yaml @@ -36,7 +36,7 @@ soaClusterName: soa_cluster soaManagedServerPort: 8001 # WebLogic Monitoring Exporter to Cluster -wlsMonitoringExporterTosoaCluster: true +wlsMonitoringExporterTosoaCluster: false # Cluster name oimClusterName: oim_cluster @@ -45,11 +45,11 @@ oimClusterName: oim_cluster oimManagedServerPort: 14000 # WebLogic Monitoring Exporter to Cluster -wlsMonitoringExporterTooimCluster: true +wlsMonitoringExporterTooimCluster: false -# Boolean to indicate if the adminNodePort will be exposed -exposeMonitoringNodePort: true +# Boolean to indicate if the Monitoring Services NodePort will be exposed +exposeMonitoringNodePort: false # NodePort to expose Prometheus prometheusNodePort: 32101 diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py index 97d0d8b6b..3be9aafac 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py @@ -41,14 +41,14 @@ def usage(): soaClusterName = "soa_cluster" # wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster -wlsMonitoringExporterTosoaCluster = "true" +wlsMonitoringExporterTosoaCluster = "false" # oimClusterName will be passed by command line parameter -oimClusterName oimClusterName = "oim_cluster" # wlsMonitoringExporterTooimCluster will be passed by command line parameter -wlsMonitoringExporterTooimCluster -wlsMonitoringExporterTooimCluster = "true" +wlsMonitoringExporterTooimCluster = "false" # username will be passed by command line parameter -username username = "weblogic" diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh new file mode 100755 index 000000000..3da4f6b7d --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/deploy-weblogic-server-grafana-dashboard.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
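+#
+# deploy-weblogic-server-grafana-dashboard.sh
+#
+# Copies the sample WebLogic Server Grafana dashboard JSON into the domain's
+# Administration Server pod and POSTs it to the Grafana API endpoint resolved
+# from the monitoring namespace.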
+# +# Initialize +script="${BASH_SOURCE[0]}" +scriptDir="$( cd "$( dirname "${script}" )" && pwd )" +warDir=$PWD +source ${scriptDir}/utils.sh + +# Setting default values +initialize +# Function to lowercase a value and make it a legal DNS1123 name +# $1 - value to convert to lowercase +function toDNS1123Legal { + local val=`echo $1 | tr "[:upper:]" "[:lower:]"` + val=${val//"_"/"-"} + echo "$val" +} + +adminServerPodName="${domainUID}-$(toDNS1123Legal ${adminServerName})" + +grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") +grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") +grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" +kubectl cp $scriptDir/../config/weblogic-server-dashboard.json ${domainNamespace}/${adminServerPodName}:/tmp/weblogic-server-dashboard.json +EXEC_DEPLOY="kubectl exec -it -n ${domainNamespace} ${adminServerPodName} -- curl --noproxy \"*\" -X POST -H \"Content-Type: application/json\" -d @/tmp/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db" +echo "Deploying WebLogic Server Grafana Dashboard in progress...." +eval ${EXEC_DEPLOY} + diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py index 377545063..a8d66f5fd 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/undeploy-weblogic-monitoring-exporter.py @@ -40,12 +40,12 @@ def usage(): soaClusterName = "soa_cluster" # wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster -wlsMonitoringExporterTosoaCluster = "true" +wlsMonitoringExporterTosoaCluster = "false" # oimClusterName will be passed by command line parameter -oimClusterName oimClusterName = "oim_cluster" # wlsMonitoringExporterTooimCluster will be passed by command line parameter -wlsMonitoringExporterTooimCluster -wlsMonitoringExporterTooimCluster = "true" +wlsMonitoringExporterTooimCluster = "false" # username will be passed by command line parameter -username username = "weblogic" diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh index d9c998e98..0276373a3 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/scripts/utils.sh @@ -41,7 +41,7 @@ function initialize { if [ -z ${wlsMonitoringExporterTosoaCluster} ]; then echo "wlsMonitoringExporterTosoaCluster is empty, setting to default \"false\"" - wlsMonitoringExporterTosoaCluster="true" + wlsMonitoringExporterTosoaCluster="false" fi if [ -z ${oimClusterName} ]; then echo "oimClusterName is empty, setting to default \"oim_cluster\"" @@ -55,7 +55,7 @@ function initialize { if [ -z ${wlsMonitoringExporterTooimCluster} ]; then echo "wlsMonitoringExporterTooimCluster is empty, setting to default \"false\"" - wlsMonitoringExporterTooimCluster="true" + wlsMonitoringExporterTooimCluster="false" fi } diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh 
b/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh index f6d6f5f0c..57e7cba0c 100755 --- a/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/setup-monitoring.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2021, Oracle and/or its affiliates. +# Copyright (c) 2021, 2022, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # # setup-monitoring.sh @@ -78,17 +78,17 @@ function installKubePrometheusStack { if [ ${exposeMonitoringNodePort} == "true" ]; then helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ - --namespace ${monitoringNamespace} \ + --namespace ${monitoringNamespace} ${additionalParamForKubePrometheusStack} \ --set prometheus.service.type=NodePort --set prometheus.service.nodePort=${prometheusNodePort} \ --set alertmanager.service.type=NodePort --set alertmanager.service.nodePort=${alertmanagerNodePort} \ --set grafana.adminPassword=admin --set grafana.service.type=NodePort --set grafana.service.nodePort=${grafanaNodePort} \ - --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --version "16.5.0" --values ${scriptDir}/values.yaml \ --atomic --wait else helm install ${monitoringNamespace} prometheus-community/kube-prometheus-stack \ - --namespace ${monitoringNamespace} \ + --namespace ${monitoringNamespace} ${additionalParamForKubePrometheusStack} \ --set grafana.adminPassword=admin \ - --version "16.5.0" ${additionalParamForKubePrometheusStack} \ + --version "16.5.0" --values ${scriptDir}/values.yaml \ --atomic --wait fi exitIfError $? "ERROR: prometheus-community/kube-prometheus-stack install failed." @@ -134,7 +134,7 @@ if [ "${setupKubePrometheusStack}" = "true" ]; then echo "The namespace ${monitoringNamespace} for install prometheus-community/kube-prometheus-stack does not exist. Creating the namespace ${monitoringNamespace}" kubectl create namespace ${monitoringNamespace} fi - echo -e "Monitoring setup in ${monitoringNamespace} in progress\n" + echo -e "Monitoring setup in ${monitoringNamespace} in progress.......\n" # Create the namespace and CRDs, and then wait for them to be availble before creating the remaining resources kubectl label nodes --all kubernetes.io/os=linux --overwrite=true @@ -146,8 +146,8 @@ if [ "${setupKubePrometheusStack}" = "true" ]; then echo "Setup prometheus-community/kube-prometheus-stack completed" fi -username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` -password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` +export username=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.username}'|base64 --decode` +export password=`kubectl get secrets ${weblogicCredentialsSecretName} -n ${domainNamespace} -o=jsonpath='{.data.password}'|base64 --decode` # Setting up the WebLogic Monitoring Exporter echo "Deploy WebLogic Monitoring Exporter started" @@ -173,10 +173,7 @@ kubectl apply -f ${serviceMonitor} if [ "${setupKubePrometheusStack}" = "true" ]; then # Deploying WebLogic Server Grafana Dashboard echo "Deploying WebLogic Server Grafana Dashboard...." 
- grafanaEndpointIP=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].addresses[].ip}") - grafanaEndpointPort=$(kubectl get endpoints ${monitoringNamespace}-grafana -n ${monitoringNamespace} -o=jsonpath="{.subsets[].ports[].port}") - grafanaEndpoint="${grafanaEndpointIP}:${grafanaEndpointPort}" - curl --noproxy "*" -X POST -H "Content-Type: application/json" -d @config/weblogic-server-dashboard.json http://admin:admin@${grafanaEndpoint}/api/dashboards/db + sh ${scriptDir}/scripts/deploy-weblogic-server-grafana-dashboard.sh echo "" echo "Deployed WebLogic Server Grafana Dashboard successfully" echo "" diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/values.yaml b/OracleIdentityGovernance/kubernetes/monitoring-service/values.yaml new file mode 100755 index 000000000..18757f394 --- /dev/null +++ b/OracleIdentityGovernance/kubernetes/monitoring-service/values.yaml @@ -0,0 +1,13 @@ +# Copyright (c) 2022, Oracle and/or its affiliates. +# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. +# +prometheusOperator: + admissionWebhooks: + patch: + enabled: true + image: + repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen + tag: v1.0 + sha: "f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068" + pullPolicy: IfNotPresent + diff --git a/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh b/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh index 0da098e68..462031314 100755 --- a/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh +++ b/OracleIdentityGovernance/kubernetes/scaling/scalingAction.sh @@ -98,7 +98,7 @@ cat > cmds-$$.py << INPUT import sys, json for i in json.load(sys.stdin)["spec"]["ports"]: if i["name"] == "rest": - print(i["port"]) + print((i["port"])) INPUT port=$(echo "${STATUS}" | python cmds-$$.py 2>> ${log_file_name}) fi @@ -131,7 +131,7 @@ cat > cmds-$$.py << INPUT import sys, json for i in json.load(sys.stdin)["groups"]: if i["name"] == "weblogic.oracle": - print(i["preferredVersion"]["version"]) + print((i["preferredVersion"]["version"])) INPUT domain_api_version=`echo ${APIS} | python cmds-$$.py 2>> ${log_file_name}` fi @@ -196,7 +196,7 @@ cat > cmds-$$.py << INPUT import sys, json for j in json.load(sys.stdin)["spec"]["clusters"]: if j["clusterName"] == "$wls_cluster_name": - print (j["replicas"]) + print((j["replicas"])) INPUT num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` fi @@ -219,7 +219,7 @@ function get_num_ms_domain_scope() { else cat > cmds-$$.py << INPUT import sys, json -print (json.load(sys.stdin)["spec"]["replicas"]) +print((json.load(sys.stdin)["spec"]["replicas"])) INPUT num_ms=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` fi @@ -253,7 +253,7 @@ cat > cmds-$$.py << INPUT import sys, json for j in json.load(sys.stdin)["status"]["clusters"]: if j["clusterName"] == "$clusterName": - print (j["minimumReplicas"]) + print((j["minimumReplicas"])) INPUT minReplicas=`echo ${DOMAIN} | python cmds-$$.py 2>> ${log_file_name}` fi diff --git a/OracleUnifiedDirectory/kubernetes/README.md b/OracleUnifiedDirectory/kubernetes/README.md deleted file mode 100755 index 5f2069e63..000000000 --- a/OracleUnifiedDirectory/kubernetes/README.md +++ /dev/null @@ -1,797 +0,0 @@ -Oracle Unified Directory (OUD) on Kubernetes -============================================ - -## Contents -1. [Introduction](#introduction) -1. 
[Hardware and Software Requirements](#hardware-and-software-requirements) -1. [Prerequisites](#prerequisites) -1. [Example 1 Directory Server](#example-1-directory-server-instancetypedirectory) -1. [Example 2 Directory Server as a Kubernetes Service](#example-2-directory-server-instancetypedirectory-as-a-kubernetes-service) -1. [Example 3 Proxy Server as a Kubernetes Service](#example-3-proxy-server-instancetypeproxy-as-a-kubernetes-service) -1. [Example 4 Replication Server (instanceType=Replication) as a Kubernetes Service](#example-4-replication-server-instancetypereplication-as-a-kubernetes-service) -1. [Example 5 Directory Server/Service added to existing Replication Server/Service (instanceType=AddDS2RS)](#example-5-directory-serverservice-added-to-existing-replication-serverservice-instancetypeaddds2rs) -1. [Appendix Reference](#appendix-reference) - -# Introduction -This project offers Sample YAML files and scripts to deploy Oracle Unified Directory Docker images based on 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. Use these YAML files to facilitate installation, configuration, and environment setup for DevOps users. - -The Docker Image refers to binaries for OUD Release 12.2.1.4.0 and it has the capability to create different types of OUD Instances (Directory Service, Proxy, Replication) on containers targeted for development and testing. - -***Image***: oracle/oud:12.2.1.4.0 - -# Hardware and Software Requirements -Oracle Unified Directory Docker Image has been tested and is known to run on following hardware and software: - -## Hardware Requirements - -| Hardware | Size | -| :-------: | :---: | -| RAM | 16GB | -| Disk Space| 200GB+| - -## Software Requirements - -| | Version | Command to verify version | -| :---: | :----------------------------: | :-----------------------: | -| Docker| Docker version 18.03 or higher | docker version | -| K8s | Kubernetes version 1.16.0+ | kubectl version - -# Prerequisites - -## Verify Docker Version and OUD Image -Docker version should be 18.03 or higher. To check this, issue the following command: - - # docker version - Client: Docker Engine - Community - Version: 18.09.8-ol - ... - -The Oracle Unified Directory Image for 12cPS4 (12.2.1.4.0) should be loaded into Docker. Verify this by running the following: - - # docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - oracle/oud 12.2.1.4.0 1855f331f5ef 10 days ago 945MB - ... - -## Verify Kubernetes Version -Kubernetes version should be 1.16.0 or higher. Verify by running the following: - - # kubectl version - Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-17T11:41:22Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"} - Server Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-17T11:33:59Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"} - -## Create Kubernetes Namespace -You should create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace you should refer to the samples/oudns.yaml file. - -Update the samples/oudns.yaml file and replace %NAMESPACE% with the value of the namespace you would like to create. In the example below the value 'myoudns' is used. 
- -To create the namespace apply the file using kubectl: - - # kubectl apply -f samples/oudns.yaml - namespace/myoudns created - -Confirm that the namespace is created: - -
   # kubectl get namespaces
-    NAME          STATUS   AGE
-    default       Active   4d
-    kube-public   Active   4d
-    kube-system   Active   4d
-    myoudns       Active   53s
- -## Create Secrets for User IDs and Passwords - -To protect sensitive information, namely user IDs and passwords, you should create Kubernetes Secrets for the key-value pairs with following keys. The Secret with key-value pairs will be used to pass values to containers created through OUD image: - -* rootUserDN -* rootUserPassword -* adminUID -* adminPassword -* bindDN1 -* bindPassword1 -* bindDN2 -* bindPassword2 - -There are two ways by which Secret object can be created with required key-value pairs. - -### Using samples/secrets.yaml file - -To do this you should update the samples/secrets.yaml file with the value for %SECRET_NAME% and %NAMESPACE%, together with the Base64 value for each secret. - -* %rootUserDN% - With Base64 encoded value for rootUserDN parameter. -* %rootUserPassword% - With Base64 encoded value for rootUserPassword parameter. -* %adminUID% - With Base64 encoded value for adminUID parameter. -* %adminPassword% - With Base64 encoded value for adminPassword parameter. -* %bindDN1% - With Base64 encoded value for bindDN1 parameter. -* %bindPassword1% - With Base64 encoded value for bindPassword1 parameter. -* %bindDN2% - With Base64 encoded value for bindDN2 parameter. -* %bindPassword2% - With Base64 encoded value for bindPassword2 parameter. - -Obtain the base64 value for your secrets: - -
   # echo -n cn=Directory Manager | base64
-    Y249RGlyZWN0b3J5IE1hbmFnZXI=
-    # echo -n Oracle123 | base64
-    T3JhY2xlMTIz
-    # echo -n admin | base64
-    YWRtaW4=
- -**Note**: Please make sure to use -n with echo command. Without that, Base64 values would be generated with new-line character included. - -Update the samples/secrets.yaml file with your values. It should look similar to the file shown below: - - apiVersion: v1 - kind: Secret - metadata: - name: oudsecret - namespace: myoudns - type: Opaque - data: - rootUserDN: Y249RGlyZWN0b3J5IE1hbmFnZXI= - rootUserPassword: T3JhY2xlMTIz - adminUID: YWRtaW4= - adminPassword: T3JhY2xlMTIz - bindDN1: Y249RGlyZWN0b3J5IE1hbmFnZXI= - bindPassword1: T3JhY2xlMTIz - bindDN2: Y249RGlyZWN0b3J5IE1hbmFnZXI= - bindPassword2: T3JhY2xlMTIz - -Apply the file: - - # kubectl apply -f samples/secrets.yaml - secret/oudsecret created - -Verify that the secret has been created: - -
   # kubectl --namespace myoudns get secret
-    NAME                  TYPE                                  DATA   AGE
-    default-token-fztcb   kubernetes.io/service-account-token   3      15m
-    oudsecret             Opaque                                8      99s
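If you want to cross-check that a stored value decodes back to the expected plain text, one way to do so (using the secret and key names above) is:

    # kubectl --namespace myoudns get secret oudsecret -o jsonpath="{.data.adminUID}" | base64 --decode
    admin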
 - -### Using `kubectl create secret` command - -Alternatively, a Kubernetes Secret can be created using the following command: - - # kubectl --namespace %NAMESPACE% create secret generic %SECRET_NAME% \ - --from-literal=rootUserDN="%rootUserDN%" \ - --from-literal=rootUserPassword="%rootUserPassword%" \ - --from-literal=adminUID="%adminUID%" \ - --from-literal=adminPassword="%adminPassword%" \ - --from-literal=bindDN1="%bindDN1%" \ - --from-literal=bindPassword1="%bindPassword1%" \ - --from-literal=bindDN2="%bindDN2%" \ - --from-literal=bindPassword2="%bindPassword2%" - -In the above command, update the following placeholders: - -* %NAMESPACE% - The namespace in which the secret is to be created -* %SECRET_NAME% - Name for the secret object -* %rootUserDN% - The value for the rootUserDN parameter. -* %rootUserPassword% - The value for the rootUserPassword parameter. -* %adminUID% - The value for the adminUID parameter. -* %adminPassword% - The value for the adminPassword parameter. -* %bindDN1% - The value for the bindDN1 parameter. -* %bindPassword1% - The value for the bindPassword1 parameter. -* %bindDN2% - The value for the bindDN2 parameter. -* %bindPassword2% - The value for the bindPassword2 parameter. - -Unlike samples/secrets.yaml, the values passed with --from-literal are supplied in plain text; kubectl Base64-encodes them when it stores the Secret. - -After executing the `kubectl create secret ...` command, verify that the secret has been created: - -
   # kubectl --namespace myoudns get secret
-    NAME                  TYPE                                  DATA   AGE
-    default-token-fztcb   kubernetes.io/service-account-token   3      15m
-    oudsecret             Opaque                                8      99s
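For reference, a fully substituted version of the create command, using the same example values as the samples/secrets.yaml section (the values are supplied in plain text here; kubectl performs the Base64 encoding itself):

    # kubectl --namespace myoudns create secret generic oudsecret \
        --from-literal=rootUserDN="cn=Directory Manager" \
        --from-literal=rootUserPassword="Oracle123" \
        --from-literal=adminUID="admin" \
        --from-literal=adminPassword="Oracle123" \
        --from-literal=bindDN1="cn=Directory Manager" \
        --from-literal=bindPassword1="Oracle123" \
        --from-literal=bindDN2="cn=Directory Manager" \
        --from-literal=bindPassword2="Oracle123"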
- -## Prepare a host directory to be used for Filesystem based PersistentVolume - -It's required to prepare directory on Host filesystem to store OUD Instances and other configuration outside container filesystem. That directory from host filesystem would be associated with PersistentVolume. -**In case of multi-node Kubernetes cluster, directory to be associated with PersistentVolume should be accessible on all the nodes at the same path.** - -To prepare a host directory (for example: /scratch/test/oud_user_projects) for mounting as file system based PersistentVolume inside containers, execute the command below on host: - -> The userid can be anything but it must belong to uid:guid as 1000:1000, which is same as 'oracle' user running in the container. -> This ensures 'oracle' user has access to shared volume/directory. - -``` -sudo su - root -mkdir -p /scratch/test/oud_user_projects -chown 1000:1000 /scratch/test/oud_user_projects -exit -``` - -All container operations are performed as **'oracle'** user. - -**Note**: If a user already exist with **'-u 1000 -g 1000'** then use the same user. Or modify any existing user to have uid-gid as **'-u 1000 -g 1000'** - -## Create PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace -A PV is storage resource, while PVC is a request for that resource. To provide storage for your namespace, update the samples/persistent-volume.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :-------------------: | -| %PV_NAME% | PV name | oudpv | -| %PV_HOST_PATH% | Valid path on localhost | /scratch/test/oud_user_projects | -| %PVC_NAME% | PVC name | oudpvc | -| %NAMESPACE% | Namespace | myoudns | - -Apply the file: - - # kubectl apply -f samples/persistent-volume.yaml - persistentvolume/oudpv created - persistentvolumeclaim/oudpvc created - -Verify the PersistentVolume: - - # kubectl --namespace myoudns describe persistentvolume oudpv - Name: oudpv - Labels: type=local - Annotations: kubectl.kubernetes.io/last-applied-configuration: - {"apiVersion":"v1","kind":"PersistentVolume","metadata":{"annotations":{},"labels":{"type":"local"},"name":"oudpv"},"spec":{"accessModes":... - pv.kubernetes.io/bound-by-controller: yes - Finalizers: [kubernetes.io/pv-protection] - StorageClass: oud-storage - Status: Bound - Claim: myoudns/oudpvc - Reclaim Policy: Retain - Access Modes: RWX - VolumeMode: Filesystem - Capacity: 10Gi - Node Affinity: - Message: - Source: - Type: HostPath (bare host directory volume) - Path: /scratch/test/oud_user_projects - HostPathType: - Events: - -Verify the PersistentVolumeClaim: - - # kubectl --namespace myoudns describe pvc oudpvc - Name: oudpvc - Namespace: myoudns - StorageClass: oud-storage - Status: Bound - Volume: oudpv - Labels: - Annotations: kubectl.kubernetes.io/last-applied-configuration: - {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"oudpvc","namespace":"myoudns"},"spec":{"accessModes... - pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - Finalizers: [kubernetes.io/pvc-protection] - Capacity: 10Gi - Access Modes: RWX - VolumeMode: Filesystem - Events: - Mounted By: - -# Example 1 Directory Server (instanceType=Directory) - -In this example you create a POD (oudpod1) which holds a single container based on an Oracle Unified Directory 12c PS4 (12.2.1.4.0) image. - -To create the POD update the samples/oud-dir-pod.yaml file. 
- -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :-------------------: | -| %NAMESPACE% | Namespace | myoudns | -| %IMAGE% | Oracle image tag | oracle/oud:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | -| %PV_NAME% | PV name | oudpv | -| %PVC_NAME% | PVC name | oudpvc | - -Apply the file: - - # kubectl apply -f samples/oud-dir-pod.yaml - pod/oudpod1 created - -To check the status of the created pod: - -
   #  kubectl get pods -n myoudns
-    NAME      READY   STATUS    RESTARTS   AGE
-    oudpod1   1/1     Running   0          14m
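The readiness probes in the sample pod definition wait 120 seconds before the first check, so the pod may briefly report 0/1 under READY after creation. One way to watch it until it becomes ready (using the namespace and pod name above):

    # kubectl --namespace myoudns get pod oudpod1 --watch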
 - -If you see any errors then use the following commands to debug the pod/container. - -To review issues with the pod e.g. CreateContainerConfigError: - - # kubectl --namespace <namespace> describe pod <pod> - -For example: - - # kubectl --namespace myoudns describe pod oudpod1 - -To tail the container logs while it is initialising use the following command: - - # kubectl --namespace <namespace> logs -f -c <container> <pod> - -For example: - - # kubectl --namespace myoudns logs -f -c oudds1 oudpod1 - -To view the full container logs: - - # kubectl --namespace <namespace> logs -c <container> <pod> - -To validate that the OUD directory server instance is running, connect to the container: - - # kubectl --namespace myoudns exec -it -c oudds1 oudpod1 /bin/bash - -In the container, run ldapsearch to return entries from the directory server: - - # cd /u01/oracle/user_projects/oudpod1/OUD/bin - # ./ldapsearch -h localhost -p 1389 -D "cn=Directory Manager" -w Oracle123 -b "" -s sub "(objectclass=*)" dn - dn: dc=example1,dc=com - dn: ou=People,dc=example1,dc=com - dn: uid=user.0,ou=People,dc=example1,dc=com - ... - dn: uid=user.99,ou=People,dc=example1,dc=com - -# Example 2 Directory Server (instanceType=Directory) as a Kubernetes Service - -In this example you will create two pods and two associated containers, both running OUD 12c directory server instances. This demonstrates how you can expose OUD 12c as a network service. This provides a way of abstracting access to the backend service independent of the pod details. - -To create the POD update the samples/oud-dir-svc.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :-------------------: | -| %NAMESPACE% | Namespace | myoudns | -| %IMAGE% | Oracle image tag | oracle/oud:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | -| %PV_NAME% | PV name | oudpv | -| %PVC_NAME% | PVC name | oudpvc | - -Apply the file: - - # kubectl apply -f samples/oud-dir-svc.yaml - service/oud-dir-svc-1 created - pod/oud-dir1 created - service/oud-dir-svc-2 created - pod/oud-dir2 created - -To check the status of the created pods (oud-dir1 and oud-dir2) and services (oud-dir-svc-1 and oud-dir-svc-2): - -
#  kubectl --namespace myoudns get all
-    NAME           READY   STATUS    RESTARTS   AGE
-    pod/oud-dir1   1/1     Running   0          28m
-    pod/oud-dir2   1/1     Running   0          28m
-    pod/oudpod1    1/1     Running   0          22h
-        
-    NAME                    TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                                                                    AGE
-    service/oud-dir-svc-1   NodePort   10.107.171.235   <none>        1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP   28m
-    service/oud-dir-svc-2   NodePort   10.106.206.229   <none>        1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP   28m
 - -From this example you can see that the following service port mappings are available to access the containers: - -
   service/oud-dir-svc-1 : 10.107.171.235 : 1389:31405
-    service/oud-dir-svc-2 : 10.106.206.229 : 1389:31299
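The NodePort values (31405, 31299, and so on) are assigned by Kubernetes and will differ in your environment. One way to look up the LDAP NodePort for a given service without reading the full table (this relies on the port name `ldap` from the sample service definition):

    # kubectl --namespace myoudns get service oud-dir-svc-1 \
        -o jsonpath='{.spec.ports[?(@.name=="ldap")].nodePort}'
    31405

The `Authorization: Basic ...` header used in the Validation sections below is the Base64 encoding of `cn=Directory Manager:Oracle123`, generated the same way as the secret values earlier:

    # echo -n "cn=Directory Manager:Oracle123" | base64
    Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz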
 - -To access the OUD directory server running in pod/oud-dir1 via the LDAP port 1389 you would use the NodePort: $HOSTNAME:31405. - -To access the OUD directory server running in pod/oud-dir2 via the LDAP port 1389 you would use the NodePort: $HOSTNAME:31299. - -For example: - - ldapsearch -h $HOSTNAME -p 31405 -D "cn=Directory Manager" -w Oracle123 -b "" -s sub "(objectclass=*)" dn - dn: dc=example1,dc=com - dn: ou=People,dc=example1,dc=com - dn: uid=user.0,ou=People,dc=example1,dc=com - ... - dn: uid=user.98,ou=People,dc=example1,dc=com - dn: uid=user.99,ou=People,dc=example1,dc=com - - ldapsearch -h $HOSTNAME -p 31299 -D "cn=Directory Manager" -w Oracle123 -b "" -s sub "(objectclass=*)" dn - dn: dc=example2,dc=com - dn: ou=People,dc=example2,dc=com - dn: uid=user.0,ou=People,dc=example2,dc=com - ... - dn: uid=user.98,ou=People,dc=example2,dc=com - dn: uid=user.99,ou=People,dc=example2,dc=com - -## Validation - -From outside the cluster, you can invoke curl commands like the following to access interfaces exposed through NodePort. In this example, there are two services (service/oud-dir-svc-1 and service/oud-dir-svc-2) exposing a set of ports. The following curl commands can be executed against the ports exposed through each service. - -### Curl command example for OUD Admin REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/admin/?scope=base&attributes=%2b' \ - --header 'Content-Type: application/json' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/directory/?scope=base&attributes=%2b' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data SCIM: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -# Example 3 Proxy Server (instanceType=Proxy) as a Kubernetes Service - -In this example you will create a service, pod and associated container, in which an OUD 12c Proxy Server instance is deployed. This acts as a proxy to the two services you created in the previous example. - -To create the POD update the samples/oud-ds_proxy-svc.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :-------------------: | -| %NAMESPACE% | Namespace | myoudns | -| %IMAGE% | Oracle image tag | oracle/oud:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | -| %PV_NAME% | PV name | oudpv | -| %PVC_NAME% | PVC name | oudpvc | - -Apply the file: - - # kubectl apply -f samples/oud-ds_proxy-svc.yaml - service/oud-ds-proxy-svc created - pod/oudp1 created - -Check the status of the new pod/service: - -
# kubectl --namespace myoudns get all
-    NAME           READY   STATUS    RESTARTS   AGE
-    pod/oud-dir1   1/1     Running   0          166m
-    pod/oud-dir2   1/1     Running   0          166m
-    pod/oudp1      1/1     Running   0          20m
-    pod/oudpod1    1/1     Running   0          25h
-    
-    NAME                       TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                                                                    AGE
-    service/oud-dir-svc-1      NodePort   10.107.171.235   <none>        1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP   166m
-    service/oud-dir-svc-2      NodePort   10.106.206.229   <none>        1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP   166m
-    service/oud-ds-proxy-svc   NodePort   10.103.41.171    <none>        1444:30878/TCP,1888:30847/TCP,1389:31810/TCP,1636:30873/TCP,1080:32076/TCP,1081:30762/TCP,1898:31269/TCP   20m
 - -Verify operation of the proxy server, accessing it through the external service port: - - # ldapsearch -h $HOSTNAME -p 31810 -D "cn=Directory Manager" -w Oracle123 -b "" -s sub "(objectclass=*)" dn - dn: dc=example1,dc=com - dn: ou=People,dc=example1,dc=com - dn: uid=user.0,ou=People,dc=example1,dc=com - ... - dn: uid=user.99,ou=People,dc=example1,dc=com - dn: dc=example2,dc=com - dn: ou=People,dc=example2,dc=com - dn: uid=user.0,ou=People,dc=example2,dc=com - ... - dn: uid=user.98,ou=People,dc=example2,dc=com - dn: uid=user.99,ou=People,dc=example2,dc=com - -**Note**: Entries are returned from both backend directory servers (dc=example1,dc=com and dc=example2,dc=com) via the proxy server. - -## Validation - -From outside the cluster, you can invoke curl commands like the following to access interfaces exposed through NodePort. In this example, there is a single service (service/oud-ds-proxy-svc) exposing a set of ports. - -### Curl command example for OUD Admin REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/admin/?scope=base&attributes=%2b' \ - --header 'Content-Type: application/json' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/directory/?scope=base&attributes=%2b' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data SCIM: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -# Example 4 Replication Server (instanceType=Replication) as a Kubernetes Service - -In this example you will create a service, pod and associated container, in which an OUD 12c Replication Server instance is deployed. This creates a single Replication Server which has two Directory Servers in its replication group. - -To create the POD update the samples/oud-ds_rs_ds-svc.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :-------------------: | -| %NAMESPACE% | Namespace | myoudns | -| %IMAGE% | Oracle image tag | oracle/oud:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | -| %PV_NAME% | PV name | oudpv | -| %PVC_NAME% | PVC name | oudpvc | - -Apply the file: - - # kubectl apply -f samples/oud-ds_rs_ds-svc.yaml - service/oud-rs-svc-1 created - pod/oudpodrs1 created - service/oud-ds-svc-1a created - pod/oudpodds1a created - service/oud-ds-svc-1b created - pod/oudpodds1b created - -Check the status of the new services: - -
# kubectl --namespace myoudns get all
-    NAME             READY   STATUS    RESTARTS   AGE
-    pod/oud-dir1     1/1     Running   0          2d20h
-    pod/oud-dir2     1/1     Running   0          2d20h
-    pod/oudp1        1/1     Running   0          2d18h
-    pod/oudpod1      1/1     Running   0          3d18h
-    pod/oudpodds1a   0/1     Running   0          2m44s
-    pod/oudpodds1b   0/1     Running   0          2m44s
-    pod/oudpodrs1    0/1     Running   0          2m45s
-    
-    NAME                       TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                                                                    AGE
-    service/oud-dir-svc-1      NodePort   10.107.171.235   <none>        1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP   2d20h
-    service/oud-dir-svc-2      NodePort   10.106.206.229   <none>        1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP   2d20h
-    service/oud-ds-proxy-svc   NodePort   10.103.41.171    <none>        1444:30878/TCP,1888:30847/TCP,1389:31810/TCP,1636:30873/TCP,1080:32076/TCP,1081:30762/TCP,1898:31269/TCP   2d18h
-    service/oud-ds-svc-1a      NodePort   10.102.218.25    <none>        1444:30347/TCP,1888:30392/TCP,1389:32482/TCP,1636:31161/TCP,1080:31241/TCP,1081:32597/TCP                  2m45s
-    service/oud-ds-svc-1b      NodePort   10.104.6.215     <none>        1444:32031/TCP,1888:31621/TCP,1389:32511/TCP,1636:31698/TCP,1080:30737/TCP,1081:30748/TCP                  2m44s
-    service/oud-rs-svc-1       NodePort   10.110.237.193   <none>        1444:32685/TCP,1888:30176/TCP,1898:30543/TCP                                                               2m45s
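The three new pods report 0/1 under READY until the instances finish configuring and replication is enabled, which can take several minutes. One way to block until they are ready (the timeout value here is only an example):

    # kubectl --namespace myoudns wait --for=condition=Ready \
        pod/oudpodrs1 pod/oudpodds1a pod/oudpodds1b --timeout=600s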
 - -To validate that the OUD replication group is running, connect to the replication server container (oudrs1): - - # kubectl --namespace myoudns exec -it -c oudrs1 oudpodrs1 /bin/bash - cd /u01/oracle/user_projects/oudpodrs1/OUD/bin - -In the container, run dsreplication to return details of the replication group: - - # ./dsreplication status --trustAll --hostname localhost --port 1444 --adminUID admin --dataToDisplay compat-view --dataToDisplay rs-connections - - >>>> Specify Oracle Unified Directory LDAP connection parameters - - Password for user 'admin': - - Establishing connections and reading configuration ..... Done. - - - dc=example1,dc=com - Replication Enabled - ======================================== - - Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] - --------------------:----------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:--------------------------- - oud-rs-svc-1:1444 : -- [11] : 0 : -- : 1898 : Disabled : -- : -- : Up : -- : 1 : -- - oud-ds-svc-1a:1444 : 1 : 0 : 0 : -- [12] : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-rs-svc-1:1898 (GID=1) - oud-ds-svc-1b:1444 : 1 : 0 : 0 : -- [12] : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-rs-svc-1:1898 (GID=1) - -You can see that the Replication Server is running as oud-rs-svc-1:1444, while the Directory Server services are running on oud-ds-svc-1a:1444 and oud-ds-svc-1b:1444. - -## Validation - -From outside the cluster, you can invoke curl commands like the following to access interfaces exposed through NodePort. In this example, there are two Directory services (service/oud-ds-svc-1a and service/oud-ds-svc-1b) exposing a set of ports. The following curl commands can be executed against the ports exposed through each service. - -### Curl command example for OUD Admin REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/admin/?scope=base&attributes=%2b' \ - --header 'Content-Type: application/json' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp -> This can be executed against the replication service (oud-rs-svc-1) as well. - -### Curl command example for OUD Data REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/directory/?scope=base&attributes=%2b' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data SCIM: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -# Example 5 Directory Server/Service added to existing Replication Server/Service (instanceType=AddDS2RS) - -In this example you will create services, pods and containers, in which OUD 12c Replication Server instances are deployed. In this case, two Replication/Directory Server services are added; in addition, the Directory Server created in Example 2 (oud-dir-svc-2) is added to the replication group. - -To create the POD update the samples/oud-ds-plus-rs-svc.yaml file. 
- -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :-------------------: | -| %NAMESPACE% | Namespace | myoudns | -| %IMAGE% | Oracle image tag | oracle/oud:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | -| %PV_NAME% | PV name | oudpv | -| %PVC_NAME% | PVC name | oudpvc | - -Apply the file: - - # kubectl apply -f samples/oud-ds-plus-rs-svc.yaml - service/oud-dsrs-svc-1 created - pod/ouddsrs1 created - service/oud-dsrs-svc-2 created - pod/ouddsrs2 created - -Check the status of the new services: - -
   # kubectl --namespace myoudns get all
-    NAME             READY   STATUS    RESTARTS   AGE
-    pod/oud-dir1     1/1     Running   0          3d
-    pod/oud-dir2     1/1     Running   0          3d
-    pod/ouddsrs1     0/1     Running   0          75s
-    pod/ouddsrs2     0/1     Running   0          75s
-    pod/oudp1        1/1     Running   0          2d21h
-    pod/oudpod1      1/1     Running   0          3d22h
-    pod/oudpodds1a   1/1     Running   0          3h33m
-    pod/oudpodds1b   1/1     Running   0          3h33m
-    pod/oudpodrs1    1/1     Running   0          3h33m
-    
-    NAME                       TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                                                                    AGE
-    service/oud-dir-svc-1      NodePort   10.107.171.235   <none>        1444:30616/TCP,1888:32605/TCP,1389:31405/TCP,1636:32544/TCP,1080:31509/TCP,1081:32395/TCP,1898:31116/TCP   3d
-    service/oud-dir-svc-2      NodePort   10.106.206.229   <none>        1444:30882/TCP,1888:30427/TCP,1389:31299/TCP,1636:31529/TCP,1080:30056/TCP,1081:30458/TCP,1898:31796/TCP   3d
-    service/oud-ds-proxy-svc   NodePort   10.103.41.171    <none>        1444:30878/TCP,1888:30847/TCP,1389:31810/TCP,1636:30873/TCP,1080:32076/TCP,1081:30762/TCP,1898:31269/TCP   2d21h
-    service/oud-ds-svc-1a      NodePort   10.102.218.25    <none>        1444:30347/TCP,1888:30392/TCP,1389:32482/TCP,1636:31161/TCP,1080:31241/TCP,1081:32597/TCP                  3h33m
-    service/oud-ds-svc-1b      NodePort   10.104.6.215     <none>        1444:32031/TCP,1888:31621/TCP,1389:32511/TCP,1636:31698/TCP,1080:30737/TCP,1081:30748/TCP                  3h33m
-    service/oud-dsrs-svc-1     NodePort   10.102.118.29    <none>        1444:30738/TCP,1888:30935/TCP,1389:32438/TCP,1636:32109/TCP,1080:31776/TCP,1081:31897/TCP,1898:30874/TCP   75s
-    service/oud-dsrs-svc-2     NodePort   10.98.139.53     <none>        1444:32312/TCP,1888:30595/TCP,1389:31376/TCP,1636:30090/TCP,1080:31238/TCP,1081:31174/TCP,1898:31863/TCP   75s
-    service/oud-rs-svc-1       NodePort   10.110.237.193   <none>        1444:32685/TCP,1888:30176/TCP,1898:30543/TCP   3h33m
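Because the new DS+RS instances join the replication topology of oud-dir-svc-2 (dc=example2,dc=com), once the pods report 1/1 an ldapsearch against either new service should return the same entries as oud-dir-svc-2. For example, using the LDAP NodePort of oud-dsrs-svc-1 shown in the output above:

    # ldapsearch -h $HOSTNAME -p 32438 -D "cn=Directory Manager" -w Oracle123 -b "" -s sub "(objectclass=*)" dn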
 - -To validate that the OUD replication group is running, connect to the replication server container (ouddsrs) in pod ouddsrs1: - - # kubectl --namespace myoudns exec -it -c ouddsrs ouddsrs1 /bin/bash - cd /u01/oracle/user_projects/ouddsrs1/OUD/bin - -In the container, run dsreplication to return details of the replication group: - - # ./dsreplication status --trustAll --hostname localhost --port 1444 --adminUID admin --dataToDisplay compat-view --dataToDisplay rs-connections - - >>>> Specify Oracle Unified Directory LDAP connection parameters - - Password for user 'admin': - - Establishing connections and reading configuration ..... Done. - - dc=example2,dc=com - Replication Enabled - ======================================== - - Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] - ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:----------------------------- - oud-dir-svc-2:1444 : 102 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-dir-svc-2:1898 (GID=1) - oud-dsrs-svc-1:1444 : 102 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 2 : oud-dsrs-svc-1:1898 (GID=2) - oud-dsrs-svc-2:1444 : 102 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 2 : oud-dsrs-svc-2:1898 (GID=2) - - Replication Server [11] : RS #1 : RS #2 : RS #3 - --------------------------:-------:-------:------- - oud-dir-svc-2:1898 (#1) : -- : Yes : Yes - oud-dsrs-svc-1:1898 (#2) : Yes : -- : Yes - oud-dsrs-svc-2:1898 (#3) : Yes : Yes : -- - - -## Validation - -From outside the cluster, you can invoke curl commands like the following to access interfaces exposed through NodePort. In this example, there are two services (service/oud-dsrs-svc-1 and service/oud-dsrs-svc-2) exposing a set of ports. The following curl commands can be executed against the ports exposed through each service. - -### Curl command example for OUD Admin REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/admin/?scope=base&attributes=%2b' \ - --header 'Content-Type: application/json' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data REST: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/rest/v1/directory/?scope=base&attributes=%2b' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - -### Curl command example for OUD Data SCIM: - - curl --noproxy "*" --insecure --location --request GET \ - 'https://<hostname>:<nodeport>/iam/directory/oud/scim/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:Schema' \ - --header 'Authorization: Basic Y249RGlyZWN0b3J5IE1hbmFnZXI6T3JhY2xlMTIz' | json_pp - - -# Appendix Reference - -Before using these sample yaml files, the following variables are required to be updated: -* %NAMESPACE% - with value for Kubernetes namespace of your choice -* %IMAGE% - with exact docker image for oracle/oud:12.2.1.x.x -* %PV_NAME% - with value of the persistent volume name of your choice -* %PV_HOST_PATH% - with value of the persistent volume Host Path (Directory Path which would be used as storage path for volume) -* %PVC_NAME% - with value of the persistent volume claim name of your choice -* %SECRET_NAME% - with value of the secret name which can be created using samples/secrets.yaml file. -* %rootUserDN% - With Base64 encoded value for rootUserDN parameter. 
-* %rootUserPassword% - With Base64 encoded value for rootUserPassword parameter. -* %adminUID% - With Base64 encoded value for adminUID parameter. -* %adminPassword% - With Base64 encoded value for adminPassword parameter. -* %bindDN1% - With Base64 encoded value for bindDN1 parameter. -* %bindPassword1% - With Base64 encoded value for bindPassword1 parameter. -* %bindDN2% - With Base64 encoded value for bindDN2 parameter. -* %bindPassword2% - With Base64 encoded value for bindPassword2 parameter. - - -## samples/oudns.yaml - -This is a sample file to create Kubernetes namespace. - -## samples/persistent-volume.yaml - -This is a sample file to create Persistent volume and persistent volume claim - -## samples/secrets.yaml - -This is a sample file to create the secrets which can be used to create secrets for the pods. - -Below keys will be honoured by different OUD yaml files -* rootUserDN -* rootUserPassword -* adminUID -* adminPassword -* bindDN1 -* bindPassword1 -* bindDN2 -* bindPassword2 - -All the values of the keys should be encoded using the below command and provide the value in samples/secrets.yaml file. - -example: To generate value for keys in Base64 format, following kind of command can be executed. -echo -n 'MyPassword' | base64 -TXlQYXNzd29yZA== - -**Note**: Please make sure to use -n with echo command. Without that, Base64 values would be generated with new-line character included. - -## samples/oud-dir-svc.yaml - -This is a sample file to create 2 set of PODs and Services for OUD Instances - -## samples/oud-dir-pod.yaml - -This is a sample file to create POD (oudpod1) with container for OUD Directory Instance. - -## samples/oud-ds_proxy-svc.yaml - -This is a sample file to create: -* POD (oudds1) with container for OUD Directory Instance (dc=example1,dc=com) -* POD (oudds2) with container for OUD Directory Instance (dc=example2,dc=com) -* POD (oudp1) with container for OUD Directory Proxy referring to OUD Directory Instances (oudds1 and oudds2) for dc=example1,dc=com and dc=example2,dc=com -* Service (oud-ds-proxy-svc) referring to POD with OUD Directory Proxy (oudp1) - -## samples/oud-ds_rs_ds-svc.yaml - -This is a sample file to create: -* POD (oudpodrs1) with container for OUD Replication Server Instance connected to OUD Directory Instance (oudpodds1) -* POD (oudpodds1a) with container for OUD Directory Instance having replication enabled through Replication Server Instance (oudpodrs1) -* POD (oudpodds1b) with container for OUD Directory Instance having replication enabled through Replication Server Instance (oudpodrs1) -* Service (oud-rs-svc-1) referring to Ports from POD (oudpodrs1) -* Service (oud-ds-svc-1a) referring to Ports from POD (oudpodds1a) -* Service (oud-ds-svc-1b) referring to Ports from POD (oudpodds1b) - -With execution of following kind of command in container, status can be checked for replicated instances: - - $ /u01/oracle/user_projects/oudpodrs1/OUD/bin/dsreplication status \ - --trustAll --hostname oudpodrs1.oud-ds-rs-ds-svc.myoudns.svc.cluster.local --port 1444 \ - --dataToDisplay compat-view - -## samples/oud-ds-plus-rs-svc.yaml - -This is a sample file to create 3 replicated DS+RS Instances: -* POD (ouddsrs1) with container for OUD Directory Server (dc=example1,dc=com) and Replication Server -* POD (ouddsrs2) with container for OUD Directory Server (dc=example1,dc=com) and Replication Server -* Service (oud-dsrs-svc-1) referring to Ports from POD (ouddsrs1) -* Service (oud-dsrs-svc-2) referring to Ports from POD (ouddsrs2) - -With execution of 
following kind of command in container, status can be checked for replicated instances: - - $ /u01/oracle/user_projects/ouddsrs2/OUD/bin/dsreplication status \ - --trustAll --hostname ouddsrs2.oud-dsrs-svc.myoudns.svc.cluster.local --port 1444 \ - --dataToDisplay compat-view - -# Licensing & Copyright - -## License
-To download and run Oracle Fusion Middleware products, regardless of whether inside or outside a Docker container, you must download the binaries from the Oracle website and accept the license indicated on that page.

 - -All scripts and files hosted in this project and the GitHub [fmw-kubernetes/OracleUnifiedDirectory](./) repository required to build the Docker images are, unless otherwise noted, released under the [UPL 1.0](https://oss.oracle.com/licenses/upl/) license.

- -## Copyright
-Copyright (c) 2020, Oracle and/or its affiliates.
-Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl

diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash-configMap.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash-configMap.yaml index 498013086..f47a81ad7 100755 --- a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash-configMap.yaml +++ b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash-configMap.yaml @@ -1,16 +1,19 @@ # -# Copyright (c) 2020, Oracle and/or its affiliates. +# Copyright (c) 2020, 2022, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl # # -{{- if and .Values.elk.enabled (not .Values.elk.logstash.logstashConfigMap) }} +{{- if and .Values.elk.IntegrationEnabled (not .Values.elk.logstashConfigMap) }} apiVersion: v1 kind: ConfigMap metadata: name: {{ include "oud-ds-rs.fullname" . }}-logstash-configmap data: + logstash.yml: | + #http.host: "0.0.0.0" + elk.crt: {{ .Values.elk.escert | toYaml | indent 4 }} logstash-config.conf: | input { file { @@ -45,7 +48,18 @@ data: } output { elasticsearch { - hosts => ["{{ include "oud-ds-rs.fullname" . }}-elasticsearch:9200"] + hosts => ["{{ .Values.elk.eshosts }}"] + cacert => '/usr/share/logstash/config/certs/elk.crt' + index => "{{ .Values.elk.esindex }}" + ssl => {{ .Values.elk.sslenabled }} + ssl_certificate_verification => false + {{- if .Values.elk.espassword }} + user => "{{ .Values.elk.esuser }}" + password => "${ELASTICSEARCH_PASSWORD}" + {{- end }} + {{- if .Values.elk.esapikey }} + api_key => "${ELASTICSEARCH_PASSWORD}" + {{- end }} } } {{- end -}} diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash.yaml index a1786e9d7..a65e25f73 100755 --- a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash.yaml +++ b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash.yaml @@ -5,7 +5,7 @@ # https://oss.oracle.com/licenses/upl # # -{{- if .Values.elk.enabled }} +{{- if .Values.elk.IntegrationEnabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -20,39 +20,58 @@ spec: labels: app: logstash spec: - volumes: - - name: {{ include "oud-ds-rs.fullname" . }}-pv - persistentVolumeClaim: - claimName: {{ include "oud-ds-rs.fullname" . }}-pvc - - name: shared-logs - emptyDir: {} containers: - name: {{ include "oud-ds-rs.fullname" . }}-logstash - image: {{ .Values.elk.logstash.image.repository }}:{{ .Values.elk.logstash.image.tag }} + env: + - name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-pw-elastic + key: password + image: {{ .Values.elk.logStashImage }} ports: - - containerPort: {{ .Values.elk.logstash.containerPort }} + - containerPort: 5044 name: logstash command: - logstash - #command: ["/bin/sh"] - #args: ["/usr/share/logstash/bin/logstash", "-f", /srv/nfs/oudelk/oud_user_projects/logstash/config/logstash-config.conf ] imagePullPolicy: IfNotPresent volumeMounts: - name: {{ include "oud-ds-rs.fullname" . }}-pipeline mountPath: /usr/share/logstash/pipeline/ - name : {{ include "oud-ds-rs.fullname" . }}-pv mountPath: /u01/oracle/user_projects + - mountPath: /usr/share/logstash/config/logstash.yml + subPath: logstash.yml + name: config-volume + - mountPath: /usr/share/logstash/config/certs + name: elk-cert volumes: - name: {{ include "oud-ds-rs.fullname" . 
}}-pipeline configMap: - {{- if .Values.elk.logstash.logstashConfigMap }} - name: {{ .Values.elk.logstash.logstashConfigMap }} + {{- if .Values.elk.logstashConfigMap }} + name: {{ .Values.elk.logstashConfigMap }} {{- else }} name: {{ include "oud-ds-rs.fullname" . }}-logstash-configmap items: - key: logstash-config.conf path: logstash-config.conf {{- end }} + - configMap: + defaultMode: 420 + items: + - key: elk.crt + path: elk.crt + name: {{ include "oud-ds-rs.fullname" . }}-logstash-configmap + name: elk-cert + - configMap: + defaultMode: 420 + items: + - key: logstash.yml + path: logstash.yml + name: {{ include "oud-ds-rs.fullname" . }}-logstash-configmap + name: config-volume + - name: shared-logs + emptyDir: {} - name: {{ include "oud-ds-rs.fullname" . }}-pv persistentVolumeClaim: claimName: {{ include "oud-ds-rs.fullname" . }}-pvc @@ -61,4 +80,4 @@ spec: {{- toYaml . | nindent 6 }} {{- end }} -{{- end }} +{{- end }} diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/values.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/values.yaml index b59984cc8..3a2dfb056 100755 --- a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/values.yaml +++ b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/values.yaml @@ -321,103 +321,43 @@ replOUD: # List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap envVars: + +# Configuration for Logstash deployment elk: # Enabled flag to enable the integrated ELK stack for OUD enabled: false - elasticsearch: - image: - repository: docker.elastic.co/elasticsearch/elasticsearch - tag: 6.8.0 - pullPolicy: IfNotPresent - # esreplicas is the number of replicas of ElasticSearch deployment to be created - esreplicas: 1 - #minimumMasterNodes is the the minimum number of master nodes that needs to configured. This will use the number of replicas (esreplicas / 2) + 1 - minimumMasterNodes: 1 - # Java options for Elasticsearch. This is where you should configure the jvm heap size - esJAVAOpts: "-Xms512m -Xmx512m" - #sysctl vm.max_map_count needed for Elasticsearch - sysctlVmMaxMapCount: 262144 - # cpu resources for elastic search - resources: - requests: - cpu: "100m" - limits: - cpu: "1000m" - esService: - # Type of Service to be created for Elastic Search interfaces - type: ClusterIP - # Service Type for loadbalancer services accessing Elastic Search pods. - lbrtype: ClusterIP - - # Kibana configuration parameters - kibana: - image: - repository: docker.elastic.co/kibana/kibana - tag: 6.8.0 - pullPolicy: IfNotPresent - #Number of Kibana instances will be created - kibanaReplicas : 1 - #Type of kibana service to be created - service: - type: NodePort - #Port on which the kibana will be accessed inside the cluster - targetPort: 5601 - #nodePort is the port on which kibana service will be accessed from outside - nodePort: 31199 - - # Logstash Configuration parameters - logstash: - image: - repository: logstash - tag: 6.6.0 - pullPolicy: IfNotPresent - #Port on which the logstash container will be running - containerPort : 5044 - #Type of service to be created - service: - type: NodePort - #Port on which the logstash will be accessed with in the cluster - targetPort: 9600 - #Port on which the logstash will be accessed outside the cluster - nodePort: 32222 - # If logstashConfigMap is empty, Then default logstashConfigMap will be created and used. 
- logstashConfigMap: - # ELK Ports on which elastic search will be listening across the nodes and outside the nodes - elkPorts: - rest: 9200 - internode: 9300 - #Image used for initiContainers - busybox: - image: busybox - imagePullSecrets: - name: dockercred - -elkVolume: - # If enabled, it will use the persistent volume. - # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - # Path at which the volume would be mounted - mountPath: /usr/share/elasticsearch/data - # provide the pvname to use an already created Persistent Volume. If blank, will use the default name from Chart - pvname: - accessMode: ReadWriteMany - size: 20Gi - storageClass: elk-oud -# default supported values: either filesystem or networkstorage or custom - type: filesystem - networkstorage: - nfs: - path: /scratch/shared/oud_elk/data - server: 0.0.0.0 - filesystem: - hostPath: - # The path location mentioned should be created and made accessible with necessary privileges for access from pods/containers. - path: /scratch/shared/oud_elk/data - custom: - # YAML content to be included in PersistenceVolume Object - annotations: {} - + IntegrationEnabled: false + logStashImage: logstash:8.3.1 + logstashConfigMap: + esindex: oudlogs-00001 + eshosts: http://elasticsearch.oudns.svc.cluster.local:9200 + sslenabled: false + esuser: logstash_internal + espassword: elasticsearch-pw-elastic + esapikey: + escert: | + -----BEGIN CERTIFICATE----- + MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75NvPPQUN5kXqNQUwDQYJKoZIhvcNAQELBQAw + NTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJj + aC1odHRwMB4XDTIyMDgyNDA1MTU1OVoXDTIzMDgyNDA1MjU1OVowNTEWMBQGA1UE + CxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJjaC1odHRwMIIB + IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsQOnxUm9uF32+lyc9SA3WcMZ + P1X7TbHMDuO/l3UHBUf5F/bt2m3YkGw+enIos9wzuUNpjIwVt8q4WrRCMl80nAQ0 + yCXrfLSI9zaHxEC8Ht7V0U+7Sgu5uysD4tyZ9T0Q5zjvkWS6oBPxhfri3OQfPvUW + gQ6wJaPGDteYZAwiBMvPEkmh0VUTBTXjToHrtrT7pzmz5BBWnUzdf+jv0+nEfedm + mMWw/8jqyqid7bu7bo6gKBZ8zk06n2iMaXzmGW34QlYRLBgubThhxyDE7joZ4NTA + UFEJecZR2fccmpN8CNkT9Ex4Hq88nh2OP5XKKPNF4kLh2u6F4auF7Uz42jwvIwID + AQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG + AQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLQb/IjHHkSmHgKSPY7r + zBIJZMbdMA0GCSqGSIb3DQEBCwUAA4IBAQA01qY0tGIPsKNkn7blxRjEYkTg59Z5 + vi6MCpGtdoyZeJgH621IpwyB34Hpu1RQfyg1aNgmOtIK9cvQZRl008DHF4AiHYhU + 6xe3cjI/QxDXwitoBgWl+a0mkwhSmzJt7TuzImq7RMO4ws3M/nGeNUwFjwsQu86+ + N/Y3RuuUVbK1xy8Jdz3FZADIgHVPN6GQwYKEpWrZNapKBXjunjCZmpBFxqGMRF44 + fcSKFlFkwjyTq4kgq44NPv18NMfKCYZcK7ttRTiep77vKB7No/TM69Oz5ZHhQ+2Q + pSGg3QF+1fOCFCgWXFEOle6lQ5i8a/GihY0FuphrZxP9ovZ/EKPpE6Gq + -----END CERTIFICATE----- # Cron job will run based on the schedule defined. By default it will run every 30 minutes diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-dir-pod.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-dir-pod.yaml deleted file mode 100755 index b1dae7ac8..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/oud-dir-pod.yaml +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Pod -metadata: - name: oudpod1 - namespace: %NAMESPACE% - labels: - app: oud1 -spec: - restartPolicy: OnFailure - hostname: oudpod1 - subdomain: oudsvc - containers: - - name: oudds1 - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: instanceType - value: Directory - - name: OUD_INSTANCE_NAME - value: oudpod1 - - name: hostname - value: oudpod1 - - name: baseDN - value: dc=example1,dc=com - - name: sampleData - value: "100" - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: sleepBeforeConfig - value: "3" - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-dir-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-dir-svc.yaml deleted file mode 100755 index 674320730..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/oud-dir-svc.yaml +++ /dev/null @@ -1,222 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dir-svc-1 - namespace: %NAMESPACE% - labels: - app: oud-dir-1 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-dir-1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: oud-dir1 - namespace: %NAMESPACE% - labels: - app: oud-dir-1 -spec: - restartPolicy: OnFailure - containers: - - name: oudds - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: instanceType - value: Directory - - name: OUD_INSTANCE_NAME - value: oud-dir1 - - name: hostname - value: oud-dir-svc-1 - - name: baseDN - value: dc=example1,dc=com - - name: sampleData - value: "100" - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: sleepBeforeConfig - value: "3" - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dir-svc-2 - namespace: %NAMESPACE% - labels: - app: oud-dir-2 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-dir-2 ---- -apiVersion: v1 -kind: Pod -metadata: - name: oud-dir2 - namespace: %NAMESPACE% - labels: - app: oud-dir-2 -spec: - restartPolicy: OnFailure - containers: - - name: oudds - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: instanceType - value: Directory - - name: OUD_INSTANCE_NAME - value: oud-dir2 - - name: hostname - value: oud-dir-svc-2 - - name: baseDN - value: dc=example2,dc=com - - name: sampleData - value: "100" - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: sleepBeforeConfig - value: "3" - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - 
tcpSocket: - port: 1389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-remote-oud-svcs.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-remote-oud-svcs.yaml deleted file mode 100755 index ec17fefe3..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-remote-oud-svcs.yaml +++ /dev/null @@ -1,558 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dsrs-roud-svc-1 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-roud-1 -spec: - ports: - - port: 30144 - name: admin-ldaps - targetPort: 30144 - nodePort: 30144 - - port: 30188 - name: admin-https - targetPort: 30188 - nodePort: 30188 - - port: 30189 - name: ldap - targetPort: 30189 - nodePort: 30189 - - port: 30136 - name: ldaps - targetPort: 30136 - nodePort: 30136 - - port: 30180 - name: data-http - targetPort: 30180 - nodePort: 30180 - - port: 30181 - name: data-https - targetPort: 30181 - nodePort: 30181 - - port: 30198 - name: replication - targetPort: 30198 - nodePort: 30198 - type: NodePort - selector: - app: oud-ds-plus-rs-roud-1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs-roud-1 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-roud-1 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-roud-svc-1 - containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 30144 - - containerPort: 30188 - - containerPort: 30189 - - containerPort: 30136 - - containerPort: 30180 - - containerPort: 30181 - - containerPort: 30198 - env: - - name: sleepBeforeConfig - value: "3" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs-roud-1 - - name: hostname - value: %HOSTNAME% - - name: baseDN - value: dc=example,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: %SRC_HOSTNAME% - - name: sourceServerPorts - value: %SRC_HOSTNAME%:1444,%HOSTNAME%:30144 - - name: sourceAdminConnectorPort - value: "1444" - - name: sourceReplicationPort - value: "1898" - - name: adminConnectorPort - value: "30144" - - name: httpAdminConnectorPort - value: "30188" - - name: ldapPort - value: "30189" - - name: ldapsPort - value: "30136" - - name: replicationPort - value: "30198" - 
- name: httpPort - value: "30180" - - name: httpsPort - value: "30181" - - name: sampleData - value: NONE - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${sourceAdminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${sourceAdminConnectorPort} --replicationPort1 ${sourceReplicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${sourceAdminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 30189 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 30144 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 30180 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dsrs-roud-svc-2 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-roud-2 -spec: - ports: - - port: 30244 - name: admin-ldaps - targetPort: 30244 - nodePort: 30244 - - port: 30288 - name: admin-https - targetPort: 30288 - nodePort: 30288 - - port: 30289 - name: ldap - targetPort: 30289 - nodePort: 30289 - - port: 30236 - name: ldaps - targetPort: 30236 - nodePort: 30236 - - port: 30280 - name: data-http - targetPort: 30280 - nodePort: 30280 - - port: 30281 - name: data-https - targetPort: 30281 - nodePort: 30281 - - port: 30298 - name: replication - targetPort: 30298 - nodePort: 30298 - type: NodePort - selector: - app: oud-ds-plus-rs-roud-2 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs-roud-2 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-roud-2 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-roud-svc-2 - containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 30244 - - containerPort: 30288 - - containerPort: 30289 - - containerPort: 30236 - - containerPort: 30280 - - containerPort: 30281 - - containerPort: 30298 - env: - - name: sleepBeforeConfig - value: "5m" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs-roud-2 - - name: hostname - value: %HOSTNAME% - - name: baseDN - value: dc=example,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: 
adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: %SRC_HOSTNAME% - - name: sourceServerPorts - value: %SRC_HOSTNAME%:1444,%HOSTNAME%:30244 - - name: sourceAdminConnectorPort - value: "1444" - - name: sourceReplicationPort - value: "1898" - - name: adminConnectorPort - value: "30244" - - name: httpAdminConnectorPort - value: "30288" - - name: ldapPort - value: "30289" - - name: ldapsPort - value: "30236" - - name: replicationPort - value: "30298" - - name: httpPort - value: "30280" - - name: httpsPort - value: "30281" - - name: sampleData - value: NONE - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${sourceAdminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${sourceAdminConnectorPort} --replicationPort1 ${sourceReplicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${sourceAdminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 30289 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 30244 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 30280 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- ---- ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dsrs-roud-svc-3 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-roud-3 -spec: - ports: - - port: 30344 - name: admin-ldaps - targetPort: 30344 - nodePort: 30344 - - port: 30388 - name: admin-https - targetPort: 30388 - nodePort: 30388 - - port: 30389 - name: ldap - targetPort: 30389 - nodePort: 30389 - - port: 30336 - name: ldaps - targetPort: 30336 - nodePort: 30336 - - port: 30380 - name: data-http - targetPort: 30380 - nodePort: 30380 - - port: 30381 - name: data-https - targetPort: 30381 - nodePort: 30381 - - port: 30398 - name: replication - targetPort: 30398 - nodePort: 30398 - type: NodePort - selector: - app: oud-ds-plus-rs-roud-3 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs-roud-3 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-roud-3 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-roud-svc-3 - 
containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 30344 - - containerPort: 30388 - - containerPort: 30389 - - containerPort: 30336 - - containerPort: 30380 - - containerPort: 30381 - - containerPort: 30398 - env: - - name: sleepBeforeConfig - value: "10m" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs-roud-3 - - name: hostname - value: %HOSTNAME% - - name: baseDN - value: dc=example,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: %SRC_HOSTNAME% - - name: sourceServerPorts - value: %SRC_HOSTNAME%:1444,%HOSTNAME%:30344 - - name: sourceAdminConnectorPort - value: "1444" - - name: sourceReplicationPort - value: "1898" - - name: adminConnectorPort - value: "30344" - - name: httpAdminConnectorPort - value: "30388" - - name: ldapPort - value: "30389" - - name: ldapsPort - value: "30336" - - name: replicationPort - value: "30398" - - name: httpPort - value: "30380" - - name: httpsPort - value: "30381" - - name: sampleData - value: NONE - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${sourceAdminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${sourceAdminConnectorPort} --replicationPort1 ${sourceReplicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${sourceAdminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 30389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 30344 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 30380 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- ---- diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-svc.yaml 
b/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-svc.yaml deleted file mode 100755 index 478d74309..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-svc.yaml +++ /dev/null @@ -1,313 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dsrs-svc-1 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-1 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-ds-plus-rs-1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs1 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-1 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-svc-1 - containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: sleepBeforeConfig - value: "3" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs1 - - name: hostname - value: oud-dsrs-svc-1 - - name: baseDN - value: dc=example2,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: oud-dir-svc-2 - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${adminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${adminConnectorPort} --replicationPort1 ${replicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${adminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 
1389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dsrs-svc-2 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-2 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-ds-plus-rs-2 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs2 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-2 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-svc-2 - containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: sleepBeforeConfig - value: "300" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs2 - - name: hostname - value: oud-dsrs-svc-2 - - name: baseDN - value: dc=example2,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: oud-dir-svc-2 - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${adminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${adminConnectorPort} --replicationPort1 ${replicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${adminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - 
- mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 420 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 420 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 420 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 420 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds_proxy-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds_proxy-svc.yaml deleted file mode 100755 index f81e26388..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/oud-ds_proxy-svc.yaml +++ /dev/null @@ -1,122 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-ds-proxy-svc - namespace: %NAMESPACE% - labels: - app: oud-ds-proxy -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-ds-proxy ---- -apiVersion: v1 -kind: Pod -metadata: - name: oudp1 - namespace: %NAMESPACE% - labels: - app: oud-ds-proxy -spec: - restartPolicy: OnFailure - containers: - - name: oudp - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - env: - - name: sleepBeforeConfig - value: "3" - - name: instanceType - value: Proxy - - name: OUD_INSTANCE_NAME - value: oudp1 - - name: hostname - value: oud-ds-proxy-svc - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: sourceServerPorts - value: oud-dir-svc-1:1389,oud-dir-svc-2:1389 - - name: dsconfig_1 - value: create-extension --set enabled:true --set remote-ldap-server-address:oud-dir-svc-1 --set remote-ldap-server-port:1389 --set remote-ldap-server-ssl-port:1636 --extension-name ldap_extn_1 --type ldap-server - - name: dsconfig_2 - value: create-workflow-element --set client-cred-mode:use-client-identity --set enabled:true --set ldap-server-extension:ldap_extn_1 --type proxy-ldap --element-name proxy_ldap_wfe_1 - - name: dsconfig_3 - value: create-workflow --set base-dn:dc=example1,dc=com --set enabled:true --set workflow-element:proxy_ldap_wfe_1 --type generic --workflow-name wf_1 - - name: dsconfig_4 - value: set-network-group-prop --group-name network-group --add workflow:wf_1 - - name: dsconfig_5 - value: create-extension --set enabled:true --set remote-ldap-server-address:oud-dir-svc-2 --set remote-ldap-server-port:1389 --set remote-ldap-server-ssl-port:1636 --extension-name ldap_extn_2 --type ldap-server - - name: dsconfig_6 - value: create-workflow-element --set client-cred-mode:use-client-identity --set enabled:true --set ldap-server-extension:ldap_extn_2 --type proxy-ldap --element-name 
proxy_ldap_wfe_2 - - name: dsconfig_7 - value: create-workflow --set base-dn:dc=example2,dc=com --set enabled:true --set workflow-element:proxy_ldap_wfe_2 --type generic --workflow-name wf_2 - - name: dsconfig_8 - value: set-network-group-prop --group-name network-group --add workflow:wf_2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 240 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 240 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 240 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds_rs_ds-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds_rs_ds-svc.yaml deleted file mode 100755 index ab816e29e..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/oud-ds_rs_ds-svc.yaml +++ /dev/null @@ -1,406 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-rs-svc-1 - namespace: %NAMESPACE% - labels: - app: oud-rs-1 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-rs-1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: oudpodrs1 - namespace: %NAMESPACE% - labels: - app: oud-rs-1 -spec: - restartPolicy: OnFailure - containers: - - name: oudrs1 - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1898 - env: - - name: sleepBeforeConfig - value: "3" - - name: instanceType - value: Replication - - name: OUD_INSTANCE_NAME - value: oudpodrs1 - - name: hostname - value: oud-rs-svc-1 - - name: baseDN - value: dc=example1,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: dsreplication_1 - value: disable --disableAll --hostname ${sourceHost} --port ${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${adminConnectorPort} --noReplicationServer1 --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --onlyReplicationServer2 --baseDN ${baseDN} - - name: dsreplication_3 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: 
- tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-ds-svc-1a - namespace: %NAMESPACE% - labels: - app: oud-ds-1a -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - type: NodePort - selector: - app: oud-ds-1a ---- -apiVersion: v1 -kind: Pod -metadata: - name: oudpodds1a - namespace: %NAMESPACE% - labels: - app: oud-ds-1a -spec: - restartPolicy: OnFailure - containers: - - name: oudds1a - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - env: - - name: sleepBeforeConfig - value: "240" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: oudpodds1a - - name: hostname - value: oud-ds-svc-1a - - name: baseDN - value: dc=example1,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: oud-rs-svc-1 - - name: initializeFromHost - value: oud-dir-svc-1 - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${adminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${hostname} --port1 ${adminConnectorPort} --noReplicationServer1 --host2 ${sourceHost} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --onlyReplicationServer2 --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${adminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 240 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 240 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 240 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - 
name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-ds-svc-1b - namespace: %NAMESPACE% - labels: - app: oud-ds-1b -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - type: NodePort - selector: - app: oud-ds-1b ---- -apiVersion: v1 -kind: Pod -metadata: - name: oudpodds1b - namespace: %NAMESPACE% - labels: - app: oud-ds-1b -spec: - restartPolicy: OnFailure - containers: - - name: oudds1b - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - env: - - name: sleepBeforeConfig - value: "300" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: oudpodds1b - - name: hostname - value: oud-ds-svc-1b - - name: baseDN - value: dc=example1,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: sourceHost - value: oud-rs-svc-1 - - name: initializeFromHost - value: oud-dir-svc-1 - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${adminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${hostname} --port1 ${adminConnectorPort} --noReplicationServer1 --host2 ${sourceHost} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --onlyReplicationServer2 --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${adminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 300 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 300 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 300 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% diff --git a/OracleUnifiedDirectory/kubernetes/samples/oudns.yaml b/OracleUnifiedDirectory/kubernetes/samples/oudns.yaml deleted file mode 100755 index 40678b0d6..000000000 --- 
a/OracleUnifiedDirectory/kubernetes/samples/oudns.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Namespace -metadata: - name: %NAMESPACE% diff --git a/OracleUnifiedDirectory/kubernetes/samples/persistent-volume.yaml b/OracleUnifiedDirectory/kubernetes/samples/persistent-volume.yaml deleted file mode 100755 index f607116f0..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/persistent-volume.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# -# -kind: PersistentVolume -apiVersion: v1 -metadata: - name: %PV_NAME% - namespace: %NAMESPACE% - labels: - type: oud-pv -spec: - storageClassName: manual - capacity: - storage: 10Gi - persistentVolumeReclaimPolicy: "Delete" - accessModes: - - ReadWriteMany - hostPath: - path: "%PV_HOST_PATH%" ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: %PVC_NAME% - namespace: %NAMESPACE% -spec: - storageClassName: manual - selector: - matchLabels: - type: oud-pv - accessModes: - - ReadWriteMany - resources: - requests: - storage: 10Gi diff --git a/OracleUnifiedDirectory/kubernetes/samples/secrets.yaml b/OracleUnifiedDirectory/kubernetes/samples/secrets.yaml deleted file mode 100755 index 3af40fa6f..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/secrets.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: %SECRET_NAME% - namespace: %NAMESPACE% -type: Opaque -data: - rootUserDN: %rootUserDN% - rootUserPassword: %rootUserPassword% - adminUID: %adminUID% - adminPassword: %adminPassword% - bindDN1: %bindDN1% - bindPassword1: %bindPassword2% - bindDN2: %bindDN2% - bindPassword2: %bindPassword2% diff --git a/OracleUnifiedDirectory/kubernetes/samples/stress-oud-dir-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/stress-oud-dir-svc.yaml deleted file mode 100755 index 89c9618ed..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/stress-oud-dir-svc.yaml +++ /dev/null @@ -1,360 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dir-svc-1 - namespace: %NAMESPACE% - labels: - app: oud-dir-1 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-dir-1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: oud-dir1 - namespace: %NAMESPACE% - labels: - app: oud-dir-1 -spec: - restartPolicy: OnFailure - containers: - - name: oudds - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: instanceType - value: Directory - - name: OUD_INSTANCE_NAME - value: oud-dir1 - - name: hostname - value: oud-dir-svc-1 - - name: baseDN - value: dc=oracle,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: sleepBeforeConfig - value: "3" - - name: serverTuning - value: -Xms1024m -Xmx2048m -d64 -XX:+UseCompressedOops -server -Xmn1g -XX:MaxTenuringThreshold=1 -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=60 -# Importing data - - name: ldifFile_1 - value: /u01/oracle/user_projects/ldif_files/input_1million_18july.ldif -# Importing 100 dynamic groups - - name: ldifFile_2 - value: /u01/oracle/user_projects/ldif_files/dgroup_300_19july.ldif -# Importing 100 sysgroups - - name: ldifFile_3 - value: /u01/oracle/user_projects/ldif_files/sysgroup_23july.ldif -# Importing 100 sys dynamic groups - - name: ldifFile_4 - value: /u01/oracle/user_projects/ldif_files/sysdgroup_23july.ldif -# Changing limits - - name: dsconfig_1 - value: set-global-configuration-prop --set lookthrough-limit:75000 - - name: dsconfig_2 - value: set-global-configuration-prop --set size-limit:75000 - - name: dsconfig_3 - value: set-workflow-element-prop --element-name userRoot --set index-entry-limit:75000 -# Creating indexes -# Creating equality type index on ou attribute - - name: dsconfig_4 - value: create-local-db-index --element-name userRoot --index-name ou --set index-type:equality -# Adding substring to index on ou attribute - - name: dsconfig_5 - value: set-local-db-index-prop --element-name userRoot --index-name ou --add index-type:substring -# Creating equality type index on c attribute - - name: dsconfig_6 - value: create-local-db-index --element-name userRoot --index-name c --set index-type:equality -# Adding substring to index on c attribute - - name: dsconfig_7 - value: set-local-db-index-prop --element-name userRoot --index-name c --add index-type:substring -# Creating equality type index on mobile attribute - - name: dsconfig_8 - value: create-local-db-index --element-name userRoot --index-name mobile --set index-type:equality -# Adding substring to index on mobile attribute - - name: dsconfig_9 - value: set-local-db-index-prop --element-name userRoot --index-name mobile --add index-type:substring -# Creating equality type index on title attribute - - name: dsconfig_10 - value: create-local-db-index --element-name 
userRoot --index-name title --set index-type:equality -# Adding substring to index on title attribute - - name: dsconfig_11 - value: set-local-db-index-prop --element-name userRoot --index-name title --add index-type:substring -# Enabling entry cache preloading... - - name: dsconfig_12 - value: set-global-configuration-prop --set entry-cache-preload:true -# Entry Cache and Group Cache - - name: dsconfig_13 - value: delete-entry-cache --cache-name "Group Cache" -# Creating Entry Cache... - - name: dsconfig_14 - value: create-entry-cache --set cache-level:1 --set enabled:true --type fifo --set max-entries:1100000 --set max-memory-percent:70 --cache-name FIFO_Entry_Cache_1 -# Creating Group Entry Cache... - - name: dsconfig_15 - value: create-entry-cache --set cache-level:2 --set enabled:true --type fifo-group --set max-entries:1000 --set max-memory-percent:10 --cache-name FIFO_Group_Entry_Cache_2 -# Access control handlers - - name: dsconfig_16 - value: set-access-control-handler-prop --add 'global-aci:(targetattr="*")(version 3.0; acl "All read and write access"; allow (read,search,compare,add,write,delete,import,export) userdn="ldap:///all"; )' --no-prompt -# Logs disabling - - name: dsconfig_17 - value: set-log-publisher-prop --publisher-name "Oracle Error Logger" --set enabled:false - - name: dsconfig_18 - value: set-log-publisher-prop --publisher-name "Oracle Access Logger" --set enabled:false - - name: dsconfig_19 - value: set-log-publisher-prop --publisher-name "Oracle Admin Access Logger" --set enabled:false -# Rebuilding index(es) for attributes ou, c, mobile and title - - name: rebuildIndex_1 - value: --index ou --index c --index mobile --index title - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dir-svc-2 - namespace: %NAMESPACE% - labels: - app: oud-dir-2 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-dir-2 ---- -apiVersion: v1 -kind: Pod -metadata: - name: oud-dir2 - namespace: %NAMESPACE% - labels: - app: oud-dir-2 -spec: - restartPolicy: OnFailure - containers: - - name: oudds - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: instanceType - value: Directory - - name: OUD_INSTANCE_NAME - value: oud-dir2 - - name: hostname - value: oud-dir-svc-2 - - name: baseDN - value: dc=oracle,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: 
%SECRET_NAME% - key: rootUserPassword - - name: sleepBeforeConfig - value: "3" - - name: serverTuning - value: -Xms1024m -Xmx2048m -d64 -XX:+UseCompressedOops -server -Xmn1g -XX:MaxTenuringThreshold=1 -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=60 -# Importing data - - name: ldifFile_1 - value: /u01/oracle/user_projects/ldif_files/input_1million_18july.ldif -# Importing 100 dynamic groups - - name: ldifFile_2 - value: /u01/oracle/user_projects/ldif_files/dgroup_300_19july.ldif -# Importing 100 sysgroups - - name: ldifFile_3 - value: /u01/oracle/user_projects/ldif_files/sysgroup_23july.ldif -# Importing 100 sys dynamic groups - - name: ldifFile_4 - value: /u01/oracle/user_projects/ldif_files/sysdgroup_23july.ldif -# Changing limits - - name: dsconfig_1 - value: set-global-configuration-prop --set lookthrough-limit:75000 - - name: dsconfig_2 - value: set-global-configuration-prop --set size-limit:75000 - - name: dsconfig_3 - value: set-workflow-element-prop --element-name userRoot --set index-entry-limit:75000 -# Creating indexes -# Creating equality type index on ou attribute - - name: dsconfig_4 - value: create-local-db-index --element-name userRoot --index-name ou --set index-type:equality -# Adding substring to index on ou attribute - - name: dsconfig_5 - value: set-local-db-index-prop --element-name userRoot --index-name ou --add index-type:substring -# Creating equality type index on c attribute - - name: dsconfig_6 - value: create-local-db-index --element-name userRoot --index-name c --set index-type:equality -# Adding substring to index on c attribute - - name: dsconfig_7 - value: set-local-db-index-prop --element-name userRoot --index-name c --add index-type:substring -# Creating equality type index on mobile attribute - - name: dsconfig_8 - value: create-local-db-index --element-name userRoot --index-name mobile --set index-type:equality -# Adding substring to index on mobile attribute - - name: dsconfig_9 - value: set-local-db-index-prop --element-name userRoot --index-name mobile --add index-type:substring -# Creating equality type index on title attribute - - name: dsconfig_10 - value: create-local-db-index --element-name userRoot --index-name title --set index-type:equality -# Adding substring to index on title attribute - - name: dsconfig_11 - value: set-local-db-index-prop --element-name userRoot --index-name title --add index-type:substring -# Enabling entry cache preloading... - - name: dsconfig_12 - value: set-global-configuration-prop --set entry-cache-preload:true -# Entry Cache and Group Cache - - name: dsconfig_13 - value: delete-entry-cache --cache-name "Group Cache" -# Creating Entry Cache... - - name: dsconfig_14 - value: create-entry-cache --set cache-level:1 --set enabled:true --type fifo --set max-entries:1100000 --set max-memory-percent:70 --cache-name FIFO_Entry_Cache_1 -# Creating Group Entry Cache... 
- - name: dsconfig_15 - value: create-entry-cache --set cache-level:2 --set enabled:true --type fifo-group --set max-entries:1000 --set max-memory-percent:10 --cache-name FIFO_Group_Entry_Cache_2 -# Access control handlers - - name: dsconfig_16 - value: set-access-control-handler-prop --add 'global-aci:(targetattr="*")(version 3.0; acl "All read and write access"; allow (read,search,compare,add,write,delete,import,export) userdn="ldap:///all"; )' --no-prompt -# Logs disabling - - name: dsconfig_17 - value: set-log-publisher-prop --publisher-name "Oracle Error Logger" --set enabled:false - - name: dsconfig_18 - value: set-log-publisher-prop --publisher-name "Oracle Access Logger" --set enabled:false - - name: dsconfig_19 - value: set-log-publisher-prop --publisher-name "Oracle Admin Access Logger" --set enabled:false -# Rebuilding index(es) for attributes ou, c, mobile and title - - name: rebuildIndex_1 - value: --index ou --index c --index mobile --index title - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 120 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 120 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% diff --git a/OracleUnifiedDirectory/kubernetes/samples/stress-oud-ds-plus-rs-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/stress-oud-ds-plus-rs-svc.yaml deleted file mode 100755 index 3ed260126..000000000 --- a/OracleUnifiedDirectory/kubernetes/samples/stress-oud-ds-plus-rs-svc.yaml +++ /dev/null @@ -1,431 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oud-dsrs-svc-1 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-1 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-ds-plus-rs-1 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs1 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-1 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-svc-1 - containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: sleepBeforeConfig - value: "3" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs1 - - name: hostname - value: oud-dsrs-svc-1 - - name: baseDN - value: dc=oracle,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: serverTuning - value: -Xms1024m -Xmx2048m -d64 -XX:+UseCompressedOops -server -Xmn1g -XX:MaxTenuringThreshold=1 -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=60 -# Changing limits - - name: dsconfig_1 - value: set-global-configuration-prop --set lookthrough-limit:75000 - - name: dsconfig_2 - value: set-global-configuration-prop --set size-limit:75000 - - name: dsconfig_3 - value: set-workflow-element-prop --element-name userRoot --set index-entry-limit:75000 -# Creating indexes -# Creating equality type index on ou attribute - - name: dsconfig_4 - value: create-local-db-index --element-name userRoot --index-name ou --set index-type:equality -# Adding substring to index on ou attribute - - name: dsconfig_5 - value: set-local-db-index-prop --element-name userRoot --index-name ou --add index-type:substring -# Creating equality type index on c attribute - - name: dsconfig_6 - value: create-local-db-index --element-name userRoot --index-name c --set index-type:equality -# Adding substring to index on c attribute - - name: dsconfig_7 - value: set-local-db-index-prop --element-name userRoot --index-name c --add index-type:substring -# Creating equality type index on mobile attribute - - name: dsconfig_8 - value: create-local-db-index --element-name userRoot --index-name mobile --set index-type:equality -# Adding substring to index on mobile attribute - - name: dsconfig_9 - value: set-local-db-index-prop --element-name userRoot --index-name mobile --add index-type:substring -# 
Creating equality type index on title attribute - - name: dsconfig_10 - value: create-local-db-index --element-name userRoot --index-name title --set index-type:equality -# Adding substring to index on title attribute - - name: dsconfig_11 - value: set-local-db-index-prop --element-name userRoot --index-name title --add index-type:substring -# Enabling entry cache preloading... - - name: dsconfig_12 - value: set-global-configuration-prop --set entry-cache-preload:true -# Entry Cache and Group Cache - - name: dsconfig_13 - value: delete-entry-cache --cache-name "Group Cache" -# Creating Entry Cache... - - name: dsconfig_14 - value: create-entry-cache --set cache-level:1 --set enabled:true --type fifo --set max-entries:1100000 --set max-memory-percent:70 --cache-name FIFO_Entry_Cache_1 -# Creating Group Entry Cache... - - name: dsconfig_15 - value: create-entry-cache --set cache-level:2 --set enabled:true --type fifo-group --set max-entries:1000 --set max-memory-percent:10 --cache-name FIFO_Group_Entry_Cache_2 -# Access control handlers - - name: dsconfig_16 - value: set-access-control-handler-prop --add 'global-aci:(targetattr="*")(version 3.0; acl "All read and write access"; allow (read,search,compare,add,write,delete,import,export) userdn="ldap:///all"; )' --no-prompt -# Logs disabling - - name: dsconfig_17 - value: set-log-publisher-prop --publisher-name "Oracle Error Logger" --set enabled:false - - name: dsconfig_18 - value: set-log-publisher-prop --publisher-name "Oracle Access Logger" --set enabled:false - - name: dsconfig_19 - value: set-log-publisher-prop --publisher-name "Oracle Admin Access Logger" --set enabled:false -# Rebuilding index(es) for attributes ou, c, mobile and title - - name: rebuildIndex_1 - value: --index ou --index c --index mobile --index title - - name: sourceHost - value: oud-dir-svc-2 - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${adminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${adminConnectorPort} --replicationPort1 ${replicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${adminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 900 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 900 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 900 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 900 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- -apiVersion: v1 
-kind: Service -metadata: - name: oud-dsrs-svc-2 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-2 -spec: - ports: - - port: 1444 - name: admin-ldaps - targetPort: 1444 - - port: 1888 - name: admin-https - targetPort: 1888 - - port: 1389 - name: ldap - targetPort: 1389 - - port: 1636 - name: ldaps - targetPort: 1636 - - port: 1080 - name: data-http - targetPort: 1080 - - port: 1081 - name: data-https - targetPort: 1081 - - port: 1898 - name: replication - targetPort: 1898 - type: NodePort - selector: - app: oud-ds-plus-rs-2 ---- -apiVersion: v1 -kind: Pod -metadata: - name: ouddsrs2 - namespace: %NAMESPACE% - labels: - app: oud-ds-plus-rs-2 -spec: - restartPolicy: OnFailure - hostname: oud-dsrs-svc-2 - containers: - - name: ouddsrs - image: %IMAGE% - ports: - - containerPort: 1444 - - containerPort: 1888 - - containerPort: 1389 - - containerPort: 1636 - - containerPort: 1080 - - containerPort: 1081 - - containerPort: 1898 - env: - - name: sleepBeforeConfig - value: "1200" - - name: instanceType - value: AddDS2RS - - name: OUD_INSTANCE_NAME - value: ouddsrs2 - - name: hostname - value: oud-dsrs-svc-2 - - name: baseDN - value: dc=oracle,dc=com - - name: rootUserDN - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserDN - - name: rootUserPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: rootUserPassword - - name: adminUID - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUID - - name: adminPassword - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPassword - - name: bindDN1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN1 - - name: bindPassword1 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword1 - - name: bindDN2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindDN2 - - name: bindPassword2 - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: bindPassword2 - - name: serverTuning - value: -Xms1024m -Xmx2048m -d64 -XX:+UseCompressedOops -server -Xmn1g -XX:MaxTenuringThreshold=1 -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=60 -# Changing limits - - name: dsconfig_1 - value: set-global-configuration-prop --set lookthrough-limit:75000 - - name: dsconfig_2 - value: set-global-configuration-prop --set size-limit:75000 - - name: dsconfig_3 - value: set-workflow-element-prop --element-name userRoot --set index-entry-limit:75000 -# Creating indexes -# Creating equality type index on ou attribute - - name: dsconfig_4 - value: create-local-db-index --element-name userRoot --index-name ou --set index-type:equality -# Adding substring to index on ou attribute - - name: dsconfig_5 - value: set-local-db-index-prop --element-name userRoot --index-name ou --add index-type:substring -# Creating equality type index on c attribute - - name: dsconfig_6 - value: create-local-db-index --element-name userRoot --index-name c --set index-type:equality -# Adding substring to index on c attribute - - name: dsconfig_7 - value: set-local-db-index-prop --element-name userRoot --index-name c --add index-type:substring -# Creating equality type index on mobile attribute - - name: dsconfig_8 - value: create-local-db-index --element-name userRoot --index-name mobile --set index-type:equality -# Adding substring to index on mobile attribute - - name: dsconfig_9 - value: set-local-db-index-prop --element-name userRoot --index-name mobile --add index-type:substring -# Creating equality type index on title attribute - - name: dsconfig_10 - value: create-local-db-index --element-name userRoot 
--index-name title --set index-type:equality -# Adding substring to index on title attribute - - name: dsconfig_11 - value: set-local-db-index-prop --element-name userRoot --index-name title --add index-type:substring -# Enabling entry cache preloading... - - name: dsconfig_12 - value: set-global-configuration-prop --set entry-cache-preload:true -# Entry Cache and Group Cache - - name: dsconfig_13 - value: delete-entry-cache --cache-name "Group Cache" -# Creating Entry Cache... - - name: dsconfig_14 - value: create-entry-cache --set cache-level:1 --set enabled:true --type fifo --set max-entries:1100000 --set max-memory-percent:70 --cache-name FIFO_Entry_Cache_1 -# Creating Group Entry Cache... - - name: dsconfig_15 - value: create-entry-cache --set cache-level:2 --set enabled:true --type fifo-group --set max-entries:1000 --set max-memory-percent:10 --cache-name FIFO_Group_Entry_Cache_2 -# Access control handlers - - name: dsconfig_16 - value: set-access-control-handler-prop --add 'global-aci:(targetattr="*")(version 3.0; acl "All read and write access"; allow (read,search,compare,add,write,delete,import,export) userdn="ldap:///all"; )' --no-prompt -# Logs disabling - - name: dsconfig_17 - value: set-log-publisher-prop --publisher-name "Oracle Error Logger" --set enabled:false - - name: dsconfig_18 - value: set-log-publisher-prop --publisher-name "Oracle Access Logger" --set enabled:false - - name: dsconfig_19 - value: set-log-publisher-prop --publisher-name "Oracle Admin Access Logger" --set enabled:false -# Rebuilding index(es) for attributes ou, c, mobile and title - - name: rebuildIndex_1 - value: --index ou --index c --index mobile --index title - - name: sourceHost - value: oud-dir-svc-2 - - name: dsreplication_1 - value: verify --hostname ${sourceHost} --port ${adminConnectorPort} --baseDN ${baseDN} --serverToRemove ${hostname}:${adminConnectorPort} - - name: dsreplication_2 - value: enable --host1 ${sourceHost} --port1 ${adminConnectorPort} --replicationPort1 ${replicationPort} --host2 ${hostname} --port2 ${adminConnectorPort} --replicationPort2 ${replicationPort} --baseDN ${baseDN} - - name: dsreplication_3 - value: initialize --hostSource ${initializeFromHost} --portSource ${adminConnectorPort} --hostDestination ${hostname} --portDestination ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_4 - value: verify --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} - - name: dsreplication_5 - value: status --hostname ${hostname} --port ${adminConnectorPort} --baseDN ${baseDN} --dataToDisplay compat-view - - name: post_dsreplication_dsconfig_1 - value: set-replication-domain-prop --domain-name ${baseDN} --set group-id:2 - - name: post_dsreplication_dsconfig_2 - value: set-replication-server-prop --set group-id:2 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - tcpSocket: - port: 1389 - initialDelaySeconds: 1800 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1444 - initialDelaySeconds: 1800 - periodSeconds: 30 - readinessProbe: - tcpSocket: - port: 1080 - initialDelaySeconds: 1800 - periodSeconds: 30 - readinessProbe: - exec: - command: - - "/u01/oracle/container-scripts/checkOUDInstance.sh" - initialDelaySeconds: 1800 - periodSeconds: 60 - imagePullSecrets: - - name: regcred - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% ---- diff --git a/OracleUnifiedDirectorySM/kubernetes/README.md b/OracleUnifiedDirectorySM/kubernetes/README.md deleted file mode 100755 index 
9a41a6ce4..000000000 --- a/OracleUnifiedDirectorySM/kubernetes/README.md +++ /dev/null @@ -1,418 +0,0 @@ -Oracle Unified Directory Oracle Unified Directory Services Manager (OUDSM) on Kubernetes -============================================================ - -## Contents -1. [Introduction](#introduction) -1. [Hardware and Software Requirements](#hardware-and-software-requirements) -1. [Prerequisites](#prerequisites) -1. [Example 1 OUDSM POD](#example-1-oudsm-pod) -1. [Example 2 OUDSM Deployment](#example-2-oudsm-deployment) -1. [Appendix Reference](#appendix-reference) - -# Introduction -This project offers YAML files and scripts to build Oracle Unified Directory Services Manager (OUDSM) Docker images based on 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. Use these YAML files to facilitate installation, configuration, and environment setup for DevOps users. - -The Docker Image refers to binaries for OUD Release 12.2.1.4.0. - -***Image***: oracle/oudsm:12.2.1.4.0 - -# Hardware and Software Requirements -Oracle Unified Directory Docker Image has been tested and is known to run on following hardware and software: - -## Hardware Requirements - -| Hardware | Size | -| :-------: | :---: | -| RAM | 16GB | -| Disk Space| 200GB+| - -## Software Requirements - -| | Version | Command to verify version | -| :---: | :----------------------------: | :-----------------------: | -| OS | Oracle Linux 7.3 or higher | more /etc/oracle-release | -| Docker| Docker version 18.03 or higher | docker version | -| K8s | Kubernetes version 1.16.0+ | kubectl version - -# Prerequisites - - -## Verify OS Version -OS version should be Oracle Linux 7.3 or higher. To check this, issue the following command: - - # more /etc/oracle-release - Oracle Linux Server release 7.5 - -## Verify Docker Version and OUD Image -Docker version should be 18.03 or higher. To check this, issue the following command: - - # docker version - Client: Docker Engine - Community - Version: 18.09.8-ol - ... - -The Oracle Unified Directory Image for 12cPS4 (12.2.1.4.0) should be loaded into Docker. Verify this by running the following: - - # docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - oracle/oudsm 12.2.1.4.0 4aefb2e19cd6 2 days ago 2.6GB - ... - -## Verify Kubernetes Version -Kubernetes version should be 1.16.0 or higher. Verify by running the following: - - # kubectl version - Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-17T11:41:22Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"} - Server Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-17T11:33:59Z", GoVersion:"go1.13.9", Compiler:"gc", Platform:"linux/amd64"} - -## Create Kubernetes Namespace -You should create a Kubernetes namespace to provide a scope for other objects such as pods and services that you create in the environment. To create your namespace you should refer to the samples/oudsmns.yaml file. - -Update the samples/oudsmns.yaml file and replace %NAMESPACE% with the value of the namespace you would like to create. In the example below the value 'oudsmns' is used. - -To create the namespace apply the file using kubectl: - - # kubectl apply -f samples/oudsmns.yaml - namespace/oudsmns created - -Confirm that the namespace is created: - -
   # kubectl get namespaces
-    NAME          STATUS   AGE
-    default       Active   8d
-    kube-public   Active   8d
-    kube-system   Active   8d
-    oudsmns       Active   87s
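As an alternative to editing and applying the sample file, the namespace can also be created directly with kubectl. This is a minimal sketch, assuming the example namespace 'oudsmns' used above:

    # kubectl create namespace oudsmns
    namespace/oudsmns created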
- -## Create Secrets for User IDs and Passwords - -To protect sensitive information, namely user IDs and passwords, you should create Kubernetes Secrets for the key-value pairs with following keys. The Secret with key-value pairs will be used to pass values to containers created through OUDSM image: - -* adminUser -* adminPass - -There are two ways by which Secret object can be created with required key-value pairs. - -### Using samples/secrets.yaml file - -To do this you should update the samples/secrets.yaml file with the value for %SECRET_NAME% and %NAMESPACE%, together with the Base64 value for each secret. - -* %adminUser% - With Base64 encoded value for adminUser parameter. -* %adminPass% - With Base64 encoded value for adminPass parameter. - -Obtain the base64 value for your secrets: - - # echo -n weblogic | base64 - d2VibG9naWM= - # echo -n Oracle123 | base64 - T3JhY2xlMTIz - -**Note**: Please make sure to use -n with echo command. Without that, Base64 values would be generated with new-line character included. - -Update the samples/secrets.yaml file with your values. It should look similar to the file shown below: - - apiVersion: v1 - kind: Secret - metadata: - name: oudsecret - namespace: oudsmns - type: Opaque - data: - adminUser: d2VibG9naWM= - adminPass: T3JhY2xlMTIz - -Apply the file: - - # kubectl apply -f samples/secrets.yaml - secret/oudsecret created - -Verify that the secret has been created: - - # kubectl --namespace oudsmns get secret - NAME TYPE DATA AGE - default-token-l5nwd kubernetes.io/service-account-token 3 7m10s - oudsecret Opaque 2 27s - -### Using `kubectl create secret` command - -Kubernetes Secret can be created using following command: - - # kubectl --namespace %NAMESPACE% create secret generic %SECRET_NAME% \ - --from-literal=adminUser="%adminUser%" \ - --from-literal=adminPass="%adminPass%" - -In the command mentioned above, following placeholders are required to be updated: - -* %NAMESPACE% - With name of namespace in which secret is required to be created -* %SECRET_NAME% - Name for the secret object -* %adminUser% - With Base64 encoded value for adminUser parameter. -* %adminPass% - With Base64 encoded value for adminPass parameter. - -After executing `kubectl create secret ...` command, verify that the secret has been created: - - # kubectl --namespace oudsmns get secret - NAME TYPE DATA AGE - default-token-l5nwd kubernetes.io/service-account-token 3 7m10s - oudsecret Opaque 2 27s - -## Create PersistentVolume (PV) and PersistentVolumeClaim (PVC) for your Namespace -A PV is storage resource, while PVC is a request for that resource. To provide storage for your namespace, update the samples/persistent-volume.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :------------: | :-------------------------: | :-------------------: | -| %PV_NAME% | PV name | oudsmpv | -| %PV_HOST_PATH% | Valid path on localhost |/u01/app/oracle/mydir | -| %PVC_NAME% | PVC name | oudsmpvc | -| %NAMESPACE% | Namespace | oudsmns | - -Apply the file: - - # kubectl apply -f samples/persistent-volume.yaml - persistentvolume/oudsmpv created - persistentvolumeclaim/oudsmpvc created - -Verify the PersistentVolume: - - # kubectl --namespace oudsmns describe persistentvolume oudsmpv - Name: oudsmpv - Labels: type=local - Annotations: kubectl.kubernetes.io/last-applied-configuration: - {"apiVersion":"v1","kind":"PersistentVolume","metadata":{"annotations":{},"labels":{"type":"local"},"name":"oudsmpv"},"spec":{"accessModes... 
- Finalizers: [kubernetes.io/pv-protection] - StorageClass: - Status: Available - Claim: - Reclaim Policy: Retain - Access Modes: RWX - VolumeMode: Filesystem - Capacity: 10Gi - Node Affinity: - Message: - Source: - Type: HostPath (bare host directory volume) - Path: /scratch/beta/user_projects - HostPathType: - Events: - -Verify the PersistentVolumeClaim: - - # kubectl --namespace oudsmns describe pvc oudsmpvc - Name: oudsmpvc - Namespace: oudsmns - StorageClass: - Status: Bound - Volume: oud-ds-rs-1585148421-pv - Labels: - Annotations: kubectl.kubernetes.io/last-applied-configuration: - {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"oudsmpvc","namespace":"oudsmns"},"spec":{"accessMod... - pv.kubernetes.io/bind-completed: yes - pv.kubernetes.io/bound-by-controller: yes - Finalizers: [kubernetes.io/pvc-protection] - Capacity: 10Gi - Access Modes: RWX - VolumeMode: Filesystem - Events: - Mounted By: - -# Example 1 OUDSM POD - -In this example you create a POD (oudsmpod) which holds a single container based on an Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) image. This container is configured to run Oracle Unified Directory Services Manager (OUDSM). You also create a service (oudsm) through which you can access the OUDSM GUI. - -To create the POD update the samples/oudsm-pod.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :---------------------: | -| %NAMESPACE% | Namespace | oudsmns | -| %IMAGE% | Oracle image tag | oracle/oudsm:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | -| %PV_NAME% | PV name | oudsmpv | -| %PVC_NAME% | PVC name | oudsmpvc | - -Apply the file: - - # kubectl apply -f samples/oudsm-pod.yaml - namespace/oudsmns unchanged - service/oudsm created - pod/oudsmpod created - -To check the status of the created pod: - - # kubectl get pods -n oudsmns - NAME READY STATUS RESTARTS AGE - oudsmpod 0/1 Error 0 20m - - -If you see any errors then use the following commands to debug the pod/container. - -To review issues with the pod e.g. CreateContainerConfigError: - - # kubectl --namespace describe pod - -For example: - - # kubectl --namespace oudsmns describe pod oudsmpod - -To tail the container logs while it is initialising use the following command: - - # kubectl --namespace logs -f -c - -For example: - - # kubectl --namespace oudsmns logs -f -c oudsm oudsmpod - -To view the full container logs: - - # kubectl --namespace logs -c - -To validate that the POD is running: - - # kubectl --namespace get all,pv,pvc,secret - -For example: - -
   # kubectl --namespace oudsmns get all,pv,pvc,secret
-    NAME           READY   STATUS    RESTARTS   AGE
-    pod/oudsmpod   1/1     Running   0          15m
-    
-    NAME            TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)                         AGE
-    service/oudsm   NodePort   10.101.73.196   <none>        7001:31421/TCP,7002:31737/TCP   15m
-    
-    NAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS   REASON   AGE
-    persistentvolume/oudsmpv   10Gi       RWX            Retain           Bound    oudsmns/oudsmpvc                           18m
-    
-    NAME                             STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
-    persistentvolumeclaim/oudsmpvc   Bound    oudsmpv   10Gi       RWX                           18m
-    
-    NAME                         TYPE                                  DATA   AGE
-    secret/default-token-wp4gx   kubernetes.io/service-account-token   3      34m
-    secret/oudsecret             Opaque                                2      19m
- -Once the container is running (READY shows as '1/1') check the value of the service port (PORT/s value : here 7001:31421/TCP,7002:31737/TCP) for the OUDSM service and use this to access OUDSM in a browser: - - http://:/oudsm - -In the case here: - - http://:31421/oudsm - -# Example 2 OUDSM Deployment - -In this example you create multiple OUDSM PODs/Services using Kubernetes deployments. - -To create the deployment update the samples/oudsm-deployment.yaml file. - -Update the following to values specific to your environment: - -| Param | Value | Example | -| :-----------: | :-------------------------: | :---------------------: | -| %NAMESPACE% | Namespace | oudsmns | -| %IMAGE% | Oracle image tag | oracle/oudsm:12.2.1.4.0 | -| %SECRET_NAME% | Secret name | oudsecret | - -Apply the file: - - # kubectl apply -f samples/oudsm-deployment.yaml - namespace/oudsmns unchanged - service/oudsm configured - deployment.apps/oudsmdeploypod created - -To validate that the POD is running: - - # kubectl --namespace get all,pv,pvc,secret - -For example: - - # kubectl --namespace oudsmns get all,pv,pvc,secret - - -For example: - - # kubectl --namespace oudsmns get all,pv,pvc,secret - NAME READY STATUS RESTARTS AGE - pod/oudsmdeploypod-7bb67b685c-78sq5 1/1 Running 0 12m - pod/oudsmdeploypod-7bb67b685c-xssbq 1/1 Running 0 12m - - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/oudsm NodePort 10.102.47.146 7001:30489/TCP,7002:31588/TCP 12m - - NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/oudsmdeploypod 2/2 2 2 12m - - NAME DESIRED CURRENT READY AGE - replicaset.apps/oudsmdeploypod-7bb67b685c 2 2 2 12m - - NAME TYPE DATA AGE - secret/default-token-clzx7 kubernetes.io/service-account-token 3 68m - secret/oudsecret Opaque 2 67m - -Once the container is running (READY shows as '1/1') check the value of the service port (PORT/s value : here 7001:31421/TCP,7002:31737/TCP) for the OUDSM service and use this to access OUDSM in a browser: - - http://:/oudsm - -In the case here: - - http://:30489/oudsm - -Notice that in the output above we have created 2 OUDSM PODs (pod/oudsmdeploypod-7bb67b685c-78sq5, pod/oudsmdeploypod-7bb67b685c-xssbq) which are accessed via a service (service/oudsm). - -The number of PODs is governed by the replicas parameter in the samples/oudsm-deployment.yaml file: - -
...
-        kind: Deployment
-        metadata:
-          name: oudsmdeploypod
-          namespace: oudsmns
-          labels:
-            app: oudsmdeploypod
-        spec:
-        replicas: 2
-        selector:
-            matchLabels:
-            app: oudsmdeploypod
-        ...
- -If you have a requirement to add additional PODs to your cluster you can update the samples/oudsm-deployment.yaml file with the new value for replicas and apply the file. For example, setting replicas to '3' would start an additional POD as shown below: - - # kubectl apply -f samples/oudsm-deployment.yaml.tmp - namespace/oudsmns unchanged - service/oudsm unchanged - deployment.apps/oudsmdeploypod configured - -
   # kubectl --namespace oudsmns get all,pv,pvc,secret
-    NAME                                  READY   STATUS    RESTARTS   AGE
-    pod/oudsmdeploypod-7bb67b685c-78sq5   1/1     Running   0          105m
-    pod/oudsmdeploypod-7bb67b685c-sv9ms   1/1     Running   0          76m
-    pod/oudsmdeploypod-7bb67b685c-xssbq   1/1     Running   0          105m
-    
-    NAME            TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)                         AGE
-    service/oudsm   NodePort   10.102.47.146   <none>        7001:30489/TCP,7002:31588/TCP   105m
-
-    NAME                             READY   UP-TO-DATE   AVAILABLE   AGE
-    deployment.apps/oudsmdeploypod   3/3     3            3           105m
-
-    NAME                                        DESIRED   CURRENT   READY   AGE
-    replicaset.apps/oudsmdeploypod-7bb67b685c   3         3         3       105m
-
-    NAME                         TYPE                                  DATA   AGE
-    secret/default-token-clzx7   kubernetes.io/service-account-token   3      161m
-    secret/oudsecret             Opaque                                2      160m
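If you only need to scale out temporarily, the same result can be achieved without editing the file by scaling the deployment directly. This is a minimal sketch, assuming the deployment name (oudsmdeploypod) and namespace (oudsmns) used in this example; note that re-applying samples/oudsm-deployment.yaml later resets the replica count to the value defined in the file:

    # kubectl --namespace oudsmns scale deployment oudsmdeploypod --replicas=3
    deployment.apps/oudsmdeploypod scaled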
- -# Appendix Reference - -1. **samples/oudsm-pod.yaml** : This yaml file is used to create the pod and bring up the OUDSM services -2. **samples/oudsm-deployment.yaml** : This yaml file is used to create replicas of OUDSM and bring up the OUDSM services based on the deployment - -# Licensing & Copyright - -## License
-To download and run Oracle Fusion Middleware products, regardless of whether they run inside or outside a Docker container, you must download the binaries from the Oracle website and accept the license indicated on that page.

- -All scripts and files hosted in this project and GitHub [fmw-kubernetes/OracleUnifiedDirectorySM](./) repository required to build the Docker images are, unless otherwise noted, released under [UPL 1.0](https://oss.oracle.com/licenses/upl/) license.

- -## Copyright
-Copyright (c) 2020, Oracle and/or its affiliates.
-Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl

diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash-configMap.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash-configMap.yaml index b4af97773..5a1107be4 100755 --- a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash-configMap.yaml +++ b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash-configMap.yaml @@ -5,12 +5,15 @@ # https://oss.oracle.com/licenses/upl # # -{{- if and .Values.elk.enabled (not .Values.elk.logstash.logstashConfigMap) }} +{{- if and .Values.elk.IntegrationEnabled (not .Values.elk.logstashConfigMap) }} apiVersion: v1 kind: ConfigMap metadata: name: {{ include "oudsm.fullname" . }}-logstash-configmap data: + logstash.yml: | + #http.host: "0.0.0.0" + elk.crt: {{ .Values.elk.escert | toYaml | indent 4 }} logstash-config.conf: | input { file { @@ -19,23 +22,34 @@ data: start_position => beginning sincedb_path => "/dev/null" } - } + } filter { if [type] == "setup-logs" { grok { match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:hostserver}> %{GREEDYDATA:message}" ] } - } + } + if "_grokparsefailure" in [tags] { mutate { remove_tag => [ "_grokparsefailure" ] } } } - output { + output { elasticsearch { - hosts => ["{{ include "oudsm.fullname" . }}-elasticsearch:9200"] + hosts => ["{{ .Values.elk.eshosts }}"] + cacert => '/usr/share/logstash/config/certs/elk.crt' + index => "{{ .Values.elk.esindex }}" + ssl => {{ .Values.elk.sslenabled }} + ssl_certificate_verification => false + {{- if .Values.elk.espassword }} + user => "{{ .Values.elk.esuser }}" + password => "${ELASTICSEARCH_PASSWORD}" + {{- end }} + {{- if .Values.elk.esapikey }} + api_key => "${ELASTICSEARCH_PASSWORD}" + {{- end }} } } {{- end -}} - diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash.yaml index 9a2784512..896b3e1c3 100755 --- a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash.yaml +++ b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash.yaml @@ -5,7 +5,7 @@ # https://oss.oracle.com/licenses/upl # # -{{- if .Values.elk.enabled }} +{{- if .Values.elk.IntegrationEnabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -20,39 +20,58 @@ spec: labels: app: logstash spec: - volumes: - - name: {{ include "oudsm.fullname" . }}-pv - persistentVolumeClaim: - claimName: {{ include "oudsm.fullname" . }}-pvc - - name: shared-logs - emptyDir: {} containers: - name: {{ include "oudsm.fullname" . }}-logstash - image: {{ .Values.elk.logstash.image.repository }}:{{ .Values.elk.logstash.image.tag }} + env: + - name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-pw-elastic + key: password + image: {{ .Values.elk.logStashImage }} ports: - - containerPort: {{ .Values.elk.logstash.containerPort }} + - containerPort: 5044 name: logstash command: - logstash - #command: ["/bin/sh"] - #args: ["/usr/share/logstash/bin/logstash", "-f", /srv/nfs/oudelk/oud_user_projects/logstash/config/logstash-config.conf ] imagePullPolicy: IfNotPresent volumeMounts: - name: {{ include "oudsm.fullname" . }}-pipeline mountPath: /usr/share/logstash/pipeline/ - name : {{ include "oudsm.fullname" . 
}}-pv mountPath: /u01/oracle/user_projects + - mountPath: /usr/share/logstash/config/logstash.yml + subPath: logstash.yml + name: config-volume + - mountPath: /usr/share/logstash/config/certs + name: elk-cert volumes: - name: {{ include "oudsm.fullname" . }}-pipeline configMap: - {{- if .Values.elk.logstash.logstashConfigMap }} - name: {{ .Values.elk.logstash.logstashConfigMap }} + {{- if .Values.elk.logstashConfigMap }} + name: {{ .Values.elk.logstashConfigMap }} {{- else }} name: {{ include "oudsm.fullname" . }}-logstash-configmap items: - key: logstash-config.conf path: logstash-config.conf {{- end }} + - configMap: + defaultMode: 420 + items: + - key: elk.crt + path: elk.crt + name: {{ include "oudsm.fullname" . }}-logstash-configmap + name: elk-cert + - configMap: + defaultMode: 420 + items: + - key: logstash.yml + path: logstash.yml + name: {{ include "oudsm.fullname" . }}-logstash-configmap + name: config-volume + - name: shared-logs + emptyDir: {} - name: {{ include "oudsm.fullname" . }}-pv persistentVolumeClaim: claimName: {{ include "oudsm.fullname" . }}-pvc @@ -60,4 +79,5 @@ spec: imagePullSecrets: {{- toYaml . | nindent 6 }} {{- end }} -{{- end }} + +{{- end }} diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/values.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/values.yaml index 150ca9abb..22c3812ad 100755 --- a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/values.yaml +++ b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/values.yaml @@ -1,7 +1,7 @@ # # Copyright (c) 2020, 2022, Oracle and/or its affiliates. # -# Licensed under the Universal Permissive License v 1.0 as shown at +# Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl # # Default values for oudsm. @@ -123,9 +123,9 @@ persistence: # if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume enabled: true # provided the pvname to use an already created Persistent Volume. If blank, will use the default name from Chart - pvname: + pvname: # provided the pvname to use an already created Persistent Volume Claim. If blank, will use default name from Chart - pvcname: + pvcname: accessMode: ReadWriteMany size: 20Gi reclaimPolicy: "Delete" @@ -138,7 +138,7 @@ persistence: server: 0.0.0.0 filesystem: hostPath: - # The path location mentioned should be created and made accessible with necessary privileges for access from pods/containers. + # The path location mentioned should be created and made accessible with necessary privileges for access from pods/containers. path: /scratch/shared/oudsm_user_projects custom: # YAML content to be included in PersistenceVolume Object @@ -157,104 +157,39 @@ oudsm: # Whether to invoke setWeblogicPluginEnabled or not weblogicPluginEnabled: "true" - +# Configuration for Logstash deployment elk: - # Enabled flag to enable the integrated ELK stack for OUDSM - enabled: false - elasticsearch: - image: - repository: docker.elastic.co/elasticsearch/elasticsearch - tag: 6.8.0 - pullPolicy: IfNotPresent - # esreplicas is the number of replicas of ElasticSearch deployment to be created - esreplicas: 1 - #minimumMasterNodes is the the minimum number of master nodes that needs to configured. This will use the number of replicas (esreplicas / 2) + 1 - minimumMasterNodes: 1 - # Java options for Elasticsearch. 
This is where you should configure the jvm heap size - esJAVAOpts: "-Xms512m -Xmx512m" - #sysctl vm.max_map_count needed for Elasticsearch - sysctlVmMaxMapCount: 262144 - ## cpu resources for elastic search - resources: - requests: - cpu: "100m" - limits: - cpu: "1000m" - - esService: - # Type of Service to be created for elastic search interfaces - type: ClusterIP - # Service Type for loadbalancer services exposing elastic search. - lbrtype: ClusterIP - - # Kibana configuration parameters - kibana: - image: - repository: docker.elastic.co/kibana/kibana - tag: 6.8.0 - pullPolicy: IfNotPresent - #Number of Kibana instances will be created - kibanaReplicas : 1 - #Type of kibana service to be created - #Port on which the kibana will be accessed inside the cluster - #nodePort is the port on which kibana service will be accessed from outside - service: - type: NodePort - targetPort: 5601 - nodePort: 31195 - - # Logstash Configuration parameters - logstash: - image: - repository: logstash - tag: 6.6.0 - pullPolicy: IfNotPresent - ##Port on which the logstash container will be running - containerPort : 5044 - #targetPort Port on which the logstash will be accessed with in the cluster - #nodePort Port on which the logstash will be accessed outside the cluster - service: - type: NodePort - targetPort: 9600 - nodePort: 32222 - # If logstashConfigMap is empty, Then default logstashConfigMap will be created and used. - logstashConfigMap: - - # ELK Ports on which elastic search will be listening across the nodes and outside the nodes - elkPorts: - rest: 9200 - internode: 9300 - #Image used for initiContainers - busybox: - image: busybox - + # Enabled flag to enable the integrated ELK stack for OUD + enabled: false imagePullSecrets: - name: dockercred - - -elkVolume: - # If enabled, it will use the persistent volume. - # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: false - # Path at which the volume would be mounted - mountPath: /usr/share/elasticsearch/data - # provided the pvname to use an already created Persistent Volume. If blank, will use the default name from Chart - pvname: - # provided the pvname to use an already created Persistent Volume Claim. If blank, will use default name from Chart - #pvcname: - accessMode: ReadWriteMany - size: 20Gi - storageClass: elk-oudsm -# default supported values: either filesystem or networkstorage or custom - type: filesystem - networkstorage: - nfs: - path: /scratch/shared/oudsm_elk/data - server: 0.0.0.0 - filesystem: - hostPath: - # The path location mentioned should be created and made accessible with necessary privileges for access from pods/containers. 
- path: /scratch/shared/oudsm_user_projects/data - custom: - # YAML content to be included in PersistenceVolume Object - annotations: {} + IntegrationEnabled: false + logStashImage: logstash:8.3.1 + logstashConfigMap: + esindex: oudsmlogs-00001 + eshosts: http://elasticsearch.oudsmns.svc.cluster.local:9200 + sslenabled: false + esuser: logstash_internal + espassword: elasticsearch-pw-elastic + esapikey: + escert: | + -----BEGIN CERTIFICATE----- + MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75NvPPQUN5kXqNQUwDQYJKoZIhvcNAQELBQAw + NTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJj + aC1odHRwMB4XDTIyMDgyNDA1MTU1OVoXDTIzMDgyNDA1MjU1OVowNTEWMBQGA1UE + CxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJjaC1odHRwMIIB + IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsQOnxUm9uF32+lyc9SA3WcMZ + P1X7TbHMDuO/l3UHBUf5F/bt2m3YkGw+enIos9wzuUNpjIwVt8q4WrRCMl80nAQ0 + yCXrfLSI9zaHxEC8Ht7V0U+7Sgu5uysD4tyZ9T0Q5zjvkWS6oBPxhfri3OQfPvUW + gQ6wJaPGDteYZAwiBMvPEkmh0VUTBTXjToHrtrT7pzmz5BBWnUzdf+jv0+nEfedm + mMWw/8jqyqid7bu7bo6gKBZ8zk06n2iMaXzmGW34QlYRLBgubThhxyDE7joZ4NTA + UFEJecZR2fccmpN8CNkT9Ex4Hq88nh2OP5XKKPNF4kLh2u6F4auF7Uz42jwvIwID + AQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG + AQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLQb/IjHHkSmHgKSPY7r + zBIJZMbdMA0GCSqGSIb3DQEBCwUAA4IBAQA01qY0tGIPsKNkn7blxRjEYkTg59Z5 + vi6MCpGtdoyZeJgH621IpwyB34Hpu1RQfyg1aNgmOtIK9cvQZRl008DHF4AiHYhU + 6xe3cjI/QxDXwitoBgWl+a0mkwhSmzJt7TuzImq7RMO4ws3M/nGeNUwFjwsQu86+ + N/Y3RuuUVbK1xy8Jdz3FZADIgHVPN6GQwYKEpWrZNapKBXjunjCZmpBFxqGMRF44 + fcSKFlFkwjyTq4kgq44NPv18NMfKCYZcK7ttRTiep77vKB7No/TM69Oz5ZHhQ+2Q + pSGg3QF+1fOCFCgWXFEOle6lQ5i8a/GihY0FuphrZxP9ovZ/EKPpE6Gq + -----END CERTIFICATE----- diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-deployment.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-deployment.yaml deleted file mode 100755 index 4f64f2c2c..000000000 --- a/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-deployment.yaml +++ /dev/null @@ -1,88 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oudsm - namespace: %NAMESPACE% - labels: - app: oudsmdeploypod -spec: - ports: - - port: 7001 - name: admin-http - targetPort: 7001 - - port: 7002 - name: admin-https - targetPort: 7002 - type: NodePort - sessionAffinity: "ClientIP" - selector: - app: oudsmdeploypod ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: oudsmdeploypod - namespace: %NAMESPACE% - labels: - app: oudsmdeploypod -spec: - replicas: 2 - selector: - matchLabels: - app: oudsmdeploypod - template: - metadata: - name: oudsmdeploypod - labels: - app: oudsmdeploypod - spec: - hostname: oudsmdeploypod - containers: - - name: oudsm - image: %IMAGE% - env: - - name: DOMAIN_NAME - value: "oudsm_domain" - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUser - - name: ADMIN_PASS - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPass - - name: ADMIN_PORT - value: "7001" - - name: ADMIN_SSL_PORT - value: "7002" - ports: - - containerPort: 7001 - - containerPort: 7002 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: oudsm-dep-storage - subPath: user_projects - readinessProbe: - httpGet: - path: /console - port: 7001 - initialDelaySeconds: 660 - periodSeconds: 5 - httpGet: - path: /oudsm - port: 7001 - initialDelaySeconds: 660 - periodSeconds: 5 - imagePullPolicy: IfNotPresent - volumes: - - name: oudsm-dep-storage - emptyDir: {} diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-pod.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-pod.yaml deleted file mode 100755 index 172d986cb..000000000 --- a/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-pod.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Service -metadata: - name: oudsm-svc - namespace: %NAMESPACE% - labels: - app: oudsmpod -spec: - ports: - - port: 7001 - name: admin-http - targetPort: 7001 - - port: 7002 - name: admin-https - targetPort: 7002 - type: NodePort - sessionAffinity: "ClientIP" - selector: - app: oudsmpod ---- -apiVersion: v1 -kind: Pod -metadata: - name: oudsmpod - namespace: %NAMESPACE% - labels: - app: oudsmpod -spec: - restartPolicy: Never - containers: - - name: oudsm - image: %IMAGE% - env: - - name: DOMAIN_NAME - value: "oudsm_domain" - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminUser - - name: ADMIN_PASS - valueFrom: - secretKeyRef: - name: %SECRET_NAME% - key: adminPass - - name: ADMIN_PORT - value: "7001" - - name: ADMIN_SSL_PORT - value: "7002" - ports: - - containerPort: 7001 - - containerPort: 7002 - volumeMounts: - - mountPath: /u01/oracle/user_projects - name: %PV_NAME% - readinessProbe: - httpGet: - path: /console - port: 7001 - initialDelaySeconds: 660 - periodSeconds: 5 - httpGet: - path: /oudsm - port: 7001 - initialDelaySeconds: 660 - periodSeconds: 5 - imagePullPolicy: IfNotPresent - volumes: - - name: %PV_NAME% - persistentVolumeClaim: - claimName: %PVC_NAME% diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/oudsmns.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/oudsmns.yaml deleted file mode 100755 index 40678b0d6..000000000 --- a/OracleUnifiedDirectorySM/kubernetes/samples/oudsmns.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# ---- -apiVersion: v1 -kind: Namespace -metadata: - name: %NAMESPACE% diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/persistent-volume.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/persistent-volume.yaml deleted file mode 100755 index f607116f0..000000000 --- a/OracleUnifiedDirectorySM/kubernetes/samples/persistent-volume.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. -# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# -# -kind: PersistentVolume -apiVersion: v1 -metadata: - name: %PV_NAME% - namespace: %NAMESPACE% - labels: - type: oud-pv -spec: - storageClassName: manual - capacity: - storage: 10Gi - persistentVolumeReclaimPolicy: "Delete" - accessModes: - - ReadWriteMany - hostPath: - path: "%PV_HOST_PATH%" ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: %PVC_NAME% - namespace: %NAMESPACE% -spec: - storageClassName: manual - selector: - matchLabels: - type: oud-pv - accessModes: - - ReadWriteMany - resources: - requests: - storage: 10Gi diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/secrets.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/secrets.yaml deleted file mode 100755 index 7060ba854..000000000 --- a/OracleUnifiedDirectorySM/kubernetes/samples/secrets.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# -# Copyright (c) 2020, Oracle and/or its affiliates. 
-# -# Licensed under the Universal Permissive License v 1.0 as shown at -# https://oss.oracle.com/licenses/upl -# -# -apiVersion: v1 -kind: Secret -metadata: - name: %SECRET_NAME% - namespace: %NAMESPACE% -type: Opaque -data: - adminUser: %adminUser% - adminPass: %adminPass% diff --git a/docs-source/content/oam/_index.md b/docs-source/content/oam/_index.md index 2ed7d714b..1dff411ee 100644 --- a/docs-source/content/oam/_index.md +++ b/docs-source/content/oam/_index.md @@ -22,10 +22,9 @@ environment. You can: ### Current production release -The current production release for the Oracle Access Management domain deployment on Kubernetes is [22.3.1](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version 3.3.0. +The current production release for the Oracle Access Management domain deployment on Kubernetes is [22.4.1](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version 3.4.2. -This release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. -For 3.0.X WebLogic Kubernetes Operator refer to [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oam/) +For 3.3.X WebLogic Kubernetes Operator refer to [Version 22.3.1](https://oracle.github.io/fmw-kubernetes/22.3.1/oam/) ### Recent changes and known issues @@ -46,6 +45,7 @@ If performing an Enterprise Deployment where multiple Oracle Identity Management To view documentation for an earlier release, see: +* [Version 22.3.1](https://oracle.github.io/fmw-kubernetes/22.3.1/oam/) * [Version 22.2.1](https://oracle.github.io/fmw-kubernetes/22.2.1/oam/) * [Version 21.4.2](https://oracle.github.io/fmw-kubernetes/21.4.2/oam/) * [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oam/) diff --git a/docs-source/content/oam/configure-ingress/_index.md b/docs-source/content/oam/configure-ingress/_index.md index 391e45d22..8806ecfff 100644 --- a/docs-source/content/oam/configure-ingress/_index.md +++ b/docs-source/content/oam/configure-ingress/_index.md @@ -124,7 +124,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac ``` NAME: nginx-ingress - LAST DEPLOYED: Mon Jul 12 13:57:21 2022 + LAST DEPLOYED: NAMESPACE: oamns STATUS: deployed @@ -193,7 +193,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac $ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress - LAST DEPLOYED: Mon Jul 12 13:57:21 2022 + LAST DEPLOYED: NAMESPACE: nginxssl STATUS: deployed REVISION: 1 @@ -262,7 +262,9 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac # Type of Configuration Supported Values are : SSL and NONSSL sslType: SSL - # domainType Supported values are soa,osb and soaosb. + # domainType. Supported values are: oam + domainType: oam + #WLS domain as backend to the load balancer wlsDomain: @@ -276,8 +278,15 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac policyClusterName: policy_cluster policyManagedServerPort: 15100 policyManagedServerSSLPort: + + # Host specific values + hostName: + enabled: false + admin: + runtime: ``` - + + 1. 
Run the following helm command to install the ingress: ```bash @@ -296,7 +305,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac ``` NAME: oam-nginx - LAST DEPLOYED: Mon Jul 12 14:01:01 2022 + LAST DEPLOYED: NAMESPACE: oamns STATUS: deployed REVISION: 1 @@ -333,7 +342,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac ```bash $ kubectl --namespace oamns get services -o jsonpath="{.spec.ports[1].nodePort}" nginx-ingress-ingress-nginx-controller ``` - + The output will look similar to the following: ``` @@ -343,60 +352,64 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac 1. Run the following command to check the ingress: ```bash - $ kubectl describe ing access-ingress -n + $ kubectl describe ing -nginx -n ``` For example: ```bash - $ kubectl describe ing access-ingress -n oamns + $ kubectl describe ing accessdomain-nginx -n oamns ``` The output will look similar to the following: ``` - Name: access-ingress + Name: accessdomain-nginx Namespace: oamns - Address: 10.101.132.251 + Address: 10.106.70.55 Default backend: default-http-backend:80 () Rules: Host Path Backends ---- ---- -------- * - /console accessdomain-adminserver:7001 (10.244.6.63:7001) - /rreg/rreg accessdomain-adminserver:7001 (10.244.6.63:7001) - /em accessdomain-adminserver:7001 (10.244.6.63:7001) - /oamconsole accessdomain-adminserver:7001 (10.244.6.63:7001) - /dms accessdomain-adminserver:7001 (10.244.6.63:7001) - /oam/services/rest accessdomain-adminserver:7001 (10.244.6.63:7001) - /iam/admin/config accessdomain-adminserver:7001 (10.244.6.63:7001) - /iam/admin/diag accessdomain-adminserver:7001 (10.244.6.63:7001) - /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) - /oam/admin/api accessdomain-adminserver:7001 (10.244.6.63:7001) - /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) - /access accessdomain-cluster-policy-cluster:15100 (10.244.5.13:15100,10.244.6.65:15100) - / accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) + /console accessdomain-adminserver:7001 (10.244.1.18:7001) + /consolehelp accessdomain-adminserver:7001 (10.244.1.18:7001) + /rreg/rreg accessdomain-adminserver:7001 (10.244.1.18:7001) + /em accessdomain-adminserver:7001 (10.244.1.18:7001) + /oamconsole accessdomain-adminserver:7001 (10.244.1.18:7001) + /dms accessdomain-adminserver:7001 (10.244.1.18:7001) + /oam/services/rest accessdomain-adminserver:7001 (10.244.1.18:7001) + /iam/admin/config accessdomain-adminserver:7001 (10.244.1.18:7001) + /iam/admin/diag accessdomain-adminserver:7001 (10.244.1.18:7001) + /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100) + /oam/admin/api accessdomain-adminserver:7001 (10.244.1.18:7001) + /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100) + /access accessdomain-cluster-policy-cluster:15100 (10.244.1.19:15100,10.244.2.12:15100) + / accessdomain-cluster-oam-cluster:14100 (10.244.1.20:14100,10.244.2.13:14100) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: oam-nginx meta.helm.sh/release-namespace: oamns nginx.ingress.kubernetes.io/configuration-snippet: + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; more_set_input_headers "X-Forwarded-Proto: https"; more_set_input_headers "WL-Proxy-SSL: true"; nginx.ingress.kubernetes.io/enable-access-log: false 
nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Sync 6m22s (x2 over 6m31s) nginx-ingress-controller Scheduled for sync + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Sync 14m (x2 over 15m) nginx-ingress-controller Scheduled for sync ``` -1. To confirm that the new ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the `WebLogic ReadyApp framework`: +1. To confirm that the new ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the 'WebLogic ReadyApp framework': ```bash $ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready ``` + + For example: @@ -422,8 +435,8 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com - * start date: Jul 12 14:31:07 2021 GMT - * expire date: Jul 12 14:31:07 2022 GMT + * start date: + * expire date: * common name: masternode.example.com * issuer: CN=masternode.example.com > GET /weblogic/ready HTTP/1.1 diff --git a/docs-source/content/oam/create-oam-domains/_index.md b/docs-source/content/oam/create-oam-domains/_index.md index aa83bc502..25f9e77aa 100644 --- a/docs-source/content/oam/create-oam-domains/_index.md +++ b/docs-source/content/oam/create-oam-domains/_index.md @@ -66,7 +66,7 @@ The sample scripts for Oracle Access Management domain deployment are available ```bash domainUID: accessdomain domainHome: /u01/oracle/user_projects/domains/accessdomain - image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7- + image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7- imagePullSecretName: orclcred weblogicCredentialsSecretName: accessdomain-credentials logHome: /u01/oracle/user_projects/domains/logs/accessdomain @@ -162,7 +162,7 @@ generated artifacts: export initialManagedServerReplicas="2" export managedServerNameBase="oam_server" export managedServerPort="14100" - export image="container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-" + export image="container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-" export imagePullPolicy="IfNotPresent" export imagePullSecretName="orclcred" export productionModeEnabled="true" @@ -446,7 +446,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve API Version: weblogic.oracle/v8 Kind: Domain Metadata: - Creation Timestamp: 2022-07-07T11:59:51Z + Creation Timestamp: Generation: 1 Managed Fields: API Version: weblogic.oracle/v8 @@ -461,7 +461,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve f:startTime: Manager: Kubernetes Java Client Operation: Update - Time: 2022-07-07T11:59:51Z + Time: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: @@ -474,7 +474,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve f:weblogic.domainUID: Manager: kubectl-client-side-apply Operation: Update - Time: 2022-07-07T11:59:51Z + Time: Resource Version: 1495179 UID: a90107d5-dbaf-4d86-9439-d5369faabd35 Spec: @@ -530,7 +530,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Domain Home: /u01/oracle/user_projects/domains/accessdomain Domain Home Source Type: PersistentVolume 
Http Access Log In Log Home: true - Image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7- + Image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7- Image Pull Policy: IfNotPresent Image Pull Secrets: Name: orclcred @@ -568,7 +568,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Replicas: 2 Replicas Goal: 2 Conditions: - Last Transition Time: 2022-07-07T12:11:52.623959Z + Last Transition Time: Reason: ServersReady Status: True Type: Available @@ -576,7 +576,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Servers: Desired State: RUNNING Health: - Activation Time: 2022-07-07T12:08:29.271000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -587,7 +587,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Cluster Name: oam_cluster Desired State: RUNNING Health: - Activation Time: 2022-07-07T12:11:02.696000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -598,7 +598,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Cluster Name: oam_cluster Desired State: RUNNING Health: - Activation Time: 2022-07-07T12:11:46.175000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -618,7 +618,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Cluster Name: policy_cluster Desired State: RUNNING Health: - Activation Time: 2022-07-07T12:11:20.404000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -629,7 +629,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Cluster Name: policy_cluster Desired State: RUNNING Health: - Activation Time: 2022-07-07T12:11:09.719000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -646,7 +646,7 @@ By default, the java memory parameters assigned to the oam_server cluster are ve Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr5 - Start Time: 2022-07-07T11:59:51.682687Z + Start Time: Events: Type Reason Age From Message ---- ------ ---- ---- ------- diff --git a/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md b/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md index 5b89a7efd..b434481b7 100644 --- a/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md +++ b/docs-source/content/oam/manage-oam-domains/logging-and-visualization.md @@ -5,259 +5,509 @@ description: "Describes the steps for logging and visualization with Elasticsear After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. -### Install Elasticsearch and Kibana +### Install Elasticsearch stack and Kibana +If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +[Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html) -1. If your domain namespace is anything other than `oamns`, edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of `oamns` to your domain namespace. -1. 
Create a Kubernetes secret to access the Elasticsearch and Kibana container images: +### Create the logstash pod - **Note:** You must first have a user account on [hub.docker.com](https://hub.docker.com). +#### Variables used in this chapter - ```bash - $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="" --docker-password= --docker-email= --namespace= - ``` +In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with variables applicable to your environment. + +Most of the values for the variables will be based on your ELK deployment as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html). + +The table below outlines the variables and values you must set: + + + +| Variable | Sample Value | Description | +| --- | --- | --- | +| `` | `8.3.1` | The version of logstash you want to install.| +| `` | `true` | If SSL is enabled for ELK set the value to `true`, or if NON-SSL set to `false`. This value must be lowercase.| +| `` | `MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75..etc...P9ovZ/EKPpE6Gq` | If `ELK_SSL=true`, this is the BASE64 version of the certificate between `---BEGIN CERTIFICATE---` and `---END CERTIFICATE---`. This is the Certificate Authority (CA) certificate(s), that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticsearch server. See [Copying the Elasticsearch Certificate](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C1FC1063-FA76-48AD-AE3D-A39390874C74) for details on how to get the correct certificate.| +| `` | `https://elasticsearch.example.com:9200` | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.| +| `` | `oamns` | The domain namespace.| +| `` | `logstash_internal` | The name of the user for logstash to access Elasticsearch.| +| `` | `password` | The password for ELK_USER.| +| `` | `apikey` | The API key details.| + +#### Create Kubernetes secrets + +1. Create a Kubernetes secret for Elasticsearch using the API Key or Password. + + a) If ELK uses an API Key for authentication: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= + ``` + For example: ``` - $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="username" --docker-password= --docker-email=user@example.com --namespace=oamns + $ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password= ``` The output will look similar to the following: - ```bash - secret/dockercred created - ``` + ``` + secret/elasticsearch-pw-elastic created + ``` - -1. 
Create the Kubernetes resource using the following command: - ```bash - $ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml + b) If ELK uses a password for authentication: + ``` - + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= + ``` + + For example: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n oamns --from-literal password= + ``` + The output will look similar to the following: ``` - deployment.apps/elasticsearch created - service/elasticsearch created - deployment.apps/kibana created - service/kibana created + secret/elasticsearch-pw-elastic created ``` + + + **Note**: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above. + + +1. Create a Kubernetes secret to access the required images on [hub.docker.com](https://hub.docker.com): -1. Run the following command to ensure Elasticsearch is used by the operator: + **Note**: Before executing the command below, you must first have a user account on [hub.docker.com](https://hub.docker.com). ```bash - $ helm get values --all weblogic-kubernetes-operator -n opns + kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \ + --docker-username="" \ + --docker-password= --docker-email= \ + --namespace= ``` - The output will look similar to the following: + For example, + ```bash + kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \ + --docker-username="user@example.com" \ + --docker-password=password --docker-email=user@example.com \ + --namespace=oamns ``` - COMPUTED VALUES: - clusterSizePaddingValidationEnabled: true - domainNamespaceLabelSelector: weblogic-operator=enabled - domainNamespaceSelectionStrategy: LabelSelector - domainNamespaces: - - default - elasticSearchHost: elasticsearch.default.svc.cluster.local - elasticSearchPort: 9200 - elkIntegrationEnabled: true - enableClusterRoleBinding: true - externalDebugHttpPort: 30999 - externalRestEnabled: false - externalRestHttpsPort: 31001 - externalServiceNameSuffix: -ext - image: ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 - imagePullPolicy: IfNotPresent - internalDebugHttpPort: 30999 - introspectorJobNameSuffix: -introspector - javaLoggingFileCount: 10 - javaLoggingFileSizeLimit: 20000000 - javaLoggingLevel: FINE - logStashImage: logstash:6.6.0 - remoteDebugNodePortEnabled: false - serviceAccount: op-sa - suspendOnDebugStartup: false + + The output will look similar to the following: + + ```bash + secret/dockercred created ``` -1. To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command: +#### Find the mountPath details - ``` - $ kubectl get pods -n | grep 'elasticsearch\|kibana' + +1. Run the following command to get the `mountPath` of your domain: + + ```bash + $ kubectl describe domains -n | grep "Mount Path" ``` For example: - ``` - $ kubectl get pods -n oamns | grep 'elasticsearch\|kibana' + ```bash + $ kubectl describe domains accessdomain -n oamns | grep "Mount Path" ``` The output will look similar to the following: ``` - elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 85s - kibana-57f6685789-mgwdl 1/1 Running 0 85s + Mount Path: /u01/oracle/user_projects/domains ``` - -### Create the logstash pod - -OAM Server logs can be pushed to the Elasticsearch server using the `logstash` pod. 
The `logstash` pod needs access to the persistent volume of the OAM domain created previously, for example `accessdomain-domain-pv`. The steps to create the `logstash` pod are as follows: +#### Find the persistentVolumeClaim details -1. Obtain the OAM domain persistence volume details: +1. Run the following command to get the OAM domain persistence volume details: - ```bash + ``` $ kubectl get pv -n ``` - + For example: - - ```bash + + ``` $ kubectl get pv -n oamns ``` The output will look similar to the following: - + ``` - NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE + NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 23h ``` - Make note of the `CLAIM` value, for example in this case `accessdomain-domain-pvc` + Make note of the CLAIM value, for example in this case `accessdomain-domain-pvc`. + +#### Create the Configmap + +1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash_cm.yaml` file as follows: + + ``` + apiVersion: v1 + kind: ConfigMap + metadata: + name: oam-logstash-configmap + namespace: + data: + logstash.yml: | + #http.host: "0.0.0.0" + + elk.crt: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + logstash-config.conf: | + input { + file { + path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log" + tags => "Adminserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log" + tags => "Policymanager_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log" + tags => "Oamserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log" + tags => "Adminserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log" + tags => "Policy_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_server*-diagnostic.log" + tags => "Oamserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/access*.log" + tags => "Access_logs" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log" + tags => "Audit_logs" + start_position => beginning + } + } + filter { + grok { + match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ] + } + if "_grokparsefailure" in [tags] { + mutate { + remove_tag => [ "_grokparsefailure" ] + } + } + } + output { + elasticsearch { + hosts => [""] + cacert => '/usr/share/logstash/config/certs/elk.crt' + index => "oamlogs-000001" + ssl => + ssl_certificate_verification => false + user => "" + password => "${ELASTICSEARCH_PASSWORD}" + api_key => "${ELASTICSEARCH_PASSWORD}" + } + } + ``` -1. 
Run the following command to get the `mountPath` of your domain: + Change the values in the above file as follows: - ```bash - $ kubectl describe domains -n | grep "Mount Path" - ``` + + Change the ``, ``, ``. `` to match the values for your environment. + + If using SSL, make sure the value for `` is indented correctly. You can use the command: `sed 's/^/ /' elk.crt` to output the certificate with the correct indentation. + + If not using SSL, delete the `` line, but leave the -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----. + + Change `/u01/oracle/user_projects/domains` to match the `mountPath` returned earlier + + If your domainUID is anything other than `accessdomain`, change each instance of `accessdomain` to your domainUID. + + If using API KEY for your ELK authentication, delete the `user` and `password` lines. + + If using a password for ELK authentication, delete the `api_key` line. + + If no authentication is used for ELK, delete the `user`, `password`, and `api_key` lines. For example: - ```bash - $ kubectl describe domains accessdomain -n oamns | grep "Mount Path" + ``` + apiVersion: v1 + kind: ConfigMap + metadata: + name: oam-logstash-configmap + namespace: oamns + data: + logstash.yml: | + #http.host: "0.0.0.0" + + elk.crt: | + -----BEGIN CERTIFICATE----- + MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75NvPPQUN5kXqNQUwDQYJKoZIhvcNAQELBQAw + NTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJj + aC1odHRwMB4XDTIyMDgyNDA1MTU1OVoXDTIzMDgyNDA1MjU1OVowNTEWMBQGA1UE + CxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJjaC1odHRwMIIB + IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsQOnxUm9uF32+lyc9SA3WcMZ + P1X7TbHMDuO/l3UHBUf5F/bt2m3YkGw+enIos9wzuUNpjIwVt8q4WrRCMl80nAQ0 + yCXrfLSI9zaHxEC8Ht7V0U+7Sgu5uysD4tyZ9T0Q5zjvkWS6oBPxhfri3OQfPvUW + gQ6wJaPGDteYZAwiBMvPEkmh0VUTBTXjToHrtrT7pzmz5BBWnUzdf+jv0+nEfedm + mMWw/8jqyqid7bu7bo6gKBZ8zk06n2iMaXzmGW34QlYRLBgubThhxyDE7joZ4NTA + UFEJecZR2fccmpN8CNkT9Ex4Hq88nh2OP5XKKPNF4kLh2u6F4auF7Uz42jwvIwID + AQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG + AQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLQb/IjHHkSmHgKSPY7r + zBIJZMbdMA0GCSqGSIb3DQEBCwUAA4IBBQA01qY0tGIPsKNkn7blxRjEYkTg59Z5 + vi6MCpGtdoyZeJgH621IpwyB34Hpu1RQfyg1aNgmOtIK9cvQZRl008DHF4AiHYhU + 6xe3cjI/QxDXwitoBgWl+a0mkwhSmzJt7TuzImq7RMO4ws3M/nGeNUwFjwsQu86+ + N/Y3RuuUVbK1xy8Jdz3FZADIgHVPN6GQwYKEpWrZNapKBXjunjCZmpBFxqGMRF44 + fcSKFlFkwjyTq4kgq44NPv18NMfKCYZcK7ttRTiep77vKB7No/TM69Oz5ZHhQ+2Q + pSGg3QF+1fOCFCgWXFEOle6lQ5i8a/GihY0FuphrZxP9ovZ/EKPpE6Gq + -----END CERTIFICATE----- + logstash-config.conf: | + input { + file { + path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log" + tags => "Adminserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log" + tags => "Policymanager_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log" + tags => "Oamserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log" + tags => "Adminserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log" + tags => "Policy_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log" + tags => "Audit_logs" + start_position => 
beginning + } + } + filter { + grok { + match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ] + } + if "_grokparsefailure" in [tags] { + mutate { + remove_tag => [ "_grokparsefailure" ] + } + } + } + output { + elasticsearch { + hosts => ["https://elasticsearch.example.com:9200"] + cacert => '/usr/share/logstash/config/certs/elk.crt' + index => "oamlogs-000001" + ssl => true + ssl_certificate_verification => false + user => "logstash_internal" + password => "${ELASTICSEARCH_PASSWORD}" + } + } ``` - The output will look similar to the following: + +1. Run the following command to create the configmap: + ``` - Mount Path: /u01/oracle/user_projects/domains + $ kubectl apply -f logstash_cm.yaml ``` -1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows. - Change the `claimName` and `mountPath` values to match the values returned in the previous commands. Change `namespace` to your domain namespace e.g `oamns`: + The output will look similar to the following: + + ``` + configmap/oam-logstash-configmap created + ``` +#### Deploy the logstash pod + +1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows: + ``` apiVersion: apps/v1 kind: Deployment metadata: - name: logstash-wls - namespace: oamns + name: oam-logstash + namespace: spec: selector: matchLabels: - k8s-app: logstash-wls + k8s-app: logstash template: # create pods using pod definition in this template metadata: - labels: - k8s-app: logstash-wls + labels: + k8s-app: logstash spec: + imagePullSecrets: + - name: dockercred + containers: + - command: + - logstash + image: logstash: + imagePullPolicy: IfNotPresent + name: oam-logstash + env: + - name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-pw-elastic + key: password + resources: + ports: + - containerPort: 5044 + name: logstash + volumeMounts: + - mountPath: /u01/oracle/user_projects/domains + name: weblogic-domain-storage-volume + - name: shared-logs + mountPath: /shared-logs + - mountPath: /usr/share/logstash/pipeline/ + name: oam-logstash-pipeline + - mountPath: /usr/share/logstash/config/logstash.yml + subPath: logstash.yml + name: config-volume + - mountPath: /usr/share/logstash/config/certs + name: elk-cert volumes: + - configMap: + defaultMode: 420 + items: + - key: elk.crt + path: elk.crt + name: oam-logstash-configmap + name: elk-cert + - configMap: + defaultMode: 420 + items: + - key: logstash-config.conf + path: logstash-config.conf + name: oam-logstash-configmap + name: oam-logstash-pipeline + - configMap: + defaultMode: 420 + items: + - key: logstash.yml + path: logstash.yml + name: oam-logstash-configmap + name: config-volume - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc - name: shared-logs emptyDir: {} + ``` + + + Change the ``, `` to match the values for your environment. 
+ + Change `/u01/oracle/user_projects/domains` to match the `mountPath` returned earlier + + Change the `claimName` value to match the `claimName` returned earlier + + + For example: + + ``` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: oam-logstash + namespace: oamns + spec: + selector: + matchLabels: + k8s-app: logstash + template: # create pods using pod definition in this template + metadata: + labels: + k8s-app: logstash + spec: imagePullSecrets: - name: dockercred containers: - - name: logstash - image: logstash:6.6.0 - command: ["/bin/sh"] - args: ["/usr/share/logstash/bin/logstash", "-f", "/u01/oracle/user_projects/domains/logstash/logstash.conf"] + - command: + - logstash + image: logstash:8.3.1 imagePullPolicy: IfNotPresent + name: oam-logstash + env: + - name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-pw-elastic + key: password + resources: + ports: + - containerPort: 5044 + name: logstash volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs - ports: - - containerPort: 5044 - name: logstash - ``` - -1. In the NFS persistent volume directory that corresponds to the mountPath `/u01/oracle/user_projects/domains`, create a `logstash` directory. For example: - - ``` - $ mkdir -p /scratch/shared/accessdomainpv/logstash - ``` - -1. Create a `logstash.conf` in the newly created `logstash` directory that contains the following. Make sure the paths correspond to your `mountPath` and `domain` name. Also, if your namespace is anything other than `oamns` change `"elasticsearch.oamns.svc.cluster.local:9200"` to `"elasticsearch..svc.cluster.local:9200"`: - - ``` - input { - file { - path => "/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log" - tags => "Adminserver_log" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log" - tags => "Policymanager_log" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log" - tags => "Oamserver_log" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log" - tags => "Adminserver_diagnostic" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log" - tags => "Policy_diagnostic" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_server*-diagnostic.log" - tags => "Oamserver_diagnostic" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/access*.log" - tags => "Access_logs" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log" - tags => "Audit_logs" - start_position => beginning - } - } - filter { - grok { - match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ] - } - if "_grokparsefailure" in [tags] { - mutate { - remove_tag => [ "_grokparsefailure" ] - } - } - } - output { - elasticsearch { - hosts => ["elasticsearch.oamns.svc.cluster.local:9200"] - } - 
} + - mountPath: /usr/share/logstash/pipeline/ + name: oam-logstash-pipeline + - mountPath: /usr/share/logstash/config/logstash.yml + subPath: logstash.yml + name: config-volume + - mountPath: /usr/share/logstash/config/certs + name: elk-cert + volumes: + - configMap: + defaultMode: 420 + items: + - key: elk.crt + path: elk.crt + name: oam-logstash-configmap + name: elk-cert + - configMap: + defaultMode: 420 + items: + - key: logstash-config.conf + path: logstash-config.conf + name: oam-logstash-configmap + name: oam-logstash-pipeline + - configMap: + defaultMode: 420 + items: + - key: logstash.yml + path: logstash.yml + name: oam-logstash-configmap + name: config-volume + - name: weblogic-domain-storage-volume + persistentVolumeClaim: + claimName: accessdomain-domain-pvc + - name: shared-logs + emptyDir: {} ``` - - - - + 1. Deploy the `logstash` pod by executing the following command: ```bash @@ -267,7 +517,7 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p The output will look similar to the following: ``` - deployment.apps/logstash-wls created + deployment.apps/oam-logstash created ``` 1. Run the following command to check the `logstash` pod is created correctly: @@ -294,121 +544,65 @@ OAM Server logs can be pushed to the Elasticsearch server using the `logstash` p accessdomain-oam-server2 1/1 Running 1 18h elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 5m helper 1/1 Running 0 23h - kibana-57f6685789-mgwdl 1/1 Running 0 5m - logstash-wls-6687c5bf6-jmmdp 1/1 Running 0 12s nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 20h + oam-logstash-bbbdf5876-85nkd 1/1 Running 0 4m23s ``` - - -### Verify and access the Kibana console - -1. Check if the indices are created correctly in the elasticsearch pod shown above: - - ```bash - $ kubectl exec -it -n -- /bin/bash - ``` - - For example: - - ```bash - $ kubectl exec -it elasticsearch-f7b7c4c4-tb4pp -n oamns -- /bin/bash - ``` - - This will take you into a bash shell in the elasticsearch pod: + **Note**: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using: - ```bash - [root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# ``` - -1. In the elasticsearch bash shell, run the following to check the indices: - - ```bash - [root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# curl -i "127.0.0.1:9200/_cat/indices?v" + $ kubectl logs -f oam-logstash- -n oamns ``` - The output will look similar to the following: + Most errors occur due to misconfiguration of the `logstash_cm.yaml` or `logstash.yaml`. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation. - ``` - HTTP/1.1 200 OK - content-type: text/plain; charset=UTF-8 - content-length: 696 - - health status index uuid pri rep docs.count docs.deleted store.size pri.store.size - green open .kibana_task_manager -IPDdiajTSyIRjelI2QJIg 1 0 2 0 12.6kb 12.6kb - green open .kibana_1 YI9CZAjsTsCCuAyBb1ho3A 1 0 2 0 7.6kb 7.6kb - yellow open logstash-2022.03.08 4pDJSTGVR3-oOwTtHnnTkQ 5 1 148 0 173.9kb 173.9kb + If the pod has errors, delete the pod and configmap as follows: ``` - - Exit the bash shell by typing `exit`. - -1. 
Find the Kibana port by running the following command: - - ```bash - $ kubectl get svc -n | grep kibana + $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml + $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml ``` - For example: - - ```bash - $ kubectl get svc -n oamns | grep kibana - ``` + Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod. - The output will look similar to the following: - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kibana NodePort 10.104.248.203 5601:31394/TCP 11m - ``` - In the example above the Kibana port is `31394`. -1. Access the Kibana console with `http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana`. -1. Click **Dashboard** and in the **Create index pattern** page enter `logstash*`. Click **Next Step**. +### Verify and access the Kibana console -1. From the **Time Filter field name** drop down menu select `@timestamp` and click **Create index pattern**. +To access the Kibana console you will need the Kibana URL as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C0013AA8-B229-4237-A1D8-8F38FA6E2CEC). -1. Once the index pattern is created click on **Discover** in the navigation menu to view the logs. -For more details on how to use the Kibana console see the [Kibana Guide](https://www.elastic.co/guide/en/kibana/current/index.html) +**For Kibana 7.7.x and below**: +1. Access the Kibana console with `http://:/app/kibana` and login with your username and password. -### Cleanup +1. From the Navigation menu, navigate to **Management** > **Kibana** > **Index Patterns**. -To clean up the Elasticsearch and Kibana install: +1. In the **Create Index Pattern** page enter `oamlogs*` for the **Index pattern** and click **Next Step**. -1. Run the following command to delete logstash: +1. In the **Configure settings** page, from the **Time Filter field name** drop down menu select `@timestamp` and click **Create index pattern**. - ```bash - $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml - ``` +1. Once the index pattern is created click on **Discover** in the navigation menu to view the OAM logs. - The output will look similar to the following: - ``` - deployment.apps "logstash-wls" deleted - ``` +**For Kibana version 7.8.X and above**: -1. Run the following command to delete Elasticsearch and Kibana: +1. Access the Kibana console with `http://:/app/kibana` and login with your username and password. - ```bash - $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml - ``` +1. From the Navigation menu, navigate to **Management** > **Stack Management**. - The output will look similar to the following: +1. Click **Data Views** in the **Kibana** section. - ``` - deployment.apps "elasticsearch" deleted - service "elasticsearch" deleted - deployment.apps "kibana" deleted - service "kibana" deleted - ``` +1. Click **Create Data View** and enter the following information: + + Name: `oamlogs*` + + Timestamp: `@timestamp` - - - - \ No newline at end of file +1. Click **Create Data View**. + +1. From the Navigation menu, click **Discover** to view the log file entries. + +1. From the drop down menu, select `oamlogs*` to view the log file entries. 
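+
+**Note**: If no OAM log entries are displayed in **Discover**, you can confirm that logstash is shipping data by querying the Elasticsearch indices directly. The command below is a minimal sketch only, assuming the example Elasticsearch URL (`https://elasticsearch.example.com:9200`) and the `logstash_internal` user shown earlier in this section; substitute the URL, credentials, and certificate options used by your own centralized ELK deployment:
+
+   ```
+   $ curl -k -u logstash_internal:<password> "https://elasticsearch.example.com:9200/_cat/indices/oamlogs*?v"
+   ```
+
+   Indices such as `oamlogs-000001` should be listed with a growing `docs.count`. If no `oamlogs*` indices are returned, review the logstash pod log as described earlier.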
\ No newline at end of file diff --git a/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md b/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md index ac26355d2..47348962c 100644 --- a/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md +++ b/docs-source/content/oam/manage-oam-domains/monitoring-oam-domains.md @@ -18,7 +18,7 @@ The `$WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh` sets up the m For usage details execute `./setup-monitoring.sh -h`. -1. Edit the `$WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml` and change the `domainUID`, `domainNamespace`, and `weblogicCredentialsSecretName` to correspond to your deployment. For example: +1. Edit the `$WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml` and change the `domainUID`, `domainNamespace`, and `weblogicCredentialsSecretName` to correspond to your deployment. Also change `wlsMonitoringExporterTooamCluster`, `wlsMonitoringExporterTopolicyCluster`, `exposeMonitoringNodePort` to `true`. For example: ``` version: create-accessdomain-monitoring-inputs-v1 @@ -108,7 +108,7 @@ For usage details execute `./setup-monitoring.sh -h`. Update Complete. ⎈ Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring - LAST DEPLOYED: Tue Jul 12 14:13:49 2022 + LAST DEPLOYED: NAMESPACE: monitoring STATUS: deployed REVISION: 1 @@ -159,14 +159,14 @@ For usage details execute `./setup-monitoring.sh -h`. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. - + Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -177,14 +177,14 @@ For usage details execute `./setup-monitoring.sh -h`. Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -192,14 +192,14 @@ For usage details execute `./setup-monitoring.sh -h`. Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -210,7 +210,7 @@ For usage details execute `./setup-monitoring.sh -h`. Exiting WebLogic Scripting Tool. - + 14:27 Deploy WebLogic Monitoring Exporter completed secret/basic-auth created @@ -567,14 +567,14 @@ Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create th Deploying ......... 
Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... - + ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -582,14 +582,14 @@ Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create th Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -597,14 +597,14 @@ Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create th Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -614,7 +614,7 @@ Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create th Exiting WebLogic Scripting Tool. - + ``` #### Configure Prometheus Operator diff --git a/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md b/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md index d3d346449..c7f83dabc 100644 --- a/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md +++ b/docs-source/content/oam/manage-oam-domains/wlst-admin-operations.md @@ -240,11 +240,11 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash [oracle@accessdomain-adminserver oracle]$ [oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs [oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log -2022-07-12T10:26:14.793+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. 
Monitor: { StoreMonitor: { disabled: 'false' } } -[2022-07-12T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified -[2022-07-12T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified -[2022-07-12T10:26:14.795+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) -[2022-07-12T10:26:14.797+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4 +[] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. Monitor: { StoreMonitor: { disabled: 'false' } } +[] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified +[] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified +[] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) 
+[] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: ] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4 ``` ### Performing WLST Administration via SSL @@ -390,9 +390,9 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash ```bash wls:/offline> connect('weblogic','','t3s://accessdomain-adminserverssl:7002') Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ... - - - + <> + <> + <> Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain". wls:/accessdomain/serverConfig/> @@ -403,9 +403,9 @@ $ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash ```bash wls:/offline> connect('weblogic','','t3s://accessdomain-oamcluster-ssl:14101') Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ... - - - + <> + <> + <> Successfully connected to managed Server "oam_server1" that belongs to domain "accessdomain". ``` diff --git a/docs-source/content/oam/patch-and-upgrade/_index.md b/docs-source/content/oam/patch-and-upgrade/_index.md index acd22f520..0e464895f 100644 --- a/docs-source/content/oam/patch-and-upgrade/_index.md +++ b/docs-source/content/oam/patch-and-upgrade/_index.md @@ -2,10 +2,14 @@ title = "Patch and Upgrade" weight = 11 pre = "11. " -description= "This document provides steps to patch or upgrade an OAM image, WebLogic Kubernetes Operator or Kubernetes Cluster." +description= "This document provides steps to patch or upgrade an OAM image, WebLogic Kubernetes Operator, ELK, and Ingress." +++ -Patch an existing OAM image, or upgrade the WebLogic Kubernetes Operator release. +This section shows you how to upgrade the WebLogic Kubernetes Operator, the OAM image, the Elasticsearch and Kibana stack, and the Ingress. + +The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to. + +Please refer to the [Release Notes](../release-notes) for information on which upgrade steps are necessary for the version you are upgrading to. {{% children style="h4" description="true" %}} diff --git a/docs-source/content/oam/patch-and-upgrade/patch_an_image.md b/docs-source/content/oam/patch-and-upgrade/patch-an-image.md similarity index 98% rename from docs-source/content/oam/patch-and-upgrade/patch_an_image.md rename to docs-source/content/oam/patch-and-upgrade/patch-an-image.md index f672c3315..83ed8a137 100644 --- a/docs-source/content/oam/patch-and-upgrade/patch_an_image.md +++ b/docs-source/content/oam/patch-and-upgrade/patch-an-image.md @@ -1,5 +1,5 @@ --- -title: "a. Patch an image" +title: "b. Patch an image" description: "Instructions on how to update your OAM Kubernetes cluster with a new OAM container image." --- diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade-an-ingress.md b/docs-source/content/oam/patch-and-upgrade/upgrade-an-ingress.md new file mode 100644 index 000000000..5239692b8 --- /dev/null +++ b/docs-source/content/oam/patch-and-upgrade/upgrade-an-ingress.md @@ -0,0 +1,179 @@ +--- +title: "c. Upgrade Ingress" +description: "Instructions on how to upgrade the ingress." +--- + +This section shows how to upgrade the ingress. 
+ +To determine if this step is required for the version you are upgrading to, refer to the [Release Notes](../../release-notes). + +### Download the latest code repository + +Download the latest code repository as follows: + +1. Create a working directory to setup the source code. + ```bash + $ mkdir + ``` + + For example: + ```bash + $ mkdir /scratch/OAMK8Slatest + ``` + +1. Download the latest OAM deployment scripts from the OAM repository. + + ```bash + $ cd + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + + For example: + + ```bash + $ cd /scratch/OAMK8Slatest + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + +1. Set the `$WORKDIR` environment variable as follows: + + ```bash + $ export WORKDIR=/fmw-kubernetes/OracleAccessManagement + ``` + + For example: + + ```bash + $ export WORKDIR=/scratch/OAMK8Slatest/fmw-kubernetes/OracleAccessManagement + ``` + +### Upgrading the ingress + +To upgrade the existing ingress rules, follow the steps below: + +1. List the existing ingress: + + ``` + $ helm list -n oamns + ``` + + The output will look similar to the following: + + ``` + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + nginx-ingress oamns 1 deployed ingress-nginx-4.3.0 1.4.0 + oam-nginx oamns 1 deployed ingress-per-domain-0.1.0 1.0 + ``` + +1. Edit the `$WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml` and change the `domainUID` parameter to match your domainUID, for example `domainUID: accessdomain`. For example: + + ``` + # Load balancer type. Supported values are: NGINX + type: NGINX + + # SSL configuration Type. Supported Values are : NONSSL,SSL + sslType: SSL + + # domainType. Supported values are: oam + domainType: oam + + #WLS domain as backend to the load balancer + wlsDomain: + domainUID: accessdomain + adminServerName: AdminServer + adminServerPort: 7001 + adminServerSSLPort: + oamClusterName: oam_cluster + oamManagedServerPort: 14100 + oamManagedServerSSLPort: + policyClusterName: policy_cluster + policyManagedServerPort: 15100 + policyManagedServerSSLPort: + + + # Host specific values + hostName: + enabled: false + admin: + runtime: + ``` + +1. Upgrade the `oam-nginx` with the following command: + + ``` + $ helm upgrade oam-nginx kubernetes/charts/ingress-per-domain/ --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values + ``` + + The output will look similar to the following: + + ``` + Release "oam-nginx" has been upgraded. Happy Helming! + NAME: oam-nginx + LAST DEPLOYED: + NAMESPACE: oamns + STATUS: deployed + REVISION: 2 + TEST SUITE: None + ``` + + +1. List the ingress: + + ``` + $ kubectl get ing -n oamns + ``` + + The output will look similar to the following: + + ``` + NAME CLASS HOSTS ADDRESS PORTS AGE + accessdomain-nginx * 10.99.189.61 80 18s + ``` + +1. 
Describe the ingress and make sure all the listed paths are accessible: + + ``` + $ kubectl describe ing accessdomain-nginx -n oamns + ``` + + The output will look similar to the following: + + ``` + Name: accessdomain-nginx + Labels: app.kubernetes.io/managed-by=Helm + Namespace: oamns + Address: 10.99.189.61 + Default backend: default-http-backend:80 () + Rules: + Host Path Backends + ---- ---- -------- + * + /console accessdomain-adminserver:7001 (10.244.1.224:7001) + /consolehelp accessdomain-adminserver:7001 (10.244.1.224:7001) + /rreg/rreg accessdomain-adminserver:7001 (10.244.1.224:7001) + /em accessdomain-adminserver:7001 (10.244.1.224:7001) + /oamconsole accessdomain-adminserver:7001 (10.244.1.224:7001) + /dms accessdomain-adminserver:7001 (10.244.1.224:7001) + /oam/services/rest accessdomain-adminserver:7001 (10.244.1.224:7001) + /iam/admin/config accessdomain-adminserver:7001 (10.244.1.224:7001) + /iam/admin/diag accessdomain-adminserver:7001 (10.244.1.224:7001) + /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100) + /oam/admin/api accessdomain-adminserver:7001 (10.244.1.224:7001) + /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100) + /access accessdomain-cluster-policy-cluster:15100 (10.244.1.226:15100) + / accessdomain-cluster-oam-cluster:14100 (10.244.1.225:14100) + Annotations: kubernetes.io/ingress.class: nginx + meta.helm.sh/release-name: oam-nginx + meta.helm.sh/release-namespace: oamns + nginx.ingress.kubernetes.io/configuration-snippet: + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/enable-access-log: false + nginx.ingress.kubernetes.io/ingress.allow-http: false + nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Sync 55s (x2 over 63s) nginx-ingress-controller Scheduled for sync + ``` \ No newline at end of file diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md b/docs-source/content/oam/patch-and-upgrade/upgrade-an-operator-release.md similarity index 97% rename from docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md rename to docs-source/content/oam/patch-and-upgrade/upgrade-an-operator-release.md index 16be5d8b4..942a9ec04 100644 --- a/docs-source/content/oam/patch-and-upgrade/upgrade_an_operator_release.md +++ b/docs-source/content/oam/patch-and-upgrade/upgrade-an-operator-release.md @@ -1,5 +1,5 @@ --- -title: "b. Upgrade an operator release" +title: "a. Upgrade an operator release" description: "Instructions on how to update the WebLogic Kubernetes Operator version." --- @@ -44,7 +44,7 @@ These instructions apply to upgrading the operator within the 3.x release family ``` Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Tue Jul 12 18:36:10 2021 + LAST DEPLOYED: NAMESPACE: opns STATUS: deployed REVISION: 3 diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md new file mode 100644 index 000000000..e61826075 --- /dev/null +++ b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md @@ -0,0 +1,30 @@ +--- +title: "d. Upgrade Elasticsearch and Kibana" +description: "Instructions on how to upgrade Elastic Search and Kibana." 
+--- + +This section shows how to upgrade Elasticsearch and Kibana. + +To determine if this step is required for the version you are upgrading to, refer to the [Release Notes](../../release-notes). + +### Undeploy Elasticsearch and Kibana + +From October 22 (22.4.1) onwards, OAM logs should be stored on a centralized Elasticsearch and Kibana stack. + +Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana. + +If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below: + +1. Make sure you have downloaded the latest code repository as per [Download the latest code repository](../upgrade-an-ingress/#download-the-latest-code-repository) + +1. If your domain namespace is anything other than `oamns`, edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of `namespace: "oamns"` to your domain namespace. + +1. Delete the Elasticsearch and Kibana resources using the following command: + + ``` + $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml + ``` + +### Deploy Elasticsearch and Kibana in centralized stack + +1. Follow [Install Elasticsearch stack and Kibana](../../manage-oam-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. \ No newline at end of file diff --git a/docs-source/content/oam/post-install-config/_index.md b/docs-source/content/oam/post-install-config/_index.md index 3bdb7f4e1..4d8498721 100644 --- a/docs-source/content/oam/post-install-config/_index.md +++ b/docs-source/content/oam/post-install-config/_index.md @@ -210,13 +210,6 @@ For production environments, the following WebLogic Server tuning parameters mus $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common $ chmod 777 oamconfig_modify.sh ``` - - For example: - - ```bash - $ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common - $ chmod 777 oamconfig_modify.sh - ``` 1. Edit the `oamconfig.properties` and change the `OAM_NAMESPACE` and `LBR_HOST` to match the values for your OAM Kubernetes environment. For example: @@ -279,7 +272,7 @@ For production environments, the following WebLogic Server tuning parameters mus HTTP/1.1 100 Continue HTTP/1.1 201 Created - Date: Mon, 01 Nov 2021 16:59:12 GMT + Date: Content-Type: text/plain Content-Length: 76 Connection: keep-alive diff --git a/docs-source/content/oam/prepare-your-environment/_index.md b/docs-source/content/oam/prepare-your-environment/_index.md index 4618ca91c..19a62a2ba 100644 --- a/docs-source/content/oam/prepare-your-environment/_index.md +++ b/docs-source/content/oam/prepare-your-environment/_index.md @@ -67,17 +67,15 @@ The OAM Kubernetes deployment requires access to an OAM container image. The ima #### Prebuilt OAM container image -The prebuilt OAM July 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0, the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. +The prebuilt OAM October 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). 
This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. **Note**: Before using this image you must login to [Oracle Container Registry](https://container-registry.oracle.com), navigate to `Middleware` > `oam_cpu` and accept the license agreement. -Alternatively the same image can also be downloaded from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. - You can use this image in the following ways: - Pull the container image from the Oracle Container Registry automatically during the OAM Kubernetes deployment. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. +- Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. +- Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. #### Build your own OAM container image using WebLogic Image Tool @@ -205,7 +203,7 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i $ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace \ - --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \ + --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.4.2 \ --set serviceAccount= \ --set “enableClusterRoleBinding=true” \ --set "domainNamespaceSelectionStrategy=LabelSelector" \ @@ -219,7 +217,7 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i $ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace opns \ - --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \ + --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.4.2 \ --set serviceAccount=op-sa \ --set "enableClusterRoleBinding=true" \ --set "domainNamespaceSelectionStrategy=LabelSelector" \ @@ -232,7 +230,7 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i ``` NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Tue Jul 12 10:25:39 + LAST DEPLOYED: NAMESPACE: opns STATUS: deployed REVISION: 1 @@ -255,7 +253,7 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i ``` NAME READY STATUS RESTARTS AGE - pod/weblogic-operator-676d5cc6f4-wct7b 2/2 Running 0 40s + pod/weblogic-operator-676d5cc6f4-wct7b 1/1 Running 0 40s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.101.1.198 8082/TCP 40s @@ -283,10 +281,10 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i ``` ... 
- {"timestamp":"2022-07-12T10:26:10.917829423Z","thread":13,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762370917,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} - {"timestamp":"2022-07-12T10:26:20.920145876Z","thread":13,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762380920,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} - {"timestamp":"2022-07-12T10:26:30.922360564Z","thread":19,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762390922,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} - {"timestamp":"2022-07-12T10:26:40.924847211Z","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1635762400924,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":26,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1664440408119,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":19,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1664440418120,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1664440428123,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1664440438124,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":"" ``` ### Create a namespace for Oracle Access Management @@ -344,7 +342,8 @@ OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator i ``` Name: oamns - Labels: weblogic-operator=enabled + Labels: kubernetes.io/metadata.name=oamns + weblogic-operator=enabled Annotations: Status: Active @@ -406,7 +405,7 @@ Before following the steps in this section, make sure that the database and list For example: ```bash - $ kubectl run --image=container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7- --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oamns -- sleep infinity + $ kubectl run --image=container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7- --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oamns -- sleep infinity ``` If you are not using a container 
registry and have loaded the image on each of the master and worker nodes, run the following command: @@ -418,7 +417,7 @@ Before following the steps in this section, make sure that the database and list For example: ```bash - $ kubectl run helper --image oracle/oam:12.2.1.4-jdk8-ol7- -n oamns -- sleep infinity + $ kubectl run helper --image oracle/oam:12.2.1.4-jdk8-ol7- -n oamns -- sleep infinity ``` The output will look similar to the following: @@ -512,7 +511,7 @@ Before following the steps in this section, make sure that the database and list The output will look similar to the following: ``` - RCU Logfile: /tmp/RCU2022-07-12_10-29_561898106/logs/rcu.log + RCU Logfile: /tmp/RCU/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites @@ -581,18 +580,21 @@ Before following the steps in this section, make sure that the database and list Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OAMK8S - RCU Logfile : /tmp/RCU2022-07-12_10-29_561898106/logs/rcu.log + RCU Logfile : /tmp/RCU/logs/rcu.log + Component schemas created: ----------------------------- - Component Status Logfile - Common Infrastructure Services Success /tmp/RCU2022-07-12_10-29_561898106/logs/stb.log - Oracle Platform Security Services Success /tmp/RCU2022-07-12_10-29_561898106/logs/opss.log - Oracle Access Manager Success /tmp/RCU2022-07-12_10-29_561898106/logs/oam.log - Audit Services Success /tmp/RCU2022-07-12_10-29_561898106/logs/iau.log - Audit Services Append Success /tmp/RCU2022-07-12_10-29_561898106/logs/iau_append.log - Audit Services Viewer Success /tmp/RCU2022-07-12_10-29_561898106/logs/iau_viewer.log - Metadata Services Success /tmp/RCU2022-07-12_10-29_561898106/logs/mds.log - WebLogic Services Success /tmp/RCU2022-07-12_10-29_561898106/logs/wls.log + Component Status Logfile + + Common Infrastructure Services Success /tmp/RCU/logs/stb.log + Oracle Platform Security Services Success /tmp/RCU/logs/opss.log + Oracle Access Manager Success /tmp/RCU/logs/oam.log + Audit Services Success /tmp/RCU/logs/iau.log + Audit Services Append Success /tmp/RCU/logs/iau_append.log + Audit Services Viewer Success /tmp/RCU/logs/iau_viewer.log + Metadata Services Success /tmp/RCU/logs/mds.log + WebLogic Services Success /tmp/RCU/logs/wls.log + Repository Creation Utility - Create : Operation Completed [oracle@helper ~]$ ``` @@ -668,14 +670,14 @@ In this section you prepare the environment for the OAM domain creation. This in username: d2VibG9naWM= kind: Secret metadata: - creationTimestamp: "2022-07-12T10:41:11Z" + creationTimestamp: "" labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain name: accessdomain-credentials namespace: oamns - resourceVersion: "2913144" - uid: 5f8d9874-9cd7-42be-af4b-54f787e71ac2 + resourceVersion: "29428101" + uid: 6dac0561-d157-4144-9ed7-c475a080eb3a type: Opaque ``` @@ -738,14 +740,14 @@ In this section you prepare the environment for the OAM domain creation. This in username: T0FNSzhT kind: Secret metadata: - creationTimestamp: "2022-07-12T10:50:34Z" + creationTimestamp: "" labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain name: accessdomain-rcu-credentials namespace: oamns - resourceVersion: "2913938" - uid: 3798af1b-2783-415f-aea8-31e0610220a7 + resourceVersion: "29428242" + uid: 1b81b6e0-fd7d-40b8-a060-454c8d23f4dc type: Opaque ``` @@ -760,7 +762,7 @@ In this section you prepare the environment for the OAM domain creation. 
This in The example below uses an NFS mounted volume (/accessdomainpv). Other volume types can also be used. See the official [Kubernetes documentation for Volumes](https://kubernetes.io/docs/concepts/storage/volumes/). - **Note**: The persistent volume directory needs to be accessible to both the master and worker node(s). Make sure this path has **full** access permissions, and that the folder is empty. In this example `/scratch/shared/accessdomainpv` is accessible from all nodes via NFS. + **Note**: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example `/scratch/shared/accessdomainpv` is accessible from all nodes via NFS. @@ -773,7 +775,7 @@ In this section you prepare the environment for the OAM domain creation. This in $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /accessdomainpv - $ chmod -R 777 /accessdomainpv + $ sudo chown -R 1000:0 /accessdomainpv ``` For example: @@ -783,7 +785,7 @@ In this section you prepare the environment for the OAM domain creation. This in $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/shared/accessdomainpv - $ chmod -R 777 /scratch/shared/accessdomainpv + $ sudo chown -R 1000:0 /scratch/shared/accessdomainpv ``` 1. On the master node run the following command to ensure it is possible to read and write to the persistent volume: @@ -989,4 +991,4 @@ In this section you prepare the environment for the OAM domain creation. This in ``` - You are now ready to create the OAM domain as per [Create OAM Domains](../create-oam-domains/) + You are now ready to create the OAM domain as per [Create OAM Domains](../create-oam-domains/). diff --git a/docs-source/content/oam/prerequisites/_index.md b/docs-source/content/oam/prerequisites/_index.md index 35b973c19..804fd57d8 100644 --- a/docs-source/content/oam/prerequisites/_index.md +++ b/docs-source/content/oam/prerequisites/_index.md @@ -7,7 +7,7 @@ description: "System requirements and limitations for deploying and running an O ### Introduction -This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 3.3.0. +This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 3.4.2. diff --git a/docs-source/content/oam/release-notes.md b/docs-source/content/oam/release-notes.md index 2b69e0465..3a5c02c3e 100644 --- a/docs-source/content/oam/release-notes.md +++ b/docs-source/content/oam/release-notes.md @@ -10,9 +10,19 @@ Review the latest changes and known issues for Oracle Access Management on Kuber | Date | Version | Change | | --- | --- | --- | +| October, 2022 | 22.4.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| +| | | Support for WebLogic Kubernetes Operator 3.4.2.| +| | | Additional Ingress mappings added.| +| | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.| +| | | OAM container images are now only available from [container-registry.oracle.com](https://container-registry.oracle.com) and are no longer available from My Oracle Support.| +| | | If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order: +| | | 1. 
WebLogic Kubernetes Operator to 3.4.2| +| | | 2. Patch the OAM container image to October 22| +| | | 3. Upgrade the Ingress| +| | | 4. Upgrade Elasticsearch and Kibana
See [Patch and Upgrade](../patch-and-upgrade) for these instructions.| | July, 2022 | 22.3.1 | Supports Oracle Access Management 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| | April, 2022 | 22.2.1 | Updated for CRI-O support.| | November, 2021 | 21.4.2 | Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.| -| October 2021 | 21.4.1 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) Addtional post configuration tasks added. **D**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| +| October 2021 | 21.4.1 | **A**) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. **B**) Namespace and domain names changed to be consistent with [Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/). **C**) Additional post configuration tasks added. **D**) *Upgrading a Kubernetes Cluster* and *Security Hardening* removed as vendor specific.| | November 2020 | 20.4.1 | Initial release of Oracle Access Management on Kubernetes.| diff --git a/docs-source/content/oam/validate-domain-urls/_index.md b/docs-source/content/oam/validate-domain-urls/_index.md index fb558858b..ef789bbf8 100644 --- a/docs-source/content/oam/validate-domain-urls/_index.md +++ b/docs-source/content/oam/validate-domain-urls/_index.md @@ -29,5 +29,5 @@ Launch a browser and access the following URL's. Login with the weblogic usernam The browser will give certificate errors if you used a self signed certificate and have not imported it into the browsers Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors. -After validating the URL's proceed to [Post Install Configuraton](../post-install-configuration). +After validating the URL's proceed to [Post Install Configuration](../post-install-config). diff --git a/docs-source/content/oig/_index.md b/docs-source/content/oig/_index.md index 9eee77dca..510ba36d8 100644 --- a/docs-source/content/oig/_index.md +++ b/docs-source/content/oig/_index.md @@ -21,10 +21,9 @@ environment. You can: ### Current production release -The current production release for the Oracle Identity Governance domain deployment on Kubernetes is [22.3.1](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version 3.3.0. +The current production release for the Oracle Identity Governance domain deployment on Kubernetes is [22.4.1](https://github.com/oracle/fmw-kubernetes/releases). This release uses the WebLogic Kubernetes Operator version 3.4.2. -This release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. 
-For 3.0.X WebLogic Kubernetes Operator refer to [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oig/) +For 3.3.X WebLogic Kubernetes Operator refer to [Version 22.3.1](https://oracle.github.io/fmw-kubernetes/22.3.1/oig/) ### Recent changes and known issues @@ -44,6 +43,7 @@ If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guid To view documentation for an earlier release, see: +* [Version 22.3.1](https://oracle.github.io/fmw-kubernetes/22.3.1/oig/) * [Version 22.2.1](https://oracle.github.io/fmw-kubernetes/22.2.1/oig/) * [Version 21.4.2](https://oracle.github.io/fmw-kubernetes/21.4.2/oig/) * [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oig/) diff --git a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md b/docs-source/content/oig/configure-design-console/Using-the-design-console-with-nginx-ssl.md similarity index 98% rename from docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md rename to docs-source/content/oig/configure-design-console/Using-the-design-console-with-nginx-ssl.md index b385b184a..0b6b7f248 100644 --- a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (SSL).md +++ b/docs-source/content/oig/configure-design-console/Using-the-design-console-with-nginx-ssl.md @@ -68,7 +68,7 @@ Make sure you know the master hostname and ingress port for NGINX before proceed ``` NAME: governancedomain-nginx-designconsole - Mon Thu Jul 13 14:42:16 2022 + NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -225,7 +225,7 @@ The Design Console can be run from a container using X windows emulation. For example: ```bash - $ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash + $ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash ``` This will take you into a bash shell inside the container: @@ -357,7 +357,7 @@ The Design Console can be run from a container using X windows emulation. For example: ```bash - $ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash + $ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash ``` This will take you into a bash shell inside the container: diff --git a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md b/docs-source/content/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl.md similarity index 98% rename from docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md rename to docs-source/content/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl.md index 21a9d2ed7..1fb6f851b 100644 --- a/docs-source/content/oig/configure-design-console/Using the design console with NGINX (non-SSL).md +++ b/docs-source/content/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl.md @@ -69,7 +69,7 @@ Make sure you know the master hostname and ingress port for NGINX before proceed ``` NAME: governancedomain-nginx-designconsole - LAST DEPLOYED: Thu Jul 13 14:32:16 2022 + LAST DEPLOYED: NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -206,7 +206,7 @@ The Design Console can be run from a container using X windows emulation. 
For example: ```bash - $ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash + $ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash ``` This will take you into a bash shell inside the container: @@ -304,7 +304,7 @@ The Design Console can be run from a container using X windows emulation. For example: ```bash - $ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash + $ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- bash ``` This will take you into a bash shell inside the container: diff --git a/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md b/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md index 777fc9c58..5fcef6f5d 100644 --- a/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md +++ b/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S-ssl.md @@ -102,7 +102,7 @@ The instructions below explain how to set up NGINX as an ingress for the OIG dom tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= kind: Secret 
metadata: - creationTimestamp: "2022-07-13T14:02:50Z" + creationTimestamp: "" name: governancedomain-tls-cert namespace: oigns resourceVersion: "3319899" @@ -176,7 +176,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac ``` $ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress - LAST DEPLOYED: Tue Jul 13 14:04:40 2022 + LAST DEPLOYED: NAMESPACE: nginxssl STATUS: deployed REVISION: 1 @@ -241,7 +241,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac ``` NAME: nginx-ingress - LAST DEPLOYED: Tue Jul 13 14:06:42 2022 + LAST DEPLOYED: NAMESPACE: nginxssl STATUS: deployed REVISION: 1 @@ -299,32 +299,41 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac $ cd $WORKDIR/kubernetes/charts/ingress-per-domain ``` - Edit `values.yaml` and change the `domainUID` parameter to match your `domainUID`, for example `domainUID: governancedomain`. Change `sslType` to `SSL` and `secretName` to `governancedomain-tls-cert`. The file should look as follows: - + Edit `values.yaml` and change the `domainUID` parameter to match your `domainUID`, for example `domainUID: governancedomain`. Change `sslType` to `SSL`. The file should look as follows: ``` - # Load balancer type. Supported values are: TRAEFIK, NGINX + # Load balancer type. Supported values are: NGINX type: NGINX - - # Type of Configuration Supported Values are : NONSSL,SSL - # tls: NONSSL - tls: SSL - - # TLS secret name if the mode is SSL - secretName: governancedomain-tls-cert - # TimeOut value to be set for nginx parameters proxy-read-timeout and proxy-send-timeout - nginxTimeOut: 180 - - # WLS domain as backend to the load balancer + # SSL configuration Type. Supported Values are : NONSSL,SSL + sslType: SSL + + # domainType. Supported values are: oim + domainType: oim + + #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 + adminServerSSLPort: soaClusterName: soa_cluster soaManagedServerPort: 8001 + soaManagedServerSSLPort: oimClusterName: oim_cluster oimManagedServerPort: 14000 + oimManagedServerSSLPort: + + # Host specific values + hostName: + enabled: false + admin: + runtime: + internal: + + # Ngnix specific values + nginx: + nginxTimeOut: 180 ``` #### Create an ingress for the domain @@ -336,7 +345,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml ``` - **Note**: The `$WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. + **Note**: The `$WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. 
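+
+   If you are not sure what the current setting is in your checked-out copy of the chart, you can optionally confirm it before installing. This is a sketch only; it assumes `$WORKDIR` is set as in the earlier steps and that the annotation name is unchanged in your release:
+
+   ```bash
+   # Show the access log annotation currently set in the SSL ingress template
+   $ grep -n "enable-access-log" $WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-ssl.yaml
+   ```
+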
For example: @@ -349,7 +358,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac ``` NAME: governancedomain-nginx - LAST DEPLOYED: Tue Jul 13 14:07:51 2022 + LAST DEPLOYED: NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -402,50 +411,56 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac The output will look similar to the following: ``` + Name: governancedomain-nginx Namespace: oigns - Address: 10.96.160.58 + Address: 10.111.175.104 Default backend: default-http-backend:80 () Rules: Host Path Backends ---- ---- -------- * - /console governancedomain-adminserver:7001 (10.244.2.96:7001) - /em governancedomain-adminserver:7001 (10.244.2.96:7001) - /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) - /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) - /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) - /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) - Annotations: kubernetes.io/ingress.class: nginx + /console governancedomain-adminserver:7001 (10.244.2.50:7001) + /consolehelp governancedomain-adminserver:7001 (10.244.2.50:7001) + /em governancedomain-adminserver:7001 (10.244.2.50:7001) + /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /iam 
governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie + nginx.ingress.kubernetes.io/affinity-mode: persistent nginx.ingress.kubernetes.io/configuration-snippet: + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; more_set_input_headers "X-Forwarded-Proto: https"; more_set_input_headers "WL-Proxy-SSL: true"; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k + nginx.ingress.kubernetes.io/proxy-read-timeout: 180 + nginx.ingress.kubernetes.io/proxy-send-timeout: 180 + nginx.ingress.kubernetes.io/session-cookie-name: sticky Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal Sync 17s (x2 over 28s) nginx-ingress-controller Scheduled for sync + Normal Sync 18s (x2 over 38s) nginx-ingress-controller Scheduled for sync ``` 1. To confirm that the new Ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the `WebLogic ReadyApp framework`: @@ -474,8 +489,8 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com - * start date: Jul 13 13:05:21 2021 GMT - * expire date: Jul 13 13:05:21 2022 GMT + * start date: + * expire date: * common name: masternode.example.com * issuer: CN=masternode.example.com > GET /weblogic/ready HTTP/1.1 @@ -485,7 +500,7 @@ If you are using a Managed Service for your Kubernetes cluster, for example Orac > < HTTP/1.1 200 OK < Server: nginx/1.19.1 - < Date: Thu, 13 Jul 2022 14:09:57 GMT + < Date: < Content-Length: 0 < Connection: keep-alive < Strict-Transport-Security: max-age=15724800; includeSubDomains diff --git a/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S.md b/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S.md index 759054c38..db86523c6 100644 --- a/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S.md +++ b/docs-source/content/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-K8S.md @@ -86,7 +86,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl ``` NAME: nginx-ingress - LAST DEPLOYED: Thu 13 Jul 2022 14:13:33 GMT + LAST DEPLOYED: NAMESPACE: nginx STATUS: deployed REVISION: 1 @@ -151,7 +151,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl ``` NAME: nginx-ingress - LAST DEPLOYED: Thu Jul 13 14:15:33 2022 + LAST DEPLOYED: NAMESPACE: nginx STATUS: deployed REVISION: 1 @@ -212,27 +212,38 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl Edit `values.yaml` and change the `domainUID` parameter to match your `domainUID`, for 
example `domainUID: governancedomain`. Also change `sslType` to `NONSSL`. The file should look as follows: ``` - # Load balancer type. Supported values are: TRAEFIK, NGINX + # Load balancer type. Supported values are: NGINX type: NGINX - # Type of Configuration Supported Values are : NONSSL, SSL + # SSL configuration Type. Supported Values are : NONSSL,SSL sslType: NONSSL - - # TimeOut value to be set for nginx parameters proxy-read-timeout and proxy-send-timeout - nginxTimeOut: 180 - # TLS secret name if the mode is SSL - secretName: domain1-tls-cert + # domainType. Supported values are: oim + domainType: oim #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 + adminServerSSLPort: soaClusterName: soa_cluster soaManagedServerPort: 8001 + soaManagedServerSSLPort: oimClusterName: oim_cluster oimManagedServerPort: 14000 + oimManagedServerSSLPort: + + # Host specific values + hostName: + enabled: false + admin: + runtime: + internal: + + # Ngnix specific values + nginx: + nginxTimeOut: 180 ``` @@ -245,7 +256,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace --values kubernetes/charts/ingress-per-domain/values.yaml ``` - **Note**: The `/samples/kubernetes/charts/ingress-per-domain/templates//nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. + **Note**: The `/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-nonssl.yaml` has `nginx.ingress.kubernetes.io/enable-access-log` set to `false`. If you want to enable access logs then set this value to `true` before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained. 
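+
+   Optionally, you can also preview the ingress resource that the chart will generate before installing it. This is a sketch only; it assumes the command is run from the same directory as the `helm install` command above, that the edited `values.yaml` is in place, and that the domain namespace is `oigns`:
+
+   ```bash
+   # Render the chart locally without installing anything, so the generated
+   # ingress rules and annotations can be reviewed first
+   $ helm template governancedomain-nginx kubernetes/charts/ingress-per-domain \
+       --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml
+   ```
+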
For example: @@ -259,7 +270,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl ``` $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml NAME: governancedomain-nginx - LAST DEPLOYED: Thu Jul 13 14:18:23 2022 + LAST DEPLOYED: NAMESPACE: oigns STATUS: deployed REVISION: 1 @@ -320,38 +331,42 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl Host Path Backends ---- ---- -------- * - /console governancedomain-adminserver:7001 (10.244.2.59:7001) - /em governancedomain-adminserver:7001 (10.244.2.59:7001) - /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) - /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) - /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) - /identity governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /admin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /oim governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /iam governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /ucs governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) - /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) + /console governancedomain-adminserver:7001 (10.244.2.50:7001) + /consolehelp governancedomain-adminserver:7001 (10.244.2.50:7001) + /em governancedomain-adminserver:7001 (10.244.2.50:7001) + /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.51:8001) + /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /provisioning-callback 
governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) + /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.52:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie + nginx.ingress.kubernetes.io/affinity-mode: persistent nginx.ingress.kubernetes.io/enable-access-log: false + nginx.ingress.kubernetes.io/proxy-read-timeout: 180 + nginx.ingress.kubernetes.io/proxy-send-timeout: 180 + nginx.ingress.kubernetes.io/session-cookie-name: sticky Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal Sync 35s nginx-ingress-controller Scheduled for sync + Normal Sync 27s nginx-ingress-controller Scheduled for sync ``` 1. To confirm that the new ingress is successfully routing to the domain's server pods, run the following command to send a request to the URL for the `WebLogic ReadyApp framework`: @@ -390,7 +405,7 @@ If you are using a Managed Service for your Kubernetes cluster,for example Oracl > < HTTP/1.1 200 OK < Server: nginx/1.19.2 - < Date: Thu Jul 13 14:21:14 2022 + < Date: < Content-Length: 0 < Connection: keep-alive < diff --git a/docs-source/content/oig/create-oig-domains/_index.md b/docs-source/content/oig/create-oig-domains/_index.md index 59d0075ed..adbe58d48 100644 --- a/docs-source/content/oig/create-oig-domains/_index.md +++ b/docs-source/content/oig/create-oig-domains/_index.md @@ -71,7 +71,7 @@ The sample scripts for Oracle Identity Governance domain deployment are availabl ``` domainUID: governancedomain domainHome: /u01/oracle/user_projects/domains/governancedomain - image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- + image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- imagePullSecretName: orclcred weblogicCredentialsSecretName: oig-domain-credentials logHome: /u01/oracle/user_projects/domains/logs/governancedomain @@ -174,7 +174,7 @@ generated artifacts: export initialManagedServerReplicas="1" export managedServerNameBase="oim_server" export managedServerPort="14000" - export image="container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-" + export image="container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-" export imagePullPolicy="IfNotPresent" export imagePullSecretName="orclcred" export productionModeEnabled="true" @@ -463,7 +463,7 @@ The default domain created by the script has the following characteristics: API Version: weblogic.oracle/v8 Kind: Domain Metadata: - Creation Timestamp: 2022-07-13T11:44:17Z + Creation Timestamp: Generation: 2 Managed Fields: API Version: weblogic.oracle/v8 @@ -478,7 +478,7 @@ The default domain created by the script has the following characteristics: f:weblogic.domainUID: Manager: kubectl-client-side-apply Operation: Update - Time: 2022-07-13T14:59:44Z + Time: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: @@ -491,7 +491,7 @@ The default domain created by the script has the following characteristics: f:startTime: Manager: Kubernetes Java Client Operation: Update - Time: 2022-07-13T11:51:12Z + Time: Resource Version: 383381 UID: ea95c549-c414-42a6-8de4-beaf1204872e Spec: @@ 
-545,7 +545,7 @@ The default domain created by the script has the following characteristics: Domain Home: /u01/oracle/user_projects/domains/governancedomain Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true - Image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- + Image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- Image Pull Policy: IfNotPresent Image Pull Secrets: Name: orclcred @@ -583,7 +583,7 @@ The default domain created by the script has the following characteristics: Replicas: 1 Replicas Goal: 1 Conditions: - Last Transition Time: 2022-07-13T11:59:53.249700Z + Last Transition Time: Reason: ServersReady Status: True Type: Available @@ -591,7 +591,7 @@ The default domain created by the script has the following characteristics: Servers: Desired State: RUNNING Health: - Activation Time: 2022-07-13T11:46:49.874000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -602,7 +602,7 @@ The default domain created by the script has the following characteristics: Cluster Name: oim_cluster Desired State: RUNNING Health: - Activation Time: 2022-07-13T15:06:21.693000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -625,7 +625,7 @@ The default domain created by the script has the following characteristics: Cluster Name: soa_cluster Desired State: RUNNING Health: - Activation Time: 2022-07-13T11:49:26.340000Z + Activation Time: Overall Health: ok Subsystems: Subsystem Name: ServerRuntime @@ -645,7 +645,7 @@ The default domain created by the script has the following characteristics: Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server5 - Start Time: 2022-07-13T14:50:19.148541Z + Start Time: Events: Type Reason Age From Message ---- ------ ---- ---- ------- diff --git a/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md b/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md index aaa69ec65..db7588f5a 100644 --- a/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md +++ b/docs-source/content/oig/manage-oig-domains/logging-and-visualization.md @@ -9,257 +9,514 @@ After the OIG domain is set up you can publish operator and WebLogic Server logs ### Install Elasticsearch and Kibana -1. If your domain namespace is anything other than `oigns`, edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of `oigns` to your domain namespace. +If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +[Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html) -1. Create a Kubernetes secret to access the elasticsearch and kibana container images: +### Create the logstash pod - **Note:** You must first have a user account on [hub.docker.com](https://hub.docker.com). +#### Variables used in this chapter + +In order to create the logstash pod, you must create several files. These files contain variables which you must substitute with variables applicable to your environment. + +Most of the values for the variables will be based on your ELK deployment as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html). 
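+
+Before setting the values, it can be useful to confirm that the Elasticsearch endpoint is reachable from inside the Kubernetes cluster, because that is where the logstash pod will run. The following is a sketch only; it uses the sample URL and credentials shown in the table below, the `oigns` namespace, and a public `curlimages/curl` image, so substitute the values for your environment:
+
+```bash
+# Run a throwaway pod in the domain namespace and call the Elasticsearch endpoint.
+# The pod is deleted automatically when the command completes.
+$ kubectl run elk-check -n oigns --rm -it --restart=Never --image=curlimages/curl \
+    --command -- curl -k -u logstash_internal:password https://elasticsearch.example.com:9200
+```
+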
+ +The table below outlines the variables and values you must set: + + + +| Variable | Sample Value | Description | +| --- | --- | --- | +| `` | `8.3.1` | The version of logstash you want to install.| +| `` | `true` | If SSL is enabled for ELK set the value to `true`, or if NON-SSL set to `false`. This value must be lowercase.| +| `` | `MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75..etc...P9ovZ/EKPpE6Gq` | If `ELK_SSL=true`, this is the BASE64 version of the certificate between `---BEGIN CERTIFICATE---` and `---END CERTIFICATE---`. This is the Certificate Authority (CA) certificate(s), that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticserver server. See [Copying the Elasticsearch Certificate](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C1FC1063-FA76-48AD-AE3D-A39390874C74) for details on how to get the correct certificate.| +| `` | `https://elasticsearch.example.com:9200` | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.| +| `` | `oigns` | The domain namespace.| +| `` | `logstash_internal` | The name of the user for logstash to access Elasticsearch.| +| `` | `password` | The password for ELK_USER.| +| `` | `apikey` | The API key details.| + + +#### Create kubernetes secrets + +1. Create a Kubernetes secret for Elasticsearch using the API Key or Password. + + a) If ELK uses an API Key for authentication: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= + ``` - ```bash - $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="" --docker-password= --docker-email= --namespace= - ``` - For example: ``` - $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="username" --docker-password= --docker-email=user@example.com --namespace=oigns + $ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password= ``` The output will look similar to the following: - ```bash - secret/dockercred created - ``` - -1. Create the Kubernetes resource using the following command: + ``` + secret/elasticsearch-pw-elastic created + ``` - ```bash - $ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml + b) If ELK uses a password for authentication: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= ``` - + + For example: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n oigns --from-literal password= + ``` + The output will look similar to the following: ``` - deployment.apps/elasticsearch created - service/elasticsearch created - deployment.apps/kibana created - service/kibana created + secret/elasticsearch-pw-elastic created ``` -1. Run the following command to ensure Elasticsearch is used by the operator: + + **Note**: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above. + + +1. Create a Kubernetes secret to access the required images on [hub.docker.com](https://hub.docker.com): + + **Note**: Before executing the command below, you must first have a user account on [hub.docker.com](https://hub.docker.com). 
```bash - $ helm get values --all weblogic-kubernetes-operator -n opns + kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \ + --docker-username="" \ + --docker-password= --docker-email= \ + --namespace= ``` + For example, + + ```bash + kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" \ + --docker-username="user@example.com" \ + --docker-password=password --docker-email=user@example.com \ + --namespace=oigns + ``` + The output will look similar to the following: + ```bash + secret/dockercred created ``` - COMPUTED VALUES: - clusterSizePaddingValidationEnabled: true - domainNamespaceLabelSelector: weblogic-operator=enabled - domainNamespaceSelectionStrategy: LabelSelector - domainNamespaces: - - default - elasticSearchHost: elasticsearch.default.svc.cluster.local - elasticSearchPort: 9200 - elkIntegrationEnabled: true - enableClusterRoleBinding: true - externalDebugHttpPort: 30999 - externalRestEnabled: false - externalRestHttpsPort: 31001 - externalServiceNameSuffix: -ext - image: ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 - imagePullPolicy: IfNotPresent - internalDebugHttpPort: 30999 - introspectorJobNameSuffix: -introspector - javaLoggingFileCount: 10 - javaLoggingFileSizeLimit: 20000000 - javaLoggingLevel: FINE - logStashImage: logstash:6.6.0 - remoteDebugNodePortEnabled: false - serviceAccount: op-sa - suspendOnDebugStartup: false - ``` - -1. To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command: + +#### Find the mountPath details + +1. Run the following command to get the `mountPath` of your domain: + ```bash - $ kubectl get pods -n | grep 'elasticsearch\|kibana' + $ kubectl describe domains -n | grep "Mount Path" ``` For example: ```bash - $ kubectl get pods -n oigns | grep 'elasticsearch\|kibana' + $ kubectl describe domains governancedomain -n oigns | grep "Mount Path" ``` The output will look similar to the following: ``` - elasticsearch-857bd5ff6b-tvqdn 1/1 Running 0 2m9s - kibana-594465687d-zc2rt 1/1 Running 0 2m9s + Mount Path: /u01/oracle/user_projects/domains ``` - -### Create the logstash pod - -OIG Server logs can be pushed to the Elasticsearch server using the `logstash` pod. The `logstash` pod needs access to the persistent volume of the OIG domain created previously, for example `governancedomain-domain-pv`. The steps to create the `logstash` pod are as follows: +#### Find the persistentVolumeClaim details -1. Obtain the OIG domain persistence volume details: +1. Run the following command to get the OIG domain persistence volume details: - ```bash + ``` $ kubectl get pv -n ``` - + For example: - - ```bash + + ``` $ kubectl get pv -n oigns ``` The output will look similar to the following: - + ``` NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE governancedomain-domain-pv 10Gi RWX Retain Bound oigns/governancedomain-domain-pvc governancedomain-oim-storage-class 28h ``` - Make note of the `CLAIM` value, for example in this case `governancedomain-oim-pvc` + Make note of the CLAIM value, for example in this case `governancedomain-oim-pvc`. + +#### Create the Configmap + +1. 
Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash_cm.yaml` file as follows: + + ``` + apiVersion: v1 + kind: ConfigMap + metadata: + name: oig-logstash-configmap + namespace: + data: + logstash.yml: | + #http.host: "0.0.0.0" + elk.crt: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + logstash-config.conf: | + input { + file { + path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log" + tags => "Adminserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log" + tags => "soaserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log" + tags => "Oimserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log" + tags => "Adminserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log" + tags => "Soa_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log" + tags => "Oimserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log" + tags => "Access_logs" + start_position => beginning + } + } + filter { + grok { + match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc} > <%{DATA:log_number}> <%{DATA:log_message}>" ] + } + if "_grokparsefailure" in [tags] { + mutate { + remove_tag => [ "_grokparsefailure" ] + } + } + } + output { + elasticsearch { + hosts => [""] + cacert => '/usr/share/logstash/config/certs/elk.crt' + index => "oiglogs-000001" + ssl => + ssl_certificate_verification => false + user => "" + password => "${ELASTICSEARCH_PASSWORD}" + api_key => "${ELASTICSEARCH_PASSWORD}" + } + } + ``` -1. Run the following command to get the `mountPath` of your domain: + Change the values in the above file as follows: - ```bash - $ kubectl describe domains -n | grep "Mount Path" - ``` + + Change the ``, ``, ``, ``, and `` to match the values for your environment. + + If using SSL, make sure the value for is indented correctly. You can use the command: `sed 's/^/ /' elk.crt` to output the certificate with the correct indentation. + + If not using SSL, delete the `` line, but leave the -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----. + + Change `/u01/oracle/user_projects/domains` to match the `mountPath` returned earlier + + If your domainUID is anything other than `governancedomain`, change each instance of `governancedomain` to your domainUID. + + If using API KEY for your ELK authentication, delete the `user` and `password` lines. + + If using a password for ELK authentication, delete the `api_key` line. + + If no authentication is used for ELK, delete the `user`, `password`, and `api_key` lines. 
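+
+   Once these changes have been made, you can optionally check that the edited file still parses as valid YAML before creating anything in the cluster. This is a sketch only; it assumes the file was created in the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory as above:
+
+   ```bash
+   # Client-side dry run: validates the manifest without creating the ConfigMap
+   $ kubectl apply --dry-run=client -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml
+   ```
+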
For example: - ```bash - $ kubectl describe domains governancedomain -n oigns | grep "Mount Path" + ``` + apiVersion: v1 + kind: ConfigMap + metadata: + name: oig-logstash-configmap + namespace: oigns + data: + logstash.yml: | + #http.host: "0.0.0.0" + elk.crt: | + -----BEGIN CERTIFICATE----- + MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75NvPPQUN5kXqNQUwDQYJKoZIhvcNAQELBQAw + NTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJj + aC1odHRwMB4XDTIyMDgyNDA1MTU1OVoXDTIzMDgyNDA1MjU1OVowNTEWMBQGA1UE + CxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJjaC1odHRwMIIB + IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsQOnxUm9uF32+lyc9SA3WcMZ + P1X7TbHMDuO/l3UHBUf5F/bt2m3YkGw+enIos9wzuUNpjIwVt8q4WrRCMl80nAQ0 + yCXrfLSI9zaHxEC8Ht7V0U+7Sgu5uysD4tyZ9T0Q5zjvkWS6oBPxhfri3OQfPvUW + gQ6wJaPGDteYZAwiBMvPEkmh0VUTBTXjToHrtrT7pzmz5BBWnUzdf+jv0+nEfedm + mMWw/8jqyqid7bu7bo6gKBZ8zk06n2iMaXzmGW34QlYRLBgubThhxyDE7joZ4NTA + UFEJecZR2fccmpN8CNkT9Ex4Hq88nh2OP5XKKPNF4kLh2u6F4auF7Uz42jwvIwID + AQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG + AQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLQb/IjHHkSmHgKSPY7r + zBIJZMbdMA0GCSqGSIb3DQEBCwUAA4IBBQA01qY0tGIPsKNkn7blxRjEYkTg59Z5 + vi6MCpGtdoyZeJgH621IpwyB34Hpu1RQfyg1aNgmOtIK9cvQZRl008DHF4AiHYhU + 6xe3cjI/QxDXwitoBgWl+a0mkwhSmzJt7TuzImq7RMO4ws3M/nGeNUwFjwsQu86+ + N/Y3RuuUVbK1xy8Jdz3FZADIgHVPN6GQwYKEpWrZNapKBXjunjCZmpBFxqGMRF44 + fcSKFlFkwjyTq4kgq44NPv18NMfKCYZcK7ttRTiep77vKB7No/TM69Oz5ZHhQ+2Q + pSGg3QF+1fOCFCgWXFEOle6lQ5i8a/GihY0FuphrZxP9ovZ/EKPpE6Gq + -----END CERTIFICATE----- + logstash-config.conf: | + input { + file { + path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log" + tags => "Adminserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log" + tags => "soaserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log" + tags => "Oimserver_log" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log" + tags => "Adminserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log" + tags => "Soa_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log" + tags => "Oimserver_diagnostic" + start_position => beginning + } + file { + path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log" + tags => "Access_logs" + start_position => beginning + } + } + filter { + grok { + match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc} > <%{DATA:log_number}> <%{DATA:log_message}>" ] + } + if "_grokparsefailure" in [tags] { + mutate { + remove_tag => [ "_grokparsefailure" ] + } + } + } + output { + elasticsearch { + hosts => ["https://elasticsearch.example.com:9200"] + cacert => '/usr/share/logstash/config/certs/elk.crt' + index => "oiglogs-000001" + ssl => true + ssl_certificate_verification => false + user => "logstash_internal" + password => "${ELASTICSEARCH_PASSWORD}" + } + } ``` - The output will look similar to the following: + +1. 
Run the following command to create the configmap: + ``` - Mount Path: /u01/oracle/user_projects/domains + $ kubectl apply -f logstash_cm.yaml ``` -1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows. - Change the `claimName` and `mountPath` values to match the values returned in the previous commands. Change `namespace` to your domain namespace e.g `oigns`: + The output will look similar to the following: + + ``` + configmap/oig-logstash-configmap created + ``` - ``` +#### Deploy the logstash pod + +1. Navigate to the `$WORKDIR/kubernetes/elasticsearch-and-kibana` directory and create a `logstash.yaml` file as follows: + + ``` apiVersion: apps/v1 kind: Deployment metadata: - name: logstash-wls - namespace: oigns + name: oig-logstash + namespace: spec: selector: matchLabels: - k8s-app: logstash-wls + k8s-app: logstash template: # create pods using pod definition in this template metadata: labels: - k8s-app: logstash-wls + k8s-app: logstash spec: + imagePullSecrets: + - name: dockercred + containers: + - command: + - logstash + image: logstash: + imagePullPolicy: IfNotPresent + name: oig-logstash + env: + - name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-pw-elastic + key: password + resources: + ports: + - containerPort: 5044 + name: logstash + volumeMounts: + - mountPath: /u01/oracle/user_projects/domains + name: weblogic-domain-storage-volume + - name: shared-logs + mountPath: /shared-logs + - mountPath: /usr/share/logstash/pipeline/ + name: oig-logstash-pipeline + - mountPath: /usr/share/logstash/config/logstash.yml + subPath: logstash.yml + name: config-volume + - mountPath: /usr/share/logstash/config/certs + name: elk-cert volumes: + - configMap: + defaultMode: 420 + items: + - key: elk.crt + path: elk.crt + name: oig-logstash-configmap + name: elk-cert + - configMap: + defaultMode: 420 + items: + - key: logstash-config.conf + path: logstash-config.conf + name: oig-logstash-configmap + name: oig-logstash-pipeline + - configMap: + defaultMode: 420 + items: + - key: logstash.yml + path: logstash.yml + name: oig-logstash-configmap + name: config-volume - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc - name: shared-logs emptyDir: {} - imagePullSecrets: + ``` + + + Change the ``, and `` to match the values for your environment. 
+ + Change `/u01/oracle/user_projects/domains` to match the `mountPath` returned earlier + + Change the `claimName` value to match the `claimName` returned earlier + + + For example: + + ``` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: oig-logstash + namespace: oigns + spec: + selector: + matchLabels: + k8s-app: logstash + template: # create pods using pod definition in this template + metadata: + labels: + k8s-app: logstash + spec: + imagePullSecrets: - name: dockercred containers: - - name: logstash - image: logstash:6.6.0 - command: ["/bin/sh"] - args: ["/usr/share/logstash/bin/logstash", "-f", "/u01/oracle/user_projects/domains/logstash/logstash.conf"] + - command: + - logstash + image: logstash:8.3.1 imagePullPolicy: IfNotPresent + name: oig-logstash + env: + - name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: elasticsearch-pw-elastic + key: password + resources: + ports: + - containerPort: 5044 + name: logstash volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs - ports: - - containerPort: 5044 - name: logstash - ``` - -1. In the persistent volume directory that corresponds to the mountPath `/u01/oracle/user_projects/domains`, create a `logstash` directory. For example: - - ```bash - $ mkdir -p /scratch/shared/governancedomainpv/logstash - ``` - -1. Create a `logstash.conf` in the newly created `logstash` directory that contains the following. Make sure the paths correspond to your `mountPath` and `domain` name. Also, if your namespace is anything other than `oigns` change `"elasticsearch.oigns.svc.cluster.local:9200"` to `"elasticsearch..svc.cluster.local:9200"`:: - - ``` - input { - file { - path => "/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log" - tags => "Adminserver_log" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log" - tags => "soaserver_log" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log" - tags => "Oimserver_log" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log" - tags => "Adminserver_diagnostic" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log" - tags => "Soa_diagnostic" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log" - tags => "Oimserver_diagnostic" - start_position => beginning - } - file { - path => "/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log" - tags => "Access_logs" - start_position => beginning - } - } - filter { - grok { - match => [ "message", "<%{DATA:log_timestamp}> <%{WORD:log_level}> <%{WORD:thread}> <%{HOSTNAME:hostname}> <%{HOSTNAME:servername}> <%{DATA:timer}> <<%{DATA:kernel}>> <> <%{DATA:uuid}> <%{NUMBER:timestamp}> <%{DATA:misc}> <%{DATA:log_number}> <%{DATA:log_message}>" ] - } - if "_grokparsefailure" in [tags] { - mutate { - remove_tag => [ "_grokparsefailure" ] - } - } - } - output { - elasticsearch { - hosts => ["elasticsearch.oigns.svc.cluster.local:9200"] - } - } - ``` + - mountPath: /usr/share/logstash/pipeline/ + name: oig-logstash-pipeline + - mountPath: /usr/share/logstash/config/logstash.yml + subPath: 
logstash.yml + name: config-volume + - mountPath: /usr/share/logstash/config/certs + name: elk-cert + volumes: + - configMap: + defaultMode: 420 + items: + - key: elk.crt + path: elk.crt + name: oig-logstash-configmap + name: elk-cert + - configMap: + defaultMode: 420 + items: + - key: logstash-config.conf + path: logstash-config.conf + name: oig-logstash-configmap + name: oig-logstash-pipeline + - configMap: + defaultMode: 420 + items: + - key: logstash.yml + path: logstash.yml + name: oig-logstash-configmap + name: config-volume + - name: weblogic-domain-storage-volume + persistentVolumeClaim: + claimName: governancedomain-domain-pvc + - name: shared-logs + emptyDir: {} + ``` + 1. Deploy the `logstash` pod by executing the following command: ```bash $ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml ``` - + The output will look similar to the following: ``` - deployment.apps/logstash-wls created + deployment.apps/oig-logstash created ``` 1. Run the following command to check the `logstash` pod is created correctly: @@ -277,122 +534,66 @@ OIG Server logs can be pushed to the Elasticsearch server using the `logstash` p The output should look similar to the following: ``` - NAME READY STATUS RESTARTS AGE - elasticsearch-678ff4fb5-89rpf 1/1 Running 0 13m + NAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 90m - governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 25h - governancedomain-oim-server1 1/1 Running 0 87m - governancedomain-soa-server1 1/1 Running 0 87m - kibana-589466bb89-k8wdr 1/1 Running 0 13m - logstash-wls-f448b44c8-92l27 1/1 Running 0 7s + governancedomain-create-fmw-infra-sample-domain-job-fqgnr 0/1 Completed 0 2d19h + governancedomain-oim-server1 1/1 Running 0 88m + governancedomain-soa-server1 1/1 Running 0 88m + helper 1/1 Running 0 2d20h + oig-logstash-77fbbc66f8-lsvcw 1/1 Running 0 3m25s ``` + **Note**: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using: - -### Verify and access the Kibana console - -1. Check if the indices are created correctly in the elasticsearch pod shown above: - - ```bash - $ kubectl exec -it -n -- /bin/bash ``` - - For example: - - ```bash - $ kubectl exec -it elasticsearch-678ff4fb5-89rpf -n oigns -- /bin/bash - ``` - - This will take you into a bash shell in the elasticsearch pod: - - ```bash - [root@elasticsearch-678ff4fb5-89rpf elasticsearch]# - ``` - -1. In the elasticsearch bash shell run the following to check the indices: - - ```bash - [root@elasticsearch-678ff4fb5-89rpf elasticsearch]# curl -i "127.0.0.1:9200/_cat/indices?v" + $ kubectl logs -f oig-logstash- -n oigns ``` - The output will look similar to the following: - - ``` - HTTP/1.1 200 OK - content-type: text/plain; charset=UTF-8 - content-length: 580 - - health status index uuid pri rep docs.count docs.deleted store.size pri.store.size - yellow open logstash-2022.03.10 7oXXCureSWKwNY0626Szeg 5 1 46887 0 11.7mb 11.7mb - green open .kibana_task_manager alZtnv2WRy6Y4iSRIbmCrQ 1 0 2 0 12.6kb 12.6kb - green open .kibana_1 JeZKrO4fS_GnRL92qRmQDQ 1 0 2 0 7.6kb 7.6kb - ``` - - Exit the bash shell by typing `exit`. - -1. Find the Kibana port by running the following command: - - ```bash - $ kubectl get svc -n | grep kibana - ``` - - For example: - - ```bash - $ kubectl get svc -n oigns | grep kibana - ``` + Most errors occur due to misconfiguration of the `logstash_cm.yaml` or `logstash.yaml`. 
This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation. - The output will look similar to the following: + If the pod has errors, delete the pod and configmap as follows: ``` - kibana NodePort 10.111.224.230 5601:31490/TCP 11m + $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml + $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash_cm.yaml ``` - In the example above the Kibana port is `31490`. - + Once you have resolved the issue in the yaml files, run the commands outlined earlier to recreate the configmap and logstash pod. -1. Access the Kibana console with `http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana`. -1. Click on **Dashboard** in the left hand Navigation Menu. +### Verify and access the Kibana console -1. In the **Create index pattern** page enter `logstash*` and click **Next Step**. +To access the Kibana console you will need the Kibana URL as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C0013AA8-B229-4237-A1D8-8F38FA6E2CEC). -1. From the **Time Filter field name** drop down menu select `@timestamp` and click **Create index pattern**. -1. Once the index pattern is created click on **Discover** in the navigation menu to view the logs. +**For Kibana 7.7.x and below**: -For more details on how to use the Kibana console see the [Kibana Guide](https://www.elastic.co/guide/en/kibana/current/index.html) - -### Cleanup +1. Access the Kibana console with `http://:/app/kibana` and login with your username and password. -To clean up the Elasticsearch and Kibana install: +1. From the Navigation menu, navigate to **Management** > **Kibana** > **Index Patterns**. -1. Run the following command to delete logstash: +1. In the **Create Index Pattern** page enter `oiglogs*` for the **Index pattern** and click **Next Step**. - ```bash - $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml - ``` +1. In the **Configure settings** page, from the **Time Filter field name** drop down menu select `@timestamp` and click **Create index pattern**. - The output will look similar to the following: +1. Once the index pattern is created click on **Discover** in the navigation menu to view the OIG logs. - ``` - deployment.apps "logstash-wls" deleted - ``` -1. Run the following command to delete Elasticsearch and Kibana: +**For Kibana version 7.8.X and above**: - ```bash - $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml - ``` +1. Access the Kibana console with `http://:/app/kibana` and login with your username and password. - The output will look similar to the following: +1. From the Navigation menu, navigate to **Management** > **Stack Management**. - ``` - deployment.apps "elasticsearch" deleted - service "elasticsearch" deleted - deployment.apps "kibana" deleted - service "kibana" deleted - ``` - +1. Click **Data Views** in the **Kibana** section. + +1. Click **Create Data View** and enter the following information: + + + Name: `oiglogs*` + + Timestamp: `@timestamp` - \ No newline at end of file +1. Click **Create Data View**. + +1. From the Navigation menu, click **Discover** to view the log file entries. + +1. From the drop down menu, select `oiglogs*` to view the log file entries. 
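+
+If no log entries appear in Kibana, you can optionally query Elasticsearch directly to confirm that logstash is shipping data. This is a sketch only; it uses the sample Elasticsearch URL and `logstash_internal` credentials from the variables table earlier in this section, so substitute your own values:
+
+```bash
+# List the OIG log indices and their document counts
+$ curl -k -u logstash_internal:password "https://elasticsearch.example.com:9200/_cat/indices/oiglogs*?v"
+```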
\ No newline at end of file diff --git a/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md b/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md index b8b1dc5e2..a56fa09ff 100644 --- a/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md +++ b/docs-source/content/oig/manage-oig-domains/monitoring-oim-domains.md @@ -22,7 +22,7 @@ The `$WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh` sets up the m For usage details execute `./setup-monitoring.sh -h`. -1. Edit the `$WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml` and change the `domainUID`, `domainNamespace`, and `weblogicCredentialsSecretName` to correspond to your deployment. For example: +1. Edit the `$WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml` and change the `domainUID`, `domainNamespace`, and `weblogicCredentialsSecretName` to correspond to your deployment. Also change `wlsMonitoringExporterTosoaCluster`, `wlsMonitoringExporterTooimCluster`, `exposeMonitoringNodePort` to `true`. For example: ``` version: create-oimcluster-monitoring-inputs-v1 @@ -108,11 +108,10 @@ For usage details execute `./setup-monitoring.sh -h`. ...Successfully got an update from the "stable" chart repository ...Successfully got an update from the "prometheus" chart repository ...Successfully got an update from the "prometheus-community" chart repository - ...Successfully got an update from the "appscode" chart repository Update Complete. ⎈Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring - LAST DEPLOYED: Tue Jul 13 14:58:56 2022 + LAST DEPLOYED: NAMESPACE: monitoring STATUS: deployed REVISION: 1 @@ -162,14 +161,14 @@ For usage details execute `./setup-monitoring.sh -h`. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -177,14 +176,14 @@ For usage details execute `./setup-monitoring.sh -h`. Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -192,14 +191,14 @@ For usage details execute `./setup-monitoring.sh -h`. Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. 
- + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -210,7 +209,7 @@ For usage details execute `./setup-monitoring.sh -h`. Exiting WebLogic Scripting Tool. - + Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created @@ -532,7 +531,7 @@ Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter. ``` -1. Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain: +1. Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Identity Governance domain: ```bash $ cd $WORKDIR/kubernetes/monitoring-service/scripts @@ -559,22 +558,22 @@ Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter. Type help() for help on available commands - Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... - Successfully connected to Admin Server "AdminServer" that belongs to domain "accessdomain". + Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... + Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomaindomain". Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -582,14 +581,14 @@ Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter. Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... - + ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -597,14 +596,14 @@ Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter. Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... - + .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. - + .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start @@ -614,7 +613,7 @@ Generate the WebLogic Monitoring Exporter deployment package. The `wls-exporter. Exiting WebLogic Scripting Tool. 
- + ``` #### Configure Prometheus Operator diff --git a/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md b/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md index 06a3c57e2..5a39eaad6 100644 --- a/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md +++ b/docs-source/content/oig/manage-oig-domains/wlst-admin-operations.md @@ -259,9 +259,9 @@ wls:/governancedomain/serverConfig/Servers> ```bash wls:/offline> connect('weblogic','','t3s://governancedomain-adminserver-ssl:7002') Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ... - - - + + + Successfully connected to Admin Server "AdminServer" that belongs to domain "governancedomain". wls:/governancedomain/serverConfig/> @@ -272,9 +272,9 @@ wls:/governancedomain/serverConfig/Servers> ```bash wls:/offline> connect('weblogic','','t3s://governancedomain-cluster-oim-cluster-ssl:14101') Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ... - - - + + + Successfully connected to managed Server "oim_server1" that belongs to domain "governancedomain". wls:/governancedomain/serverConfig/> diff --git a/docs-source/content/oig/patch-and-upgrade/_index.md b/docs-source/content/oig/patch-and-upgrade/_index.md index ad892f8c0..e07031f0c 100644 --- a/docs-source/content/oig/patch-and-upgrade/_index.md +++ b/docs-source/content/oig/patch-and-upgrade/_index.md @@ -5,7 +5,11 @@ pre = "11. " description= "This document provides steps to patch or upgrade an OIG image, or WebLogic Kubernetes Operator." +++ -Patch an existing Oracle OIG image, or upgrade the WebLogic Kubernetes Operator release. +This section shows you how to upgrade the WebLogic Kubernetes Operator, upgrade the OIG image, and patch the OIG domain. It also shows you how to upgrade the Elasticsearch and Kibana stack, and the Ingress. + +The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to. + +Please refer to the [Release Notes](../release-notes) for information on which upgrade steps are necessary for the version you are upgrading to. {{% children style="h4" description="true" %}} diff --git a/docs-source/content/oig/patch-and-upgrade/patch-an-image.md b/docs-source/content/oig/patch-and-upgrade/patch-an-image.md new file mode 100644 index 000000000..455d86a76 --- /dev/null +++ b/docs-source/content/oig/patch-and-upgrade/patch-an-image.md @@ -0,0 +1,120 @@ +--- +title: "b. Patch an image" +description: "Instructions on how to update your OIG Kubernetes cluster with a new OIG container image." +--- + + +### Introduction + +The OIG domain patching script automatically performs the update of your OIG Kubernetes cluster with a new OIG container image. + +The script executes the following steps sequentially: + +* Checks if the helper pod exists in the given namespace. If yes, then it deletes the helper pod. +* Brings up a new helper pod with the new image. +* Stops the Administration Server, SOA and OIM managed servers using `serverStartPolicy` set as `NEVER` in the domain definition yaml. +* Waits for all servers to be stopped (default timeout 2000s) +* Introspects database properties including credentials from the job configmap. +* Performs database schema changes from the helper pod +* Starts the Administration Server, SOA and OIM managed servers by setting `serverStartPolicy` to IF_NEEDED and `image` to new image tag. 
+* Waits for all the servers to be ready (default timeout 2000s) + +The script exits with a failure if a configurable timeout is reached before the target pod count is reached, depending upon the domain configuration. It also exits if there is any failure while patching the database schema and domain. + +**Note**: The script execution will cause downtime while patching the OIG deployment and database schemas. + +### Prerequisites + +Before you begin, perform the following steps: + +1. Review the [Domain resource](https://oracle.github.io/weblogic-kubernetes-operator/userguide/managing-domains/domain-resource) documentation. + +1. Ensure that you have a running OIG deployment in your cluster. + +1. Ensure that the database is up and running. + + +### Download the latest code repository + +Download the latest code repository as follows: + +1. Create a working directory to setup the source code. + ```bash + $ mkdir + ``` + + For example: + ```bash + $ mkdir /scratch/OIGK8Slatest + ``` + +1. Download the latest OIG deployment scripts from the OIG repository. + + ```bash + $ cd + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + + For example: + + ```bash + $ cd /scratch/OIGK8Slatest + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + +1. Set the `$WORKDIR` environment variable as follows: + + ```bash + $ export WORKDIR=/fmw-kubernetes/OracleIdentityGovernance + ``` + + For example: + + ```bash + $ export WORKDIR=/scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance + ``` + +### Run the patch domain script + +1. Run the patch domain script as follows. Specify the inputs required by the script. If you need help understanding the inputs run the command help `-h`. + + ```bash + $ cd $WORKDIR/kubernetes/domain-lifecycle + $ ./patch_oig_domain.sh -h + $ ./patch_oig_domain.sh -i -n + ``` + + For example: + + ```bash + $ cd $WORKDIR/kubernetes/domain-lifecycle + $ ./patch_oig_domain.sh -h + $ ./patch_oig_domain.sh -i 12.2.1.4.0-8-ol7- -n oigns + ``` + + The output will look similar to the following + + ``` + [INFO] Found domain name: governancedomain + [INFO] Image Registry: container-registry.oracle.com/middleware/oig_cpu + [INFO] Domain governancedomain is currently running with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- + current no of pods under governancedomain are 3 + [INFO] The pod helper already exists in namespace oigns. + [INFO] Deleting pod helper + pod "helper" deleted + [INFO] Fetched Image Pull Secret: orclcred + [INFO] Creating new helper pod with image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- + pod/helper created + Checking helper Running + [INFO] Stopping Admin, SOA and OIM servers in domain governancedomain. This may take some time, monitor log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-/stop_servers.log for details + [INFO] All servers are now stopped successfully. Proceeding with DB Schema changes + [INFO] Patching OIM schemas... + [INFO] DB schema update successful. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-/patch_oim_wls.log for details + [INFO] Starting Admin, SOA and OIM servers with new image container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- + [INFO] Waiting for 3 weblogic pods to be ready..This may take several minutes, do not close the window. 
Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-/monitor_weblogic_pods.log for progress + [SUCCESS] All servers under governancedomain are now in ready state with new image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- + ``` + + The logs are available at `$WORKDIR/kubernetes/domain-lifecycle` by default. A custom log location can also be provided to the script. + + **Note**: If the patch domain script creation fails, refer to the [Troubleshooting](../../troubleshooting/#patch-domain-failures) section. \ No newline at end of file diff --git a/docs-source/content/oig/patch-and-upgrade/patch_an_image.md b/docs-source/content/oig/patch-and-upgrade/patch_an_image.md deleted file mode 100644 index 97108fd85..000000000 --- a/docs-source/content/oig/patch-and-upgrade/patch_an_image.md +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: "a. Patch an image" -description: "Instructions on how to update your OIG Kubernetes cluster with a new OIG container image." ---- - -Choose one of the following options to update your OIG kubernetes cluster to use the new image: - -1. Run the `kubectl edit domain` command -2. Run the `kubectl patch domain` command - -In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OIG Managed Servers. - -**Note**: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. - - -### Run the kubectl edit domain command - -1. To update the domain with the `kubectl edit domain` command, run the following: - - ```bash - $ kubectl edit domain -n - ``` - - For example: - - ```bash - $ kubectl edit domain governancedomain -n oigns - ``` - - If using Oracle Container Registry or your own container registry for your OIG container image, update the `image` to point at the new image, for example: - - ``` - domainHomeInImage: false - image: container-registry.oracle.com/middleware/oig_cpu: - imagePullPolicy: IfNotPresent - ``` - - If you are not using a container registry and have loaded the image on each of the master and worker nodes, update the `image` to point at the new image: - - ``` - domainHomeInImage: false - image: oracle/oig: - imagePullPolicy: IfNotPresent - ``` - -1. Save the file and exit (:wq!) - - -### Run the kubectl patch command - -1. To update the domain with the `kubectl patch domain` command, run the following: - - ```bash - $ kubectl patch domain -n --type merge -p '{"spec":{"image":"newimage:tag"}}' - ``` - - - For example, if using Oracle Container Registry or your own container registry for your OIG container image: - - ```bash - $ kubectl patch domain governancedomain -n oigns --type merge -p '{"spec":{"image":"container-registry.oracle.com/middleware/oig_cpu:"}}' - ``` - - For example, if you are not using a container registry and have loaded the image on each of the master and worker nodes: - - ```bash - $ kubectl patch domain governancedomain -n oigns --type merge -p '{"spec":{"image":"oracle/oig:"}}' - ``` - - The output will look similar to the following: - - ``` - domain.weblogic.oracle/governancedomain patched - ``` - -### Patch the database schemas - -Once the image has been updated you must patch the schemas in the database. - - -1. 
Check to see if the helper pod exists by running: - - ```bash - $ kubectl get pods -n | grep helper - ``` - - For example: - - ```bash - $ kubectl get pods -n oigns | grep helper - ``` - - The output should look similar to the following: - - ``` - helper 1/1 Running 0 26h - ``` - - If the helper pod exists delete the pod with following command: - - ```bash - $ kubectl delete pod helper -n - ``` - - For example: - - ```bash - $ kubectl delete pod helper -n oigns - ``` - -1. Create a new helper pod by following the instructions in [Prepare you environment ](../../prepare-your-environment/#rcu-schema-creation). **Note**: The new helper pod should be started using the new image. - - -1. Run the following command to start a bash shell in the helper pod: - - ```bash - $ kubectl exec -it helper -n -- /bin/bash - ``` - - For example: - - ```bash - $ kubectl exec -it helper -n oigns -- /bin/bash - ``` - - This will take you into a bash shell in the running helper pod: - - ```bash - [oracle@helper ~]$ - ``` - -1. In the helper bash shell run the following commands to set the environment: - - ```bash - [oracle@helper oracle]$ export DB_HOST= - [oracle@helper oracle]$ export DB_PORT= - [oracle@helper oracle]$ export DB_SERVICE= - [oracle@helper oracle]$ export RCUPREFIX= - [oracle@helper oracle]$ export RCU_SCHEMA_PWD= - [oracle@helper oracle]$ echo -e "\n" > /tmp/pwd.txt - [oracle@helper oracle]$ cat /tmp/pwd.txt - ``` - - where: - - `` is the database server hostname - - `` is the database listener port - - `` is the database service name - - `` is the RCU schema prefix you want to set - - `` is the password you want to set for the `` - - `` is the SYS password for the database - - For example: - - ```bash - [oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com - [oracle@helper oracle]$ export DB_PORT=1521 - [oracle@helper oracle]$ export DB_SERVICE=orcl.example.com - [oracle@helper oracle]$ export RCUPREFIX=OIGK8S - [oracle@helper oracle]$ export RCU_SCHEMA_PWD= - ``` - -1. Run the following command to patch the schemas: - - {{% notice note %}} - This command should be run if you are using an OIG image that contains OIG bundle patches. If using an OIG image without OIG bundle patches, then you can skip this step. - {{% /notice %}} - - ```bash - [oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \ - -f /u01/oracle/idm/server/setup/deploy-files/automation.xml \ - run-patched-sql-files \ - -logger org.apache.tools.ant.NoBannerLogger \ - -logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \ - -DoperationsDB.host=$DB_HOST \ - -DoperationsDB.port=$DB_PORT \ - -DoperationsDB.serviceName=$DB_SERVICE \ - -DoperationsDB.user=${RCUPREFIX}_OIM \ - -DOIM.DBPassword=$RCU_SCHEMA_PWD \ - -Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar - ``` - - The output will look similar to the following: - - ``` - Buildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml - ``` - -1. Verify the database was patched successfully by viewing the `patch_oim_wls.log`: - - ```bash - [oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log - ``` - - The output should look similar to below: - - ``` - ... 
- [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/StoredProcedures/OfflineDataPurge/oim_pkg_offline_datapurge_pkg_body.sql - [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_RequestJustificationLocale.sql - [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_reportee_chain_for_mgr.sql - [sql] 36 of 36 SQL statements executed successfully - - - BUILD SUCCESSFUL - Total time: 5 second - ``` diff --git a/docs-source/content/oig/patch-and-upgrade/upgrade-an-ingress.md b/docs-source/content/oig/patch-and-upgrade/upgrade-an-ingress.md new file mode 100644 index 000000000..3d79b98cc --- /dev/null +++ b/docs-source/content/oig/patch-and-upgrade/upgrade-an-ingress.md @@ -0,0 +1,173 @@ +--- +title: "c. Upgrade Ingress" +description: "Instructions on how to upgrade the ingress." +--- + +This section shows how to upgrade the ingress. + +To determine if this step is required for the version you are upgrading to, refer to the [Release Notes](../../release-notes). + + +### Upgrading the ingress + +To upgrade the existing ingress rules, follow the steps below: + +1. List the existing ingress: + + ``` + $ helm list -n + ``` + + For example: + + ``` + $ helm list -n oigns + ``` + + The output will look similar to the following: + + ``` + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + governancedomain-nginx oigns 1 deployed ingress-per-domain-0.1.0 1.0 + ``` + +1. Make sure you have downloaded the latest code as per [Download the latest code repository](../patch-an-image/#download-the-latest-code-repository). + +1. Edit the `$WORKDIR/kubernetes/charts/ingress-per-domain/values.yaml` and change the `domainUID` parameter to match your domainUID, for example `domainUID: governancedomain`. Change `sslType` to `NONSSL` or `SSL` depending on your existing configuration. For example: + + ``` + # Load balancer type. Supported values are: NGINX + type: NGINX + + # SSL configuration Type. Supported Values are : NONSSL,SSL + sslType: SSL + + # domainType. Supported values are: oim + domainType: oim + + #WLS domain as backend to the load balancer + wlsDomain: + domainUID: governancedomain + adminServerName: AdminServer + adminServerPort: 7001 + adminServerSSLPort: + soaClusterName: soa_cluster + soaManagedServerPort: 8001 + soaManagedServerSSLPort: + oimClusterName: oim_cluster + oimManagedServerPort: 14000 + oimManagedServerSSLPort: + + + # Host specific values + hostName: + enabled: false + admin: + runtime: + internal: + + # Ngnix specific values + nginx: + nginxTimeOut: 180 + ``` + +1. Upgrade the `governancedomain-nginx` with the following command: + + ``` + $ cd $WORKDIR + $ helm upgrade kubernetes/charts/ingress-per-domain/ --namespace --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values + ``` + + For example: + + ``` + $ cd $WORKDIR + $ helm upgrade governancedomain-nginx kubernetes/charts/ingress-per-domain/ --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml --reuse-values + ``` + + The output will look similar to the following: + + ``` + Release "governancedomain-nginx" has been upgraded. Happy Helming! + NAME: governancedomain-nginx + LAST DEPLOYED: + NAMESPACE: oigns + STATUS: deployed + REVISION: 2 + TEST SUITE: None + ``` + + +1. 
List the ingress: + + ``` + $ kubectl get ing -n oigns + ``` + + The output will look similar to the following: + + ``` + NAME CLASS HOSTS ADDRESS PORTS AGE + governancedomain-nginx * 10.107.182.40 80 18s + ``` + +1. Describe the ingress and make sure all the listed paths are accessible: + + ``` + $ kubectl describe ing governancedomain-nginx -n oigns + ``` + + The output will look similar to the following: + + ``` + Name: governancedomain-nginx + Namespace: oigns + Address: 10.107.182.40 + Default backend: default-http-backend:80 () + Rules: + Host Path Backends + ---- ---- -------- + * + /console governancedomain-adminserver:7001 (10.244.4.240:7001) + /consolehelp governancedomain-adminserver:7001 (10.244.4.240:7001) + /em governancedomain-adminserver:7001 (10.244.4.240:7001) + /ws_utc governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) + /soa governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) + /integration governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) + /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.4.242:8001) + /identity governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /admin governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /oim governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /iam governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /ucs governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.4.241:14000) + Annotations: kubernetes.io/ingress.class: nginx + meta.helm.sh/release-name: governancedomain-nginx + meta.helm.sh/release-namespace: oigns + nginx.ingress.kubernetes.io/affinity: cookie + nginx.ingress.kubernetes.io/affinity-mode: persistent + nginx.ingress.kubernetes.io/configuration-snippet: + more_clear_input_headers "WL-Proxy-Client-IP" "WL-Proxy-SSL"; + more_set_input_headers "X-Forwarded-Proto: https"; + more_set_input_headers "WL-Proxy-SSL: true"; + nginx.ingress.kubernetes.io/enable-access-log: false + nginx.ingress.kubernetes.io/ingress.allow-http: false + nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k + nginx.ingress.kubernetes.io/proxy-read-timeout: 180 + nginx.ingress.kubernetes.io/proxy-send-timeout: 180 + nginx.ingress.kubernetes.io/session-cookie-name: sticky + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Sync 51m (x3 over 54m) nginx-ingress-controller Scheduled for sync + ``` \ No newline at end of file diff --git a/docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md b/docs-source/content/oig/patch-and-upgrade/upgrade-an-operator-release.md similarity index 97% rename from 
docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md rename to docs-source/content/oig/patch-and-upgrade/upgrade-an-operator-release.md index 2191bc6c2..ee7fdbf92 100644 --- a/docs-source/content/oig/patch-and-upgrade/upgrade_an_operator_release.md +++ b/docs-source/content/oig/patch-and-upgrade/upgrade-an-operator-release.md @@ -1,5 +1,5 @@ --- -title: "b. Upgrade an operator release" +title: "a. Upgrade an operator release" description: "Instructions on how to update the WebLogic Kubernetes Operator version." --- @@ -43,7 +43,7 @@ These instructions apply to upgrading operators within the 3.x release family as ``` Release "weblogic-kubernetes-operator" has been upgraded. Happy Helming! NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Tue Jul 13 09:24:40 2022 + LAST DEPLOYED: NAMESPACE: operator STATUS: deployed REVISION: 3 diff --git a/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md new file mode 100644 index 000000000..ad7c67115 --- /dev/null +++ b/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md @@ -0,0 +1,32 @@ +--- +title: "d. Upgrade Elasticsearch and Kibana" +description: "Instructions on how to upgrade Elastic Search and Kibana." +--- + +This section shows how to upgrade Elasticsearch and Kibana. + +To determine if this step is required for the version you are upgrading to, refer to the [Release Notes](../../release-notes). + +### Download the latest code repository + +1. Make sure you have downloaded the latest code as per [Download the latest code repository](../patch-an-image/#download-the-latest-code-repository). + +### Undeploy Elasticsearch and Kibana + +From October 22 (22.4.1) onwards, OIG logs should be stored on a centralized Elasticsearch and Kibana stack. + +Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana. + +If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below: + +1. If your domain namespace is anything other than `oigns`, edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of `namespace: "oigns"` to your domain namespace. + +1. Delete the Elasticsearch and Kibana resources using the following command: + + ``` + $ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml + ``` + +### Deploy Elasticsearch and Kibana in centralized stack + +1. Follow [Install Elasticsearch stack and Kibana](../manage-oig-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. \ No newline at end of file diff --git a/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md b/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md index aed6ab4a9..c0c27db38 100644 --- a/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md +++ b/docs-source/content/oig/post-install-config/set_oimfronendurl_using_mbeans.md @@ -142,8 +142,9 @@ Follow these post install configuration steps. 1. 
Enter a new value for the `OimFrontEndURL` attribute, in the format: * If using an External LoadBalancer for your ingress: `https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}` - * If using NodePort for your ingress: `http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}` + * If using NodePort for your ingress: `https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}` + If using HTTP instead of HTTPS for your ingress, change the URL appropriately. Then click `Apply`. diff --git a/docs-source/content/oig/prepare-your-environment/_index.md b/docs-source/content/oig/prepare-your-environment/_index.md index 1eb16771f..0281c280c 100644 --- a/docs-source/content/oig/prepare-your-environment/_index.md +++ b/docs-source/content/oig/prepare-your-environment/_index.md @@ -64,17 +64,15 @@ The OIG Kubernetes deployment requires access to an OIG container image. The ima #### Prebuilt OIG container image -The latest prebuilt OIG July 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0, the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.. +The latest prebuilt OIG October 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.. **Note**: Before using this image you must login to [Oracle Container Registry](https://container-registry.oracle.com), navigate to `Middleware` > `oig_cpu` and accept the license agreement. -Alternatively the same image can also be downloaded from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. - You can use this image in the following ways: - Pull the container image from the Oracle Container Registry automatically during the OIG Kubernetes deployment. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. +- Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. +- Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. 
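For example, a minimal sketch of the second option above, assuming the Docker CLI is used and `myregistry.example.com` is a hypothetical private registry; replace `<tag>` with the actual tag of the October 2022 `oig_cpu` image:

```bash
# Log in to the Oracle Container Registry (the license agreement must already be accepted)
$ docker login container-registry.oracle.com

# Pull the prebuilt OIG image. <tag> is a placeholder for the October 2022 image tag.
$ docker pull container-registry.oracle.com/middleware/oig_cpu:<tag>

# Retag the image and push it to your own (hypothetical) container registry
$ docker tag container-registry.oracle.com/middleware/oig_cpu:<tag> myregistry.example.com/middleware/oig_cpu:<tag>
$ docker push myregistry.example.com/middleware/oig_cpu:<tag>
```

If you are staging the image manually instead, `docker save` and `docker load` can be used to copy the image to the master node and each worker node.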
#### Build your own OIG container image using WebLogic Image Tool @@ -201,7 +199,7 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi $ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace \ - --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \ + --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.4.2 \ --set serviceAccount= \ --set “enableClusterRoleBinding=true” \ --set "domainNamespaceSelectionStrategy=LabelSelector" \ @@ -215,7 +213,7 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi $ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \ --namespace opns \ - --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \ + --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.4.2 \ --set serviceAccount=op-sa \ --set "enableClusterRoleBinding=true" \ --set "domainNamespaceSelectionStrategy=LabelSelector" \ @@ -227,7 +225,7 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi ``` NAME: weblogic-kubernetes-operator - LAST DEPLOYED: Wed Jul 13 11:51:37 2022 + LAST DEPLOYED: NAMESPACE: opns STATUS: deployed REVISION: 1 @@ -250,16 +248,16 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi ``` NAME READY STATUS RESTARTS AGE - pod/weblogic-operator-676d5cc6f4-rwzxf 2/2 Running 0 59s + pod/weblogic-operator-5f75f48f99-2p86g 1/1 Running 0 23s - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - service/internal-weblogic-operator-svc ClusterIP 10.102.7.232 8082/TCP 59s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/internal-weblogic-operator-svc ClusterIP 10.97.190.110 8082/TCP 23s NAME READY UP-TO-DATE AVAILABLE AGE - deployment.apps/weblogic-operator 1/1 1 1 59s + deployment.apps/weblogic-operator 1/1 1 1 23s NAME DESIRED CURRENT READY AGE - replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 59s + replicaset.apps/weblogic-operator-5f75f48f99 1 1 1 23s ``` 1. 
Verify the operator pod's log: @@ -277,9 +275,9 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi The output will look similar to the following: ``` - {"timestamp":"2022-07-13T11:52:53.167756673Z","thread":23,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650293167,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} - {"timestamp":"2022-07-13T11:53:03.170083172Z","thread":30,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650303170,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} - {"timestamp":"2022-07-13T11:52:13.172302644Z","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650313172,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":23,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650293167,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":30,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650303170,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} + {"timestamp":"","thread":29,"fiber":"","namespace":"","domainUID":"","level":"CONFIG","class":"oracle.kubernetes.operator.TuningParametersImpl","method":"update","timeInMillis":1636650313172,"message":"Reloading tuning parameters from Operator's config map","exception":"","code":"","headers":{},"body":""} ``` ### Create a namespace for Oracle Identity Governance @@ -337,10 +335,11 @@ Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogi ``` Name: oigns - Labels: weblogic-operator=enabled + Labels: kubernetes.io/metadata.name=oigns + weblogic-operator=enabled Annotations: Status: Active - + No resource quota. No LimitRange resource. 
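If the `weblogic-operator=enabled` label is missing, the operator will not manage domains in that namespace. A minimal sketch of adding and verifying the label, assuming the namespace is `oigns` and the operator was installed with the `LabelSelector` strategy shown earlier:

```bash
# Add the label the operator's LabelSelector strategy looks for, then confirm it is set
$ kubectl label namespace oigns weblogic-operator=enabled --overwrite
$ kubectl get namespace oigns --show-labels
```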
@@ -398,7 +397,7 @@ Before following the steps in this section, make sure that the database and list For example: ```bash - $ kubectl run --image=container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oigns -- sleep infinity + $ kubectl run --image=container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7- --image-pull-policy="IfNotPresent" --overrides='{"apiVersion": "v1","spec":{"imagePullSecrets": [{"name": "orclcred"}]}}' helper -n oigns -- sleep infinity ``` If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command: @@ -410,7 +409,7 @@ Before following the steps in this section, make sure that the database and list For example: ```bash - $ kubectl run helper --image oracle/oig:12.2.1.4-jdk8-ol7- -n oigns -- sleep infinity + $ kubectl run helper --image oracle/oig:12.2.1.4-jdk8-ol7- -n oigns -- sleep infinity ``` The output will look similar to the following: @@ -514,7 +513,7 @@ Before following the steps in this section, make sure that the database and list The output will look similar to the following: ``` - RCU Logfile: /tmp/RCU2022-07-13_17-09_964981565/logs/rcu.log + RCU Logfile: /tmp/RCU/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites @@ -596,22 +595,22 @@ Before following the steps in this section, make sure that the database and list Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OIGK8S - RCU Logfile : /tmp/RCU2022-07-13_17-09_964981565/logs/rcu.log + RCU Logfile : /tmp/RCU/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile - Common Infrastructure Services Success /tmp/RCU2022-07-13_17-09_964981565/logs/stb.log - Oracle Platform Security Services Success /tmp/RCU2022-07-13_17-09_964981565/logs/opss.log - SOA Infrastructure Success /tmp/RCU2022-07-13_17-09_964981565/logs/soainfra.log - Oracle Identity Manager Success /tmp/RCU2022-07-13_17-09_964981565/logs/oim.log - User Messaging Service Success /tmp/RCU2022-07-13_17-09_964981565/logs/ucsums.log - Audit Services Success /tmp/RCU2022-07-13_17-09_964981565/logs/iau.log - Audit Services Append Success /tmp/RCU2022-07-13_17-09_964981565/logs/iau_append.log - Audit Services Viewer Success /tmp/RCU2022-07-13_17-09_964981565/logs/iau_viewer.log - Metadata Services Success /tmp/RCU2022-07-13_17-09_964981565/logs/mds.log - WebLogic Services Success /tmp/RCU2022-07-13_17-09_964981565/logs/wls.log + Common Infrastructure Services Success /tmp/RCU/logs/stb.log + Oracle Platform Security Services Success /tmp/RCU/logs/opss.log + SOA Infrastructure Success /tmp/RCU/logs/soainfra.log + Oracle Identity Manager Success /tmp/RCU/logs/oim.log + User Messaging Service Success /tmp/RCU/logs/ucsums.log + Audit Services Success /tmp/RCU/logs/iau.log + Audit Services Append Success /tmp/RCU/logs/iau_append.log + Audit Services Viewer Success /tmp/RCU/logs/iau_viewer.log + Metadata Services Success /tmp/RCU/logs/mds.log + WebLogic Services Success /tmp/RCU/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper oracle]$ @@ -654,14 +653,14 @@ Before following the steps in this section, make sure that the database and list ``` ... 
- [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/StoredProcedures/OfflineDataPurge/oim_pkg_offline_datapurge_pkg_body.sql - [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_RequestJustificationLocale.sql - [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_reportee_chain_for_mgr.sql - [sql] 36 of 36 SQL statements executed successfully + [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_bkp.sql + [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_fix.sql + [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_upg_ent_trg_restore_bkp.sql + [sql] 58 of 58 SQL statements executed successfully BUILD SUCCESSFUL - Total time: 5 second + Total time: 8 seconds ``` @@ -733,7 +732,7 @@ In this section you prepare the environment for the OIG domain creation. This in username: d2VibG9naWM= kind: Secret metadata: - creationTimestamp: "2022-07-13T17:47:29Z" + creationTimestamp: "" labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain @@ -805,7 +804,7 @@ In this section you prepare the environment for the OIG domain creation. This in username: T0lHSzhT kind: Secret metadata: - creationTimestamp: "2022-07-13T17:50:50Z" + creationTimestamp: "" labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain @@ -826,7 +825,7 @@ When a container is started, it needs to mount that volume. The physical volume The example below uses an NFS mounted volume (/governancedomainpv). Other volume types can also be used. See the official [Kubernetes documentation for Volumes](https://kubernetes.io/docs/concepts/storage/volumes/). -**Note**: The persistent volume directory needs to be accessible to both the master and worker node(s). Make sure this path has **full** access permissions, and that the folder is empty. In this example `/scratch/shared/governancedomainpv` is accessible from all nodes via NFS. +**Note**: The persistent volume directory needs to be accessible to both the master and worker node(s). In this example `/scratch/shared/governancedomainpv` is accessible from all nodes via NFS. @@ -837,7 +836,7 @@ The example below uses an NFS mounted volume (/governancedoma $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /governancedomainpv - $ chmod -R 777 /governancedomainpv + $ sudo chown -R 1000:0 /governancedomainpv ``` For example: @@ -847,7 +846,7 @@ The example below uses an NFS mounted volume (/governancedoma $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/shared/governancedomainpv - $ chmod -R 777 /scratch/shared/governancedomainpv + $ sudo chown -R 1000:0 /scratch/shared/governancedomainpv ``` diff --git a/docs-source/content/oig/prerequisites/_index.md b/docs-source/content/oig/prerequisites/_index.md index 1c52a6742..a6aa9c461 100644 --- a/docs-source/content/oig/prerequisites/_index.md +++ b/docs-source/content/oig/prerequisites/_index.md @@ -7,7 +7,7 @@ description: "System requirements and limitations for deploying and running an O ### Introduction -This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 3.3.0. 
+This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 3.4.2. ### System requirements for OIG domains diff --git a/docs-source/content/oig/release-notes.md b/docs-source/content/oig/release-notes.md index 31125adeb..24f55f581 100644 --- a/docs-source/content/oig/release-notes.md +++ b/docs-source/content/oig/release-notes.md @@ -10,6 +10,17 @@ Review the latest changes and known issues for Oracle Identity Governance on Kub | Date | Version | Change | | --- | --- | --- | +| October, 2022 | 22.4.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| +| | | Support for WebLogic Kubernetes Operator 3.4.2.| +| | | Additional Ingress mappings added.| +| | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. +| | | OIG container images are now only available from [container-registry.oracle.com](https://container-registry.oracle.com) and are no longer available from My Oracle Support.| +| | | If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:| +| | | 1. WebLogic Kubernetes Operator to 3.4.2| +| | | 2. Patch the OIG container image to October 22| +| | | 3. Upgrade the Ingress| +| | | 4. Upgrade Elasticsearch and Kibana | +| | | See [Patch and Upgrade](../patch-and-upgrade) for these instructions.| | July, 2022 | 22.3.1 | Supports Oracle Identity Governance 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| | April, 2022 | 22.2.1 | Updated for CRI-O support.| | November, 2021 | 21.4.2 | Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported.| diff --git a/docs-source/content/oig/troubleshooting/_index.md b/docs-source/content/oig/troubleshooting/_index.md index 2a1276ed1..ce5e3c5e4 100644 --- a/docs-source/content/oig/troubleshooting/_index.md +++ b/docs-source/content/oig/troubleshooting/_index.md @@ -62,4 +62,62 @@ If the OIG domain creation fails when running `create-domain.sh`, run the follow weblogicDomainStoragePath: /scratch/shared/governancedomainpv/ ``` - Clean down the failed domain creation by following steps 1-3 in [Delete the OIG domain home]({{< relref "/oig/manage-oig-domains/delete-domain-home" >}}). Then follow [RCU schema creation]({{< relref "/oig/prepare-your-environment/#rcu-schema-creation" >}}) onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the [OIG domain creation]({{< relref "/oig/create-oig-domains" >}}) steps again. \ No newline at end of file + Clean down the failed domain creation by following steps 1-3 in [Delete the OIG domain home]({{< relref "/oig/manage-oig-domains/delete-domain-home" >}}). Then follow [RCU schema creation]({{< relref "/oig/prepare-your-environment/#rcu-schema-creation" >}}) onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the [OIG domain creation]({{< relref "/oig/create-oig-domains" >}}) steps again. 
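Before cleaning down, it can also help to confirm that the storage path referenced above exists and is reachable from the nodes. A small sketch, assuming the sample NFS path used in this guide and that the commands are run from the directory containing `create-pv-pvc-inputs.yaml`; adjust both paths for your environment:

```bash
# Confirm the storage path configured for the persistent volume
$ grep weblogicDomainStoragePath create-pv-pvc-inputs.yaml

# Check that the directory exists and review its ownership and permissions on the NFS share
$ ls -ld /scratch/shared/governancedomainpv
```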
+ + + +### Patch domain failures + +The instructions in this section relate to problems patching a deployment with a new image as per [Patch an image](../patch-and-upgrade/patch-an-image). + +1. If the OIG domain patching fails when running `patch_oig_domain.sh`, run the following to diagnose the issue: + + ``` + $ kubectl describe domain -n + ``` + + For example: + + ``` + $ kubectl describe domain governancedomain -n oigns + ``` + + Using the output you should be able to diagnose the problem and resolve the issue. + + If the domain is already patched successfully and the script failed at the last step of waiting for pods to come up with the new image, then you do not need to rerun the script again after issue resolution. The pods will come up automatically once you resolve the underlying issue. + +1. If the script is stuck at the following message for a long time: + + ``` + "[INFO] Waiting for weblogic pods to be ready..This may take several minutes, do not close the window. Check log /scratch/OIGK8Slatest/fmw-kubernetes/OracleIdentityGovernance/kubernetes/domain-lifecycle/log/oim_patch_log-/monitor_weblogic_pods.log for progress" + ``` + + run the following command to diagnose the issue: + + ``` + $ kubectl get pods -n + ``` + + For example: + + ``` + $ kubectl get pods -n oigns + ``` + + Run the following to check the logs of the AdminServer, SOA server or OIM server pods, as there may be an issue that is not allowing the domain pods to start properly: + + ```bash + $ kubectl logs -n oigns + ``` + + If the above does not glean any information you can also run: + + ``` + $ kubectl describe pod -n oigns + ``` + + Further diagnostic logs can also be found under the `$WORKDIR/kubernetes/domain-lifecycle`. + + Once any issue is resolved the pods will come up automatically without the need to rerun the script. + + \ No newline at end of file diff --git a/docs-source/content/oud/_index.md b/docs-source/content/oud/_index.md index 6f0732db6..655d45ea6 100644 --- a/docs-source/content/oud/_index.md +++ b/docs-source/content/oud/_index.md @@ -20,7 +20,7 @@ This project has several key features to assist you with deploying and managing ### Current production release -The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [22.3.1](https://github.com/oracle/fmw-kubernetes/releases). +The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [22.4.1](https://github.com/oracle/fmw-kubernetes/releases). ### Recent changes and known issues @@ -36,6 +36,7 @@ If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guid To view documentation for an earlier release, see: +* [Version 22.3.1](https://oracle.github.io/fmw-kubernetes/22.3.1/oud/) * [Version 22.2.1](https://oracle.github.io/fmw-kubernetes/22.2.1/oud/) * [Version 21.4.2](https://oracle.github.io/fmw-kubernetes/21.4.2/oud/) * [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oud/) diff --git a/docs-source/content/oud/configure-ingress/_index.md b/docs-source/content/oud/configure-ingress/_index.md index e6a8bf14b..fa0d809a6 100644 --- a/docs-source/content/oud/configure-ingress/_index.md +++ b/docs-source/content/oud/configure-ingress/_index.md @@ -157,7 +157,7 @@ Use Helm to install NGINX. 
``` NAME: lbr-nginx - LAST DEPLOYED: Mon Jul 11 16:49:35 2022 + LAST DEPLOYED: NAMESPACE: mynginx STATUS: deployed REVISION: 1 diff --git a/docs-source/content/oud/create-oud-instances/_index.md b/docs-source/content/oud/create-oud-instances/_index.md index 64a1ffb39..b1831c38b 100644 --- a/docs-source/content/oud/create-oud-instances/_index.md +++ b/docs-source/content/oud/create-oud-instances/_index.md @@ -16,7 +16,8 @@ description= "This document provides details of the oud-ds-rs Helm chart." 1. [Verify the OUD replication](#verify-the-oud-replication) 1. [Verify the cronjob](#verify-the-cronjob) 1. [Undeploy an OUD deployment](#undeploy-an-oud-deployment) -1. [Appendix: Configuration parameters](#appendix-configuration-parameters) +1. [Appendix A: Configuration parameters](#appendix-a-configuration-parameters) +1. [Appendix B: Environment Variables](#appendix-b-environment-variables) ### Introduction @@ -189,7 +190,7 @@ You can create OUD instances using one of the following methods: ```yaml image: repository: container-registry.oracle.com/middleware/oud_cpu - tag: 12.2.1.4-jdk8-ol7- + tag: 12.2.1.4-jdk8-ol7- pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred @@ -289,7 +290,7 @@ You can create OUD instances using one of the following methods: ```bash $ helm install --namespace oudns \ - --set oudConfig.rootUserPassword=,persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects,image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7- \ + --set oudConfig.rootUserPassword=,persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects,image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7- \ --set oudConfig.sampleData="200" \ --set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=1.21.6 \ --set cronJob.imagePullSecrets[0].name="dockercred" \ @@ -313,7 +314,7 @@ In all the examples above, the following output is shown following a successful ```bash NAME: oud-ds-rs - LAST DEPLOYED: Mon Jul 11 12:02:40 2022 + LAST DEPLOYED: NAMESPACE: oudns STATUS: deployed REVISION: 4 @@ -538,11 +539,11 @@ Once all the PODs created are visible as `READY` (i.e. `1/1`), you can verify yo Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. 
[6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- - oud-ds-rs-0:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 + oud-ds-rs-0:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 : : : : : : : : : : : (GID=1) - oud-ds-rs-1:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 + oud-ds-rs-1:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) - oud-ds-rs-2:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 + oud-ds-rs-2:1444 : 202 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [11] : RS #1 : RS #2 : RS #3 @@ -710,8 +711,8 @@ With an OUD instance now deployed you are now ready to configure an ingress cont The output will look similar to the following: ``` - NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - oud-ds-rs oudns 1 2022-07-11 09:46:17.613632382 +0000 UTC deployed oud-ds-rs-0.2 12.2.1.4.0 + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + oud-ds-rs oudns 1 deployed oud-ds-rs-0.2 12.2.1.4.0 ``` 1. Delete the deployment using the following command: @@ -772,7 +773,7 @@ With an OUD instance now deployed you are now ready to configure an ingress cont $ rm -rf * ``` -### Appendix: Configuration Parameters +### Appendix A: Configuration Parameters The following table lists the configurable parameters of the `oud-ds-rs` chart and their default values. @@ -849,9 +850,9 @@ The following table lists the configurable parameters of the `oud-ds-rs` chart a | oudConfig.adminUID | AdminUID to be configured with each replicated Oracle Unified Directory instance | admin | | oudConfig.adminPassword | Password for AdminUID. If the value is not passed, value of rootUserPassword would be used as password for AdminUID. | rootUserPassword | | baseOUD.envVarsConfigMap | Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base Oracle Unified Directory Instance. Following are the environment variables which would not be honored from the ConfigMap.
instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData. | - | -| baseOUD.envVars | Environment variables in Yaml Map format. This is helpful when its requried to pass environment variables through --values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. | - | +| baseOUD.envVars | Environment variables in Yaml Map format. This is helpful when it is required to pass environment variables through --values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see [Appendix B: Environment Variables](#appendix-b-environment-variables).| - | | replOUD.envVarsConfigMap | Reference to ConfigMap which can contain additional environment variables to be passed on to PODs for Replicated Oracle Unified Directory Instances. Following are the environment variables which would not be honored from the ConfigMap.
instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData, sourceHost, sourceServerPorts, sourceAdminConnectorPort, sourceReplicationPort, dsreplication_1, dsreplication_2, dsreplication_3, dsreplication_4, post_dsreplication_dsconfig_1, post_dsreplication_dsconfig_2 | - | -| replOUD.envVars | Environment variables in Yaml Map format. This is helpful when its required to pass environment variables through --values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. | - | +| replOUD.envVars | Environment variables in Yaml Map format. This is helpful when its required to pass environment variables through --values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. For a full list of environment variables, see [Appendix B: Environment Variables](#appendix-b-environment-variables).| - | | podManagementPolicy | Defines the policy for pod management within the statefulset. Typical values are OrderedReady/Parallel | OrderedReady | | updateStrategy | Allows you to configure and disable automated rolling updates for containers, labels, resource request/limits, and annotations for the Pods in a StatefulSet. Typical values are OnDelete/RollingUpdate | RollingUpdate | | busybox.image | busy box image name. Used for initcontainers | busybox | @@ -907,3 +908,62 @@ The following table lists the configurable parameters of the `oud-ds-rs` chart a | elk.elkVolume.annotations | specifies any annotations that will be used| { } | +### Appendix B: Environment Variables + +| **Environment Variable** | **Description** | **Default Value** | +| ------ | ------ | ------ | +| ldapPort | Port on which the Oracle Unified Directory instance in the container should listen for LDAP communication. Use 'disabled' if you do not want to enable it. | 1389 | +| ldapsPort | Port on which the Oracle Unified Directory instance in the container should listen for LDAPS communication. Use 'disabled' if you do not want to enable it. | 1636 | +| rootUserDN | DN for the Oracle Unified Directory instance root user. | ------ | +| rootUserPassword | Password for the Oracle Unified Directory instance root user. | ------ | +| adminConnectorPort | Port on which the Oracle Unified Directory instance in the container should listen for administration communication over LDAPS. Use 'disabled' if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. | 1444 | +| httpAdminConnectorPort | Port on which the Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. Use 'disabled' if you do not want to enable it. Note that at least one of the LDAP or the HTTP administration ports must be enabled. | 1888 | +| httpPort | Port on which the Oracle Unified Directory Instance in the container should listen for HTTP Communication. Use 'disabled' if you do not want to enable it. | 1080 | +| httpsPort | Port on which the Oracle Unified Directory Instance in the container should listen for HTTPS Communication. Use 'disabled' if you do not want to enable it. | 1081 | +| sampleData | Specifies the number of sample entries to populate the Oracle Unified Directory instance with on creation. 
If this parameter has a non-numeric value, the parameter addBaseEntry is added to the command instead of sampleData. Similarly, when the ldifFile_n parameter is specified sampleData will not be considered and ldifFile entries will be populated.| 0 | +| adminUID | User ID of the Global Administrator to use to bind to the server. This parameter is primarily used with the dsreplication command. | ------ | +| adminPassword | Password for adminUID | ------ | +| bindDN1 | BindDN to be used while setting up replication using `dsreplication` to connect to First Directory/Replication Instance. | ------ | +| bindPassword1 | Password for bindDN1 | ------ | +| bindDN2 | BindDN to be used while setting up replication using `dsreplication` to connect to Second Directory/Replication Instance. | ------ | +| bindPassword2 | Password for bindDN2 | ------ | +| replicationPort | Port value to be used while setting up a replication server. This variable is used to substitute values in `dsreplication` parameters. | 1898 | +| sourceHost | Value for the hostname to be used while setting up a replication server. This variable is used to substitute values in `dsreplication` parameters. | ------ | +| initializeFromHost | Value for the hostname to be used while initializing data on a new Oracle Unified Directory instance replicated from an existing instance. This variable is used to substitute values in `dsreplication` parameters. It is possible to have a different value for sourceHost and initializeFromHost while setting up replication with Replication Server, sourceHost can be used for the Replication Server and initializeFromHost can be used for an existing Directory instance from which data will be initialized.| $sourceHost | +| serverTuning | Values to be used to tune JVM settings. The default value is jvm-default. If specific tuning parameters are required, they can be added using this variable. | jvm-default | +| offlineToolsTuning | Values to be used to specify the tuning for offline tools. This variable if not specified will consider jvm-default as the default or specify the complete set of values with options if wanted to set to specific tuning | jvm-default| +| generateSelfSignedCertificate | Set to "true" if the requirement is to generate a self signed certificate when creating an Oracle Unified Directory instance. If no value is provided this value takes the default, "true". If using a certificate generated separately this value should be set to "false". | true | +| usePkcs11Keystore | Use a certificate in a PKCS#11 token that the replication gateway will use as servercertificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. Set to "true" if the requirement is to use the usePkcs11Keystore parameter when creating an Oracle Unified Directory instance. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to "false".| ------ | +| enableStartTLS | Enable StartTLS to allow secure communication with the directory server by using the LDAP port. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to "false". | ------ | +| useJCEKS | Specifies the path of a JCEKS that contains a certificate that the replication gateway will use as server certificate when accepting encrypted connections from the Oracle Directory Server Enterprise Edition server. If required this should specify the keyStorePath, for example, `/u01/oracle/config/keystore`. 
| ------ | +| useJavaKeystore | Specify the path to the Java Keystore (JKS) that contains the server certificate. If required this should specify the path to the JKS, for example, `/u01/oracle/config/keystore`. By default this parameter is not set. To use this option generateSelfSignedCertificate should be set to "false". | ------ | +| usePkcs12keyStore | Specify the path to the PKCS#12 keystore that contains the server certificate. If required this should specify the path, for example, `/u01/oracle/config/keystore.p12`. By default this parameter is not set. | ------ | +| keyStorePasswordFile | Use the password in the specified file to access the certificate keystore. A password is required when you specify an existing certificate (JKS, JCEKS, PKCS#11, orPKCS#12) as a server certificate. If required this should specify the path of the password file, for example, `/u01/oracle/config/keystorepassword.txt`. By default this parameter is not set. | ------ | +| eusPasswordScheme | Set password storage scheme, if configuring Oracle Unified Directory for Enterprise User Security. Set this to a value of either "sha1" or "sha2". By default this parameter is not set. | ------ | +| jmxPort | Port on which the Directory Server should listen for JMX communication. Use 'disabled' if you do not want to enable it. | disabled | +| javaSecurityFile | Specify the path to the Java security file. If required this should specify the path, for example, `/u01/oracle/config/new_security_file`. By default this parameter is not set. | ------ | +| schemaConfigFile_n | 'n' in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for schema configuration/extension. If required this should specify the path, for example, `schemaConfigFile_1=/u01/oracle/config/00_test.ldif`. | ------ | +| ldifFile_n | 'n' in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for initial data population. If required this should specify the path, for example, `ldifFile_1=/u01/oracle/config/test1.ldif`. | ------ | +| dsconfigBatchFile_n | 'n' in the variable name represents a numeric value between 1 and 50. This variable is used to set the full path of LDIF files that need to be passed to the Oracle Unified Directory instance for batch processing by the `dsconfig` command. If required this should specify the path, for example, `dsconfigBatchFile_1=/u01/oracle/config/dsconfig_1.txt`. When executing the `dsconfig` command the following values are added implicitly to the arguments contained in the batch file : ${hostname}, ${adminConnectorPort}, ${bindDN} and ${bindPasswordFile} | ------ | +| dstune_n | 'n' in the variable name represents a numeric value between 1 and 50. Allows commands and options to be passed to the `dstune` utility as a full command. | ------ | +| dsconfig_n | 'n' in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the `dsconfig` command. For each `dsconfig` execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | ------ | +| dsreplication_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the `dsreplication` command. 
For each `dsreplication` execution, the following variables are added implicitly : ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}, ${replicationPort}, ${sourceHost}, ${initializeFromHost}, and ${baseDN}. Depending on the dsreplication sub-command, the following variables are added implicitly : ${bindDN1}, ${bindPasswordFile1}, ${bindDN2}, ${bindPasswordFile2}, ${adminUID}, and ${adminPasswordFile}. | ------ | +| post_dsreplication_dsconfig_n | 'n' in the variable name represents a numeric value between 1 and 300. Each file represents a set of execution parameters for the `dsconfig` command to be run following execution of the `dsreplication` command. For each `dsconfig` execution, the following variables/values are added implicitly : --provider-name "Multimaster Synchronization", ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | ------ | +| rebuildIndex_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the `rebuild-index` command. For each `rebuild-index` execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}, and ${baseDN}. | ------ | +| manageSuffix_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the `manage-suffix` command. For each `manage-suffix` execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | ------ | +| importLdif_n | 'n' in the variable name represents a numeric value between 1 and 50. Each file represents a set of execution parameters for the `import-ldif` command. For each `import-ldif` execution, the following variables are added implicitly : ${hostname}, ${adminConnectorPort}, ${bindDN}, ${bindPasswordFile}. | ------ | +| execCmd_n | 'n' in the variable name represents a numeric value between 1 and 300. Each file represents a command to be executed in the container. For each command execution, the following variables are replaced, if present in the command : ${hostname}, ${ldapPort}, ${ldapsPort}, ${adminConnectorPort}. | ------ | +| restartAfterRebuildIndex | Specifies whether to restart the server after building the index. | false | +| restartAfterSchemaConfig | Specifies whether to restart the server after configuring the schema. | false | + +**Note** For the following parameters above, the following statement applies: + +* dsconfig_n +* dsreplication_n +* post_dsreplication_dsconfig_n +* rebuildIndex_n +* manageSuffix_n +* importLdif_n +* execCmd_n + +If values are provided the following variables will be substituted with their values: ${hostname},${ldapPort},${ldapsPort},${adminConnectorPort},${replicationPort},${sourceHost},${initializeFromHost},${sourceAdminConnectorPort},${sourceReplicationPort},${baseDN},${rootUserDN},${adminUID},${rootPwdFile},${bindPasswordFile},${adminPwdFile},${bindPwdFile1},${bindPwdFile2} \ No newline at end of file diff --git a/docs-source/content/oud/manage-oud-containers/logging-and-visualization.md b/docs-source/content/oud/manage-oud-containers/logging-and-visualization.md index 5a2a5deca..816451ef6 100644 --- a/docs-source/content/oud/manage-oud-containers/logging-and-visualization.md +++ b/docs-source/content/oud/manage-oud-containers/logging-and-visualization.md @@ -4,12 +4,12 @@ description: "Describes the steps for logging and visualization with Elasticsear --- 1. [Introduction](#introduction) -1. 
[Installation](#installation) - 1. [Create a Kubernetes secret](#create-a-kubernetes-secret) - 1. [Enable Elasticsearch, Logstash, and Kibana](#enable-elasticsearch-logstash-and-kibana) +1. [Install Elasticsearch and Kibana](#install-elasticsearch-and-kibana) +1. [Create a Kubernetes secret](#create-a-kubernetes-secret) +1. [Enable Logstash](#enable-logstash) 1. [Upgrade OUD deployment with ELK configuration](#upgrade-oud-deployment-with-elk-configuration) 1. [Verify the pods](#verify-the-pods) -1. [Verify using the Kibana application](#verify-using-the-kibana-application) +1. [Verify and access the Kibana console](#verify-and-access-the-kibana-console) ### Introduction @@ -21,204 +21,349 @@ The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can * Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” * Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you're looking for. -### Installation +### Install Elasticsearch and Kibana -ELK can be enabled for environments created using the Helm charts provided. The example below will demonstrate installation and configuration of ELK for the `oud-ds-rs` chart. +If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +[Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html) -#### Create a Kubernetes secret +### Create the logstash pod -A Kubernetes secret to access the required images on [hub.docker.com](https://hub.docker.com) should have been previously created in [Create OUD instances](../../create-oud-instances/#create-a-kubernetes-secret-for-cronjob-images). +#### Variables used in this chapter -If you have not yet created a Kubernetes secret refer back to [Create OUD instances](../../create-oud-instances/#create-a-kubernetes-secret-for-cronjob-images). +In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment. -#### Enable Elasticsearch, Logstash, and Kibana +Most of the values for the variables will be based on your ELK deployment as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html). -1. Create a directory on the persistent volume to store the ELK log files: +The table below outlines the variables and values you must set: + +| Variable | Sample Value | Description | +| --- | --- | --- | +| `` | `8.3.1` | The version of logstash you want to install.| +| `` | `true` | If SSL is enabled for ELK set the value to `true`, or if NON-SSL set to `false`. This value must be lowercase.| +| `` | `MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75..etc...P9ovZ/EKPpE6Gq` | If `ELK_SSL=true`, this is the BASE64 version of the certificate. This is the Certificate Authority (CA) certificate(s), that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticserver server. 
See [Copying the Elasticsearch Certificate](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C1FC1063-FA76-48AD-AE3D-A39390874C74) for details on how to get the correct certificate.| +| `` | `https://elasticsearch.example.com:9200` | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.| +| `` | `logstash_internal` | The name of the user for logstash to access Elasticsearch.| +| `` | `password` | The password for ELK_USER.| +| `` | `apikey` | The API key details.| + + + +#### Create a kubernetes secret + +1. Create a Kubernetes secret for Elasticsearch using the API Key or Password. + + a) If ELK uses an API Key for authentication: - ```bash - $ mkdir -p /oud_elk_data - $ chmod 777 /oud_elk_data ``` - + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= + ``` + For example: - ```bash - $ mkdir -p /scratch/shared/oud_elk_data - $ chmod 777 /scratch/shared/oud_elk_data + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password= + ``` + + The output will look similar to the following: + + ``` + secret/elasticsearch-pw-elastic created ``` -1. Navigate to the `$WORKDIR/kubernetes/helm` directory and create a `logging-override-values.yaml` with the following: - - ```yaml - elk: - enabled: true - imagePullSecrets: - - name: dockercred + b) If ELK uses a password for authentication: - elkVolume: - # If enabled, it will use the persistent volume. - # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - type: filesystem - filesystem: - hostPath: - path: /oud_elk_data + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= ``` For example: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n oudns --from-literal password= + ``` + + The output will look similar to the following: + + ``` + secret/elasticsearch-pw-elastic created + ``` - ```yaml - elk: - enabled: true - imagePullSecrets: - - name: dockercred + + **Note**: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above. + + +1. Check that the `dockercred` secret that was created previously in [Create a Kubernetes secret for cronjob images](../../create-oud-instances/#create-a-kubernetes-secret-for-cronjob-images) exists: - elkVolume: - # If enabled, it will use the persistent volume. - # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - type: filesystem - filesystem: - hostPath: - path: /scratch/shared/oud_elk_data + ```bash + $ kubectl get secret -n | grep dockercred + ``` + + For example, + + ```bash + $ kubectl get secret -n oudns | grep dockercred + ``` + + The output will look similar to the following: + + ```bash + dockercred kubernetes.io/dockerconfigjson 1 149m ``` + + If the secret does not exist, create it as per [Create a Kubernetes secret for cronjob images](../../create-oud-instances/#create-a-kubernetes-secret-for-cronjob-images). - If using NFS for the persistent volume change the `elkVolume` section as follows: +#### Enable logstash - ```yaml - elkVolume: - # If enabled, it will use the persistent volume. 
- # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - type: networkstorage - networkstorage: - nfs: - server: myserver - path: /oud_elk_data +1. Navigate to the `$WORKDIR/kubernetes/helm` directory and create a `logging-override-values.yaml` file as follows: + ``` - + elk: + imagePullSecrets: + - name: dockercred + IntegrationEnabled: true + logStashImage: logstash: + logstashConfigMap: false + esindex: oudlogs-00001 + sslenabled: + eshosts: + # Note: We need to provide either esuser,espassword or esapikey + esuser: + espassword: elasticsearch-pw-elastic + esapikey: elasticsearch-pw-elastic + escert: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + ``` + + + Change the ``, ``, ``, ``, and `` to match the values for your environment. + + If using SSL, make sure the value for is indented correctly. You can use the command: `sed 's/^/ /' elk.crt` to output the certificate with the correct indentation. + + If not using SSL, delete the `` line, but leave the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + + If using API KEY for your ELK authentication, leave both `esuser:` and `espassword:` with no value. + + If using a password for ELK authentication, leave `esapi_key:` but delete `elasticsearch-pw-elastic`. + + If no authentication is used for ELK, leave `esuser`, `espassword`, and `esapi_key` with no value assigned. + + The rest of the lines in the yaml file should not be changed. + + For example: + + ``` + elk: + imagePullSecrets: + - name: dockercred + IntegrationEnabled: true + logStashImage: logstash:8.3.1 + logstashConfigMap: false + esindex: oudlogs-00001 + sslenabled: true + eshosts: https://elasticsearch.example.com:9200 + # Note: We need to provide either esuser,espassword or esapikey + esuser: logstash_internal + espassword: elasticsearch-pw-elastic + esapikey: + escert: | + -----BEGIN CERTIFICATE----- + MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75NvPPQUN5kXqNQUwDQYJKoZIhvcNAQELBQAw + NTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJj + aC1odHRwMB4XDTIyMDgyNDA1MTU1OVoXDTIzMDgyNDA1MjU1OVowNTEWMBQGA1UE + CxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJjaC1odHRwMIIB + IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsQOnxUm9uF32+lyc9SA3WcMZ + P1X7TbHMDuO/l3UHBUf5F/bt2m3YkGw+enIos9wzuUNpjIwVt8q4WrRCMl80nAQ0 + yCXrfLSI9zaHxEC8Ht7V0U+7Sgu5uysD4tyZ9T0Q5zjvkWS6oBPxhfri3OQfPvUW + gQ6wJaPGDteYZAwiBMvPEkmh0VUTBTXjToHrtrT7pzmz5BBWnUzdf+jv0+nEfedm + mMWw/8jqyqid7bu7bo6gKBZ8zk06n2iMaXzmGW34QlYRLXCgbThhxyDE7joZ4NTA + UFEJecZR2fccmpN8CNkT9Ex4Hq88nh2OP5XKKPNF4kLh2u6F4auF7Uz42jwvIwID + AQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG + AQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLQb/IjHHkSmHgKSPY7r + zBIJZMbdMA0GCSqGSIb3DQEBCwUAA4IBAQA01qY0tGIPsKNkn7blxRjEYkTg59Z5 + vi6MCpGtdoyZeJgH621IpwyB34Hpu1RQfyg1aNgmOtIK9cvQZRl008DHF4AiHYhU + 6xe3cjI/QxDXwitoBgWl+a0mkwhSmzJt7TuzImq7RMO4ws3M/nGeNUwFjwsQu86+ + N/Y3RuuUVbK1xy8Jdz3FZADIgHVPN6GQwYKEpWrZNapKBXjunjCZmpBFxqGMRF44 + fcSKFlFkwjyTq4kgq44NPv18NMfKCYZcK7ttRTiep77vKB7No/TM69Oz5ZHhQ+2Q + pSGg3QF+1fOCFCgWXFEOle6lQ5i8a/GihY0FuphrZxP9ovZ/EKPpE6Gq + -----END CERTIFICATE----- + ``` + + + #### Upgrade OUD deployment with ELK configuration 1. 
Run the following command to upgrade the OUD deployment with the ELK configuration: - ```bash + ``` $ helm upgrade --namespace --values oud-ds-rs --reuse-values ``` - + For example: - ```bash + ``` $ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs --reuse-values ``` + + The output should look similar to the following: + + ``` + Release "oud-ds-rs" has been upgraded. Happy Helming! + NAME: oud-ds-rs + LAST DEPLOYED: + NAMESPACE: oudns + STATUS: deployed + REVISION: 2 + NOTES: + # + # Copyright (c) 2020, 2022, Oracle and/or its affiliates. + # + # Licensed under the Universal Permissive License v 1.0 as shown at + # https://oss.oracle.com/licenses/upl + # + # + Since "nginx" has been chosen, follow the steps below to configure nginx ingress controller. + Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation. + command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -#### Verify the pods + Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. + # helm install --namespace --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx -1. Run the following command to verify the elasticsearch, logstash and kibana pods are running: + For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart. - ```bash - $ kubectl get pods -o wide -n | grep 'es\|kibana\|logstash' - ``` + Run these commands to check port mapping and services: + # kubectl --namespace get services -o wide -w lbr-nginx-ingress-controller + # kubectl describe --namespace ingress.extensions/oud-ds-rs-http-ingress-nginx + # kubectl describe --namespace ingress.extensions/oud-ds-rs-admin-ingress-nginx - For example: + Accessible interfaces through ingress: + (External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller) - ```bash - $ kubectl get pods -o wide -n oudns | grep 'es\|kibana\|logstash' - ``` + 1. OUD Admin REST: + Port: http/https - The output will look similar to the following: + 2. OUD Data REST: + Port: http/https + + 3. OUD Data SCIM: + Port: http/https + + 4. OUD LDAP/LDAPS: + Port: ldap/ldaps + + 5. OUD Admin LDAPS: + Port: ldaps + + Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters. - ``` - oud-ds-rs-es-cluster-0 1/1 Running 0 6m28s - oud-ds-rs-kibana-7b7769485f-b9mr4 1/1 Running 0 6m28s - oud-ds-rs-logstash-5995948d7f-nqlh6 1/1 Running 0 6m28s - ``` - From the above identify the elasticsearch pod, for example: `oud-ds-rs-es-cluster-0`. + Accessible interfaces through ingress: -1. Run the `port-forward` command to allow elasticsearch to listen on port 9200: + 1. OUD Admin REST: + Port: http/https + 2. OUD Data REST: + Port: http/https + + 3. OUD Data SCIM: + Port: http/https + + Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters. + ``` + +#### Verify the pods + +1. 
Run the following command to check the `logstash` pod is created correctly: + ```bash - $ kubectl port-forward oud-ds-rs-es-cluster-0 9200:9200 --namespace= & + $ kubectl get pods -n ``` - + For example: ```bash - $ kubectl port-forward oud-ds-rs-es-cluster-0 9200:9200 --namespace=oudns & + $ kubectl get pods -n oudns ``` - - The output will look similar to the following: - ```bash - [1] 98458 - bash-4.2$ Forwarding from 127.0.0.1:9200 -> 9200 - Forwarding from [::1]:9200 -> 9200 + The output should look similar to the following: + ``` - -1. Verify that elasticsearch is running by interrogating port 9200: - - ```bash - $ curl http://localhost:9200 + NAME READY STATUS RESTARTS AGE + oud-ds-rs-0 1/1 Running 0 150m + oud-ds-rs-1 1/1 Running 0 143m + oud-ds-rs-2 1/1 Running 0 137m + oud-ds-rs-logstash-5dc8d94597-knk8g 1/1 Running 0 2m12s + oud-pod-cron-job-27758370-wpfq7 0/1 Completed 0 66m + oud-pod-cron-job-27758400-kd6pn 0/1 Completed 0 36m + oud-pod-cron-job-27758430-ndmgj 0/1 Completed 0 6m33s + ``` + + **Note**: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using: + + ``` + $ kubectl logs -f oud-ds-rs-logstash- -n oudns + ``` + + Most errors occur due to misconfiguration of the `logging-override-values.yaml`. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation. + + If the pod has errors, view the helm history to find the last working revision, for example: + + ``` + $ helm history oud-ds-rs -n oudns ``` The output will look similar to the following: - ```bash - { - "name" : "oud-ds-rs-es-cluster-0", - "cluster_name" : "OUD-elk", - "cluster_uuid" : "J42fuv_XSHGy-uolRyNEtA", - "version" : { - "number" : "6.8.0", - "build_flavor" : "default", - "build_type" : "docker", - "build_hash" : "65b6179", - "build_date" : "2019-05-15T20:06:13.172855Z", - "build_snapshot" : false, - "lucene_version" : "7.7.0", - "minimum_wire_compatibility_version" : "5.6.0", - "minimum_index_compatibility_version" : "5.0.0" - }, - "tagline" : "You Know, for Search" - } - ``` - - -### Verify using the Kibana application - -1. List the Kibana application service using the following command: - - ```bash - $ kubectl get svc -o wide -n | grep kibana ``` - + REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION + 1 Tue Oct 11 14:06:01 2022 superseded oud-ds-rs-0.2 12.2.1.4.0 Install complete + 2 Tue Oct 11 16:34:21 2022 deployed oud-ds-rs-0.2 12.2.1.4.0 Upgrade complete + ``` + + Rollback to the previous working revision by running: + + ``` + $ helm rollback -n + ``` + For example: - - ```bash - $ kubectl get svc -o wide -n oudns | grep kibana + + ``` + helm rollback oud-ds-rs 1 -n oudns ``` + + Once you have resolved the issue in the yaml files, run the `helm upgrade` command outlined earlier to recreate the logstash pod. + - The output will look similar to the following: +### Verify and access the Kibana console + +To access the Kibana console you will need the Kibana URL as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C0013AA8-B229-4237-A1D8-8F38FA6E2CEC). - ```bash - oud-ds-rs-kibana NodePort 10.103.169.218 5601:31199/TCP 13m app=kibana - ``` - In this example, the port to access the Kibana application is `31199`. +**For Kibana 7.7.x and below**: -1. 
Access the Kibana console in a browser with: `http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana`.

-1. From the Kibana portal navigate to `Management`> `Kibana` > `Index Patterns`.

-1. In the **Create Index Pattern** page enter `*` for the **Index pattern** and click **Next Step**.

1. In the **Configure settings** page, from the **Time Filter field name** drop down menu select `@timestamp` and click **Create index pattern**.

-1. Once the index pattern is created click on **Discover** in the navigation menu to view the OUD logs.
+1. Once the index pattern is created click on **Discover** in the navigation menu to view the OUD logs.
+
+
+**For Kibana version 7.8.X and above**:
+1. Access the Kibana console with `http://:/app/kibana` and login with your username and password.
+1. From the Navigation menu, navigate to **Management** > **Stack Management**.
+1. Click **Data Views** in the **Kibana** section.
+
+1. Click **Create Data View** and enter the following information:
+
+    + Name: `oudlogs*`
+    + Timestamp: `@timestamp`
+
+1. Click **Create Data View**.
+1. From the Navigation menu, click **Discover** to view the log file entries.
+1. From the drop down menu, select `oudlogs*` to view the log file entries. \ No newline at end of file
diff --git a/docs-source/content/oud/manage-oud-containers/monitoring-oud-instance.md b/docs-source/content/oud/manage-oud-containers/monitoring-oud-instance.md
index b6a7bb45c..f2242bcc6 100644
--- a/docs-source/content/oud/manage-oud-containers/monitoring-oud-instance.md
+++ b/docs-source/content/oud/manage-oud-containers/monitoring-oud-instance.md
@@ -89,7 +89,7 @@ After the Oracle Unified Directory instance (OUD) is set up you can monitor it u
   ```bash
   NAME: monitoring
-  LAST DEPLOYED: Mon Jul 11 09:57:54 2022
+  LAST DEPLOYED:
   NAMESPACE: monitoring
   STATUS: deployed
   REVISION: 1
@@ -195,6 +195,6 @@ statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1

 1. Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.

-1. Import the Grafana dashboard by navigating on the left hand menu to **Create** > **Import**. Click **Upload JSON file** and select the json downloaded file. In the `Prometheus` drop down box select `Prometheus`. Click **Import**. The dashboard should be displayed.
+1. Import the Grafana dashboard by navigating on the left hand menu to **Dashboards** > **Import**. Click **Upload JSON file** and select the json downloaded file. In the `Prometheus` drop down box select `Prometheus`. Click **Import**. The dashboard should be displayed.

 1. Verify your installation by viewing some of the customized dashboard views.

diff --git a/docs-source/content/oud/patch-and-upgrade/index.md b/docs-source/content/oud/patch-and-upgrade/index.md
index 083dc5e83..a1aafc216 100644
--- a/docs-source/content/oud/patch-and-upgrade/index.md
+++ b/docs-source/content/oud/patch-and-upgrade/index.md
@@ -5,16 +5,15 @@ pre = "8. "
 description= "This document provides steps to patch or upgrade an OUD image"
 +++

-1. [Introduction](#introduction)
-1. [Upgrading to July 22 (22.3.1) or later from earlier versions](#upgrading-to-july-22-2231-or-later-from-earlier-versions)
-1.
[Upgrading from July 22 (22.3.1) to a later release](#upgrading-from-july-22-2231-to-a-later-release) +In this section you learn how to upgrade OUD from a previous version. Follow the section relevant to the version you are upgrading from. -### Introduction -In this section the Oracle Unified Directory (OUD) deployment is updated with a new OUD container image. +1. [Upgrading to October 22 (22.4.1) or later from releases prior to July 22 (22.3.1)](#upgrading-to-october-22-2241-or-later-from-releases-prior-to-july-22-2231) +1. [Upgrading to October 22 (22.4.1) or later from July 22 (22.3.1)](#upgrading-to-october-22-2241-or-later-from-july-22-2231) +1. [Upgrading Elasticsearch and Kibana](#upgrading-elasticsearch-and-kibana) -### Upgrading to July 22 (22.3.1) or later from earlier versions +### Upgrading to October 22 (22.4.1) or later from releases prior to July 22 (22.3.1) In releases prior to July 22 ([22.3.1](https://github.com/oracle/fmw-kubernetes/releases)) OUD used pod based deployment. From July 22 ([22.3.1](https://github.com/oracle/fmw-kubernetes/releases)) onwards OUD is deployed using [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/). @@ -97,7 +96,7 @@ If you are upgrading from a release prior to July 22 ([22.3.1](https://github.co For example: ```bash - $ mkdir /scratch/shared/OUDLatestSource + $ mkdir /scratch/shared/OUDK8Slatest ``` @@ -124,11 +123,11 @@ If you are upgrading from a release prior to July 22 ([22.3.1](https://github.co For example: ```bash - $ export WORKDIR=/scratch/shared/OUDLatestSource/fmw-kubernetes/OracleUnifiedDirectory + $ export WORKDIR=/scratch/shared/OUDK8Slatest/fmw-kubernetes/OracleUnifiedDirectory ``` -#### 2. Create a new instance against your existing persistent volume +#### Create a new instance against your existing persistent volume 1. Navigate to the `$WORKDIR/kubernetes/helm` directory @@ -234,9 +233,13 @@ If you are upgrading from a release prior to July 22 ([22.3.1](https://github.co 1. Check the OUD deployment as per [Verify the OUD deployment](../create-oud-instances/#verify-the-oud-deployment) and [Verify the OUD replication](../create-oud-instances#verify-the-oud-replication). -### Upgrading from July 22 (22.3.1) to a later release +1. Upgrade Elasticsearch and Kibana by following [Upgrading Elasticsearch and Kibana](#upgrading-elasticsearch-and-kibana). -The instructions below are for upgrading from Jul 22 ([22.3.1](https://github.com/oracle/fmw-kubernetes/releases)) to a later release. + + +### Upgrading to October 22 (22.4.1) or later from July 22 (22.3.1) + +The instructions below are for upgrading from Jul 22 ([22.3.1](https://github.com/oracle/fmw-kubernetes/releases)) to October 22 ([22.4.1](https://github.com/oracle/fmw-kubernetes/releases)) or later. **Note**: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. 
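
For reference, a minimal sketch of staging the image on a node when no registry is available is shown below. The `worker1` host name and tar file name are hypothetical; the image tag matches the example used later in this section, and `podman` can be substituted for `docker` on CRI-O hosts:

```bash
# Hypothetical example: save the new OUD image on a machine that can pull it,
# copy the tar file to each node, then load it into the local container runtime.
$ docker save container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-new -o oud_cpu-new.tar
$ scp oud_cpu-new.tar worker1:/tmp/
$ ssh worker1 "docker load -i /tmp/oud_cpu-new.tar"
```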
@@ -374,7 +377,7 @@ You can update the deployment with a new OUD container image using one of the fo Namespace: oudns Priority: 0 Node: /100.102.48.28 - Start Time: Wed, 16 Mar 2022 12:07:36 +0000 + Start Time: Labels: app.kubernetes.io/instance=oud-ds-rs app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oud-ds-rs @@ -398,4 +401,83 @@ You can update the deployment with a new OUD container image using one of the fo Normal Created 3m22s (x2 over 142m) kubelet Created container oud-ds-rs Normal Started 3m22s (x2 over 142m) kubelet Started container oud-ds-rs Normal Pulled 3m22s kubelet Successfully pulled image "container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-new" in 33.477063844s - ``` \ No newline at end of file + ``` + +1. Upgrade Elasticsearch and Kibana by following [Upgrading Elasticsearch and Kibana](#upgrading-elasticsearch-and-kibana). + + +### Upgrading Elasticsearch and Kibana + +This section shows how to upgrade Elasticsearch and Kibana. From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana stack. + +#### Download the latest code repository + +If you haven't already downloaded the latest code repository, do so as follows: + +1. Create a working directory to setup the source code. + ```bash + $ mkdir + ``` + + For example: + ```bash + $ mkdir /scratch/shared/OUDK8Slatest + ``` + +1. Download the latest OUD deployment scripts from the OUD repository. + + ```bash + $ cd + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + + For example: + + ```bash + $ cd /scratch/OUDK8Slatest + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + +1. Set the `$WORKDIR` environment variable as follows: + + ```bash + $ export WORKDIR=/fmw-kubernetes/OracleUnifiedDirectory + ``` + + For example: + + ```bash + $ export WORKDIR=/scratch/shared/OUDK8Slatest/fmw-kubernetes/OracleUnifiedDirectory + ``` + +#### Undeploy Elasticsearch and Kibana + +From October 22 (22.4.1) onwards, OUD logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack. + +Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana. + +If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below: + +1. Navigate to the `$WORKDIR/kubernetes/helm` directory and create a `logging-override-values-uninstall.yaml` with the following: + + ``` + elk: + enabled: false + ``` + +1. Run the following command to remove the existing ELK deployment: + + ``` + $ helm upgrade --namespace --values oud-ds-rs --reuse-values + ``` + + For example: + + ``` + $ helm upgrade --namespace oudns --values logging-override-values-uninstall.yaml oud-ds-rs oud-ds-rs --reuse-values + ``` + + +#### Deploy ElasticSearch and Kibana in centralized stack + +1. Follow [Install Elasticsearch stack and Kibana](../manage-oud-containers/logging-and-visualization/#install-elasticsearch-stack-and-kibana)) to deploy ElasticSearch and Kibana in a centralized stack. \ No newline at end of file diff --git a/docs-source/content/oud/prepare-your-environment/_index.md b/docs-source/content/oud/prepare-your-environment/_index.md index 5a416ac16..c21e71e42 100644 --- a/docs-source/content/oud/prepare-your-environment/_index.md +++ b/docs-source/content/oud/prepare-your-environment/_index.md @@ -54,17 +54,15 @@ The OUD Kubernetes deployment requires access to an OUD container image. 
The ima #### Prebuilt OUD container image -The prebuilt OUD July 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0, the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.. +The prebuilt OUD October 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.. **Note**: Before using this image you must login to [Oracle Container Registry](https://container-registry.oracle.com), navigate to `Middleware` > `oud_cpu` and accept the license agreement. -Alternatively the same image can also be downloaded from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. - You can use this image in the following ways: - Pull the container image from the Oracle Container Registry automatically during the OUD Kubernetes deployment. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. +- Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. +- Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. #### Build your own OUD container image using WebLogic Image Tool @@ -82,14 +80,14 @@ You can use an image built with WebLogic Image Tool in the following ways: As referenced in [Prerequisites](../prerequisites) the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. -Make sure the persistent volume path has **full** access permissions, and that the folder is empty. In this example `/scratch/shared/` is a shared directory accessible from all nodes. +In this example `/scratch/shared/` is a shared directory accessible from all nodes. 1. On the master node run the following command to create a `user_projects` directory: ```bash $ cd $ mkdir oud_user_projects - $ chmod 777 oud_user_projects + $ sudo chown -R 1000:0 oud_user_projects ``` For example: @@ -97,7 +95,7 @@ Make sure the persistent volume path has **full** access permissions, and that t ```bash $ cd /scratch/shared $ mkdir oud_user_projects - $ chmod 777 oud_user_projects + $ sudo chown -R 1000:0 oud_user_projects ``` 1. 
On the master node run the following to ensure it is possible to read and write to the persistent volume: diff --git a/docs-source/content/oud/release-notes.md b/docs-source/content/oud/release-notes.md index 312c085af..7c3da3e9e 100644 --- a/docs-source/content/oud/release-notes.md +++ b/docs-source/content/oud/release-notes.md @@ -10,6 +10,9 @@ Review the latest changes and known issues for Oracle Unified Directory on Kuber | Date | Version | Change | | --- | --- | --- | +| October, 2022 | 22.4.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| +| | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana. +| | | OUD container images are now only available from [container-registry.oracle.com](https://container-registry.oracle.com) and are no longer available from My Oracle Support.| | July, 2022 | 22.3.1 | Supports Oracle Unified Directory 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program. From July 2022 onwards OUD deployment is performed using [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/). | | April, 2022 | 22.2.1 | Updated for CRI-O support.| | November 2021 | 21.4.2 | Voyager ingress removed as no longer supported.| diff --git a/docs-source/content/oud/troubleshooting/_index.md b/docs-source/content/oud/troubleshooting/_index.md index dae4a2c3e..93417b1b6 100644 --- a/docs-source/content/oud/troubleshooting/_index.md +++ b/docs-source/content/oud/troubleshooting/_index.md @@ -116,7 +116,7 @@ Name: oud-ds-rs-0 Namespace: oudns Priority: 0 Node: /100.102.48.84 -Start Time: Wed, 11 Jul 2022 14:39:09 +0000 +Start Time: Labels: app.kubernetes.io/instance=oud-ds-rs app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oud-ds-rs @@ -132,17 +132,17 @@ IPs: Containers: oud-ds-rs: Container ID: cri-o://2795176b6af2c17a9426df54214c7e53318db9676bbcf3676d67843174845d68 - Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7- + Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7- Image ID: container-registry.oracle.com/middleware/oud_cpu@sha256:6ba20e54d17bb41312618011481e9b35a40f36f419834d751277f2ce2f172dca Ports: 1444/TCP, 1888/TCP, 1389/TCP, 1636/TCP, 1080/TCP, 1081/TCP, 1898/TCP Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP State: Running - Started: Wed, 11 Jul 2022 15:38:10 +0000 + Started: Last State: Terminated Reason: Error Exit Code: 137 - Started: Wed, 11 Jul 2022 14:39:10 +0000 - Finished: Wed, 11 Jul 2022 15:37:16 +0000 + Started: + Finished: Ready: True Restart Count: 1 Liveness: tcp-socket :ldap delay=900s timeout=15s period=30s #success=1 #failure=1 @@ -286,45 +286,4 @@ In this situation perform the following steps to remove the server: ``` rm /tmp/adminpassword.txt - ``` - -#### Helm upgrade fails enabling ELK - -When deploying Elasticsearch and Kibana (ELK) in OUD, you may hit the following error during the `helm upgrade` command: - -``` -Error: UPGRADE FAILED: error validating "": error validating data: [ValidationError(PersistentVolume.spec.accessModes): unknown object type "nil" in PersistentVolume.spec.accessModes[0], unknown object type "nil" in PersistentVolume.spec.capacity.storage] - -Error: UPGRADE FAILED: error validating "": error validating data: 
ValidationError(StatefulSet.spec.template.spec.containers[0].volumeMounts[0]): missing required field "mountPath" in io.k8s.api.core.v1.VolumeMount -``` - -If this error occurs add the following lines to the `$WORKDIR/kubernetes/helm/logging-override-values.yaml` and rerun the `helm upgrade` command: - -``` -elkVolume: - mountPath: /usr/share/elasticsearch/data - size: 20Gi - storageClass: elk-oud - reclaimPolicy: "Delete" -``` - -For example: - -``` -elk: - enabled: true - imagePullSecrets: - - name: dockercred - -elkVolume: - mountPath: /usr/share/elasticsearch/data - size: 20Gi - storageClass: elk-oud - reclaimPolicy: "Delete" - enabled: true - type: networkstorage - networkstorage: - nfs: - server: myserver - path: /oud_elk_data -``` + ``` \ No newline at end of file diff --git a/docs-source/content/oudsm/_index.md b/docs-source/content/oudsm/_index.md index 0889fd075..7dc6901f8 100644 --- a/docs-source/content/oudsm/_index.md +++ b/docs-source/content/oudsm/_index.md @@ -12,7 +12,7 @@ Follow the instructions in this guide to set up Oracle Unified Directory Service ### Current production release -The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [22.3.1](https://github.com/oracle/fmw-kubernetes/releases). +The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is [22.4.1](https://github.com/oracle/fmw-kubernetes/releases). ### Recent changes and known issues @@ -28,6 +28,7 @@ If performing an Enterprise Deployment, refer to the [Enterprise Deployment Guid To view documentation for an earlier release, see: +* [Version 22.3.1](https://oracle.github.io/fmw-kubernetes/22.3.1/oudsm/) * [Version 22.2.1](https://oracle.github.io/fmw-kubernetes/22.2.1/oudsm/) * [Version 21.4.2](https://oracle.github.io/fmw-kubernetes/21.4.2/oudsm/) * [Version 21.4.1](https://oracle.github.io/fmw-kubernetes/21.4.1/oudsm/) \ No newline at end of file diff --git a/docs-source/content/oudsm/configure-ingress/_index.md b/docs-source/content/oudsm/configure-ingress/_index.md index cee6610ca..8554ba6aa 100644 --- a/docs-source/content/oudsm/configure-ingress/_index.md +++ b/docs-source/content/oudsm/configure-ingress/_index.md @@ -133,7 +133,7 @@ Use Helm to install NGINX. ``` NAME: lbr-nginx - LAST DEPLOYED: Mon Jul 11 17:07:32 2022 + LAST DEPLOYED: NAMESPACE: mynginx STATUS: deployed REVISION: 1 @@ -209,12 +209,12 @@ If it is not possible to have LoadBalancer configuration updated to have host na 1. Launch a browser and access the OUDSM console. -* If using an External LoadBalancer: `https:///oudsm`. -* If not using an External LoadBalancer use `https://:30443/oudsm`. + * If using an External LoadBalancer: `https:///oudsm`. + * If not using an External LoadBalancer use `https://:30443/oudsm`. 1. Access the WebLogic Administration console by accessing the following URL and login with `weblogic/` where `weblogic/` is the `adminUser` and `adminPass` set when creating the OUDSM instance. -* If using an External LoadBalancer: `https:///console`. -* If not using an External LoadBalancer use `https://:30443/console`. + * If using an External LoadBalancer: `https:///console`. + * If not using an External LoadBalancer use `https://:30443/console`. 
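
Before opening a browser, you can optionally confirm the ingress is responding with `curl`. The host name below is a placeholder for your own master node or load balancer address; `-k` is used because the ingress presents a self-signed certificate by default:

```bash
# Hypothetical host name - replace with your master node or load balancer address.
$ curl -k -s -o /dev/null -w "%{http_code}\n" https://masternode.example.com:30443/oudsm
# A response code such as 200 or 302 indicates the OUDSM console is reachable.
```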
diff --git a/docs-source/content/oudsm/create-oudsm-instances/_index.md b/docs-source/content/oudsm/create-oudsm-instances/_index.md index e93e409f7..debbe8983 100644 --- a/docs-source/content/oudsm/create-oudsm-instances/_index.md +++ b/docs-source/content/oudsm/create-oudsm-instances/_index.md @@ -89,14 +89,14 @@ Create a Kubernetes secret that stores the credentials for the container registr As referenced in [Prerequisites](../prerequisites) the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. -Make sure the persistent volume path has **full** access permissions, and that the folder is empty. In this example `/scratch/shared/` is a shared directory accessible from all nodes. +In this example `/scratch/shared/` is a shared directory accessible from all nodes. 1. On the master node run the following command to create a `user_projects` directory: ```bash $ cd $ mkdir oudsm_user_projects - $ chmod 777 oudsm_user_projects + $ sudo chown -R 1000:0 oudsm_user_projects ``` For example: @@ -104,7 +104,7 @@ Make sure the persistent volume path has **full** access permissions, and that t ```bash $ cd /scratch/shared $ mkdir oudsm_user_projects - $ chmod 777 oudsm_user_projects + $ sudo chown -R 1000:0 oudsm_user_projects ``` 1. On the master node run the following to ensure it is possible to read and write to the persistent volume: @@ -198,7 +198,7 @@ You can create OUDSM instances using one of the following methods: ```yaml image: repository: container-registry.oracle.com/middleware/oudsm_cpu - tag: 12.2.1.4-jdk8-ol7- + tag: 12.2.1.4-jdk8-ol7- pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred @@ -272,7 +272,7 @@ You can create OUDSM instances using one of the following methods: ```bash $ helm install --namespace oudsmns \ - --set oudsm.adminUser=weblogic,oudsm.adminPass=,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7- \ + --set oudsm.adminUser=weblogic,oudsm.adminPass=,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7- \ --set imagePullSecrets[0].name="orclcred" \ oudsm oudsm ``` @@ -291,7 +291,7 @@ In all the examples above, the following output is shown following a successful ```bash NAME: oudsm - LAST DEPLOYED: Mon Jul 11 12:21:06 2022 + LAST DEPLOYED: NAMESPACE: oudsmns STATUS: deployed REVISION: 1 @@ -388,8 +388,8 @@ With an OUDSM instance now deployed you are now ready to configure an ingress co The output will look similar to the following: ``` - NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION - oudsm oudsmns 2 2022-07-11 16:46:34.05531056 +0000 UTC deployed oudsm-0.1 12.2.1.4.0 + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + oudsm oudsmns 2 deployed oudsm-0.1 12.2.1.4.0 ``` 1. 
Delete the deployment using the following command: diff --git a/docs-source/content/oudsm/manage-oudsm-containers/logging-and-visualization.md b/docs-source/content/oudsm/manage-oudsm-containers/logging-and-visualization.md index 9e2c86868..e6cceaf63 100644 --- a/docs-source/content/oudsm/manage-oudsm-containers/logging-and-visualization.md +++ b/docs-source/content/oudsm/manage-oudsm-containers/logging-and-visualization.md @@ -3,34 +3,91 @@ title: "b) Logging and Visualization for Helm Chart oudsm Deployment" description: "Describes the steps for logging and visualization with Elasticsearch and Kibana." --- -1. [Introduction](#introduction) -1. [Installation](#installation) - 1. [Create a Kubernetes secret](#create-a-kubernetes-secret) - 1. [Enable Elasticsearch, Logstash, and Kibana](#enable-elasticsearch-logstash-and-kibana) - 1. [Upgrade OUDSM deployment with ELK configuration](#upgrade-oudsm-deployment-with-elk-configuration) - 1. [Verify the pods](#verify-the-pods) -1. [Verify using the Kibana application](#verify-using-the-kibana-application) - ### Introduction -This section describes how to install and configure logging and visualization for the [oudsm](../../create-oudsm-instances) Helm Chart deployment. +This section describes how to install and configure logging and visualization for the [oudsm](../../create-oudsm-instances) Helm chart deployment. -The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK we can gain insights in real-time from the log data from your applications. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. * Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. * Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” * Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you're looking for. -### Installation +### Install Elasticsearch and Kibana + +If you do not already have a centralized Elasticsearch (ELK) stack then you must configure this first. For details on how to configure the ELK stack, follow +[Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html) + + +### Create the logstash pod + +#### Variables used in this chapter + +In order to create the logstash pod, you must create a yaml file. This file contains variables which you must substitute with variables applicable to your ELK environment. + +Most of the values for the variables will be based on your ELK deployment as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html). + +The table below outlines the variables and values you must set: -ELK can be enabled for environments created using the Helm charts provided. The example below will demonstrate installation and configuration of ELK for the `oudsm` chart. 
+| Variable | Sample Value | Description | +| --- | --- | --- | +| `` | `8.3.1` | The version of logstash you want to install.| +| `` | `true` | If SSL is enabled for ELK set the value to `true`, or if NON-SSL set to `false`. This value must be lowercase.| +| `` | `MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75..etc...P9ovZ/EKPpE6Gq` | If `ELK_SSL=true`, this is the BASE64 version of the certificate. This is the Certificate Authority (CA) certificate(s), that signed the certificate of the Elasticsearch server. If using a self-signed certificate, this is the self signed certificate of the Elasticserver server. See [Copying the Elasticsearch Certificate](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C1FC1063-FA76-48AD-AE3D-A39390874C74) for details on how to get the correct certificate.| +| `` | `https://elasticsearch.example.com:9200` | The URL for sending logs to Elasticsearch. HTTP if NON-SSL is used.| +| `` | `logstash_internal` | The name of the user for logstash to access Elasticsearch.| +| `` | `password` | The password for ELK_USER.| +| `` | `apikey` | The API key details.| -#### Create a Kubernetes secret -1. Create a Kubernetes secret to access the required images on [hub.docker.com](https://hub.docker.com): +#### Create Kubernetes secrets + +1. Create a Kubernetes secret for Elasticsearch using the API Key or Password. + + a) If ELK uses an API Key for authentication: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= + ``` + + For example: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password= + ``` + + The output will look similar to the following: + + ``` + secret/elasticsearch-pw-elastic created + ``` + + b) If ELK uses a password for authentication: - **Note:** You must first have a user account on [hub.docker.com](https://hub.docker.com). + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n --from-literal password= + ``` + + For example: + + ``` + $ kubectl create secret generic elasticsearch-pw-elastic -n oudsmns --from-literal password= + ``` + + The output will look similar to the following: + + ``` + secret/elasticsearch-pw-elastic created + ``` + + + **Note**: It is recommended that the ELK Stack is created with authentication enabled. If no authentication is enabled you may create a secret using the values above. + + +1. Create a Kubernetes secret to access the required images on [hub.docker.com](https://hub.docker.com): + + **Note:** You must first have a user account on [hub.docker.com](https://hub.docker.com): ```bash $ kubectl create secret docker-registry "dockercred" --docker-server="https://index.docker.io/v1/" --docker-username="" --docker-password= --docker-email= --namespace= @@ -48,198 +105,199 @@ ELK can be enabled for environments created using the Helm charts provided. The secret/dockercred created ``` -#### Enable Elasticsearch, Logstash, and Kibana -1. Create a directory on the persistent volume to store the ELK log files: - ```bash - $ mkdir -p /oudsm_elk_data - $ chmod 777 /oudsm_elk_data - ``` - - For example: +#### Enable logstash + +1. Navigate to the `$WORKDIR/kubernetes/helm` directory and create a `logging-override-values.yaml` file as follows: - ```bash - $ mkdir -p /scratch/shared/oudsm_elk_data - $ chmod 777 /scratch/shared/oudsm_elk_data ``` - -1. 
Navigate to the `$WORKDIR/kubernetes/helm` directory and create a `logging-override-values.yaml` with the following: - - - ```yaml elk: - enabled: true - imagePullSecrets: - - name: dockercred - - elkVolume: - # If enabled, it will use the persistent volume. - # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - type: filesystem - filesystem: - hostPath: - path: /oudsm_elk_data + imagePullSecrets: + - name: dockercred + IntegrationEnabled: true + logStashImage: logstash: + logstashConfigMap: false + esindex: oudsmlogs-00001 + sslenabled: + eshosts: + # Note: We need to provide either esuser,espassword or esapikey + esuser: + espassword: elasticsearch-pw-elastic + esapikey: elasticsearch-pw-elastic + escert: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- ``` - + + + Change the ``, ``, ``, ``, and `` to match the values for your environment. + + If using SSL, make sure the value for `` is indented correctly. You can use the command: `sed 's/^/ /' elk.crt` to output the certificate with the correct indentation. + + If not using SSL, delete the `` line, but leave the `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + + If using API KEY for your ELK authentication, leave both `esuser:` and `espassword:` with no value. + + If using a password for ELK authentication, leave `esapi_key:` but delete `elasticsearch-pw-elastic`. + + If no authentication is used for ELK, leave `esuser`, `espassword`, and `esapi_key` with no value assigned. + + The rest of the lines in the yaml file should not be changed. + For example: - - ```yaml + + ``` elk: - enabled: true - imagePullSecrets: - - name: dockercred - - elkVolume: - # If enabled, it will use the persistent volume. 
- # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - type: filesystem - filesystem: - hostPath: - path: /scratch/shared/oudsm_elk_data + imagePullSecrets: + - name: dockercred + IntegrationEnabled: true + logStashImage: logstash:8.3.1 + logstashConfigMap: false + esindex: oudsmlogs-00001 + sslenabled: true + eshosts: https://elasticsearch.example.com:9200 + # Note: We need to provide either esuser,espassword or esapikey + esuser: logstash_internal + espassword: elasticsearch-pw-elastic + esapikey: + escert: | + -----BEGIN CERTIFICATE----- + MIIDVjCCAj6gAwIBAgIRAOqQ3Gy75NvPPQUN5kXqNQUwDQYJKoZIhvcNAQELBQAw + NTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJj + aC1odHRwMB4XDTIyMDgyNDA1MTU1OVoXDTIzMDgyNDA1MjU1OVowNTEWMBQGA1UE + CxMNZWxhc3RpY3NlYXJjaDEbMBkGA1UEAxMSZWxhc3RpY3NlYXJjaC1odHRwMIIB + IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsQOnxUm9uF32+lyc9SA3WcMZ + P1X7TbHMDuO/l3UHBUf5F/bt2m3YkGw+enIos9wzuUNpjIwVt8q4WrRCMl80nAQ0 + yCXrfLSI9zaHxEC8Ht7V0U+7Sgu5uysD4tyZ9T0Q5zjvkWS6oBPxhfri3OQfPvUW + gQ6wJaPGDteYZAwiBMvPEkmh0VUTBTXjToHrtrT7pzmz5BBWnUzdf+jv0+nEfedm + mMWw/8jqyqid7bu7bo6gKBZ8zk06n2iMaXzmGW34QlYRLXCgbThhxyDE7joZ4NTA + UFEJecZR2fccmpN8CNkT9Ex4Hq88nh2OP5XKKPNF4kLh2u6F4auF7Uz42jwvIwID + AQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsG + AQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLQb/IjHHkSmHgKSPY7r + zBIJZMbdMA0GCSqGSIb3DQEBCwUAA4IBAQA01qY0tGIPsKNkn7blxRjEYkTg59Z5 + vi6MCpGtdoyZeJgH621IpwyB34Hpu1RQfyg1aNgmOtIK9cvQZRl008DHF4AiHYhU + 6xe3cjI/QxDXwitoBgWl+a0mkwhSmzJt7TuzImq7RMO4ws3M/nGeNUwFjwsQu86+ + N/Y3RuuUVbK1xy8Jdz3FZADIgHVPN6GQwYKEpWrZNapKBXjunjCZmpBFxqGMRF44 + fcSKFlFkwjyTq4kgq44NPv18NMfKCYZcK7ttRTiep77vKB7No/TM69Oz5ZHhQ+2Q + pSGg3QF+1fOCFCgWXFEOle6lQ5i8a/GihY0FuphrZxP9ovZ/EKPpE6Gq + -----END CERTIFICATE----- ``` + + + +#### Upgrade oudsm deployment with ELK configuration - If using NFS for the persistent volume change the `elkVolume` section as follows: - +1. Run the following command to upgrade the oudsm deployment with the ELK configuration: - ```yaml - elkVolume: - # If enabled, it will use the persistent volume. - # if value is false, PV and PVC would not be used and there would not be any mount point available for config - enabled: true - type: networkstorage - networkstorage: - nfs: - server: myserver - path: /oudsm_elk_data ``` - -#### Upgrade OUDSM deployment with ELK configuration - -1. Run the following command to upgrade the OUDSM deployment with the ELK configuration: - - ```bash - $ helm upgrade --namespace --values logging-override-values.yaml oudsm --reuse-values + $ helm upgrade --namespace --values oudsm --reuse-values ``` - + For example: - ```bash + ``` $ helm upgrade --namespace oudsmns --values logging-override-values.yaml oudsm oudsm --reuse-values ``` - + + The output should look similar to the following: + + ``` + Release "oudsm" has been upgraded. Happy Helming! + NAME: oudsm + LAST DEPLOYED: + NAMESPACE: oudsmns + STATUS: deployed + REVISION: 2 + TEST SUITE: None + ``` + #### Verify the pods - -1. Run the following command to verify the elasticsearch, logstash and kibana pods are running: - + +1. 
Run the following command to check the `logstash` pod is created correctly: + ```bash - $ kubectl get pods -o wide -n | grep 'es\|kibana\|logstash' + $ kubectl get pods -n ``` - + For example: - + ```bash - $ kubectl get pods -o wide -n oudsmns | grep 'es\|kibana\|logstash' + $ kubectl get pods -n oudsmns ``` - - The output will look similar to the following: - + + The output should look similar to the following: + ``` - NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES - oudsm-es-cluster-0 1/1 Running 0 4m5s 10.244.1.124 - oudsm-kibana-7bf95b4c45-sfst6 1/1 Running 1 4m5s 10.244.2.137 - oudsm-logstash-5bb6bc67bf-l4mdv 1/1 Running 0 4m5s 10.244.2.138 + NAME READY STATUS RESTARTS AGE + oudsm-1 1/1 Running 0 51m + oudsm-logstash-56dbcc6d9f-mxsgj 1/1 Running 0 2m7s ``` - - From the above identify the elasticsearch pod, for example: `oudsm-es-cluster-0`. - - -1. Run the `port-forward` command to allow ElasticSearch to be listening on port 9200: - - ```bash - $ kubectl port-forward oudsm-es-cluster-0 9200:9200 --namespace= & + + **Note**: Wait a couple of minutes to make sure the pod has not had any failures or restarts. If the pod fails you can view the pod log using: + ``` - - For example: - - ```bash - $ kubectl port-forward oudsm-es-cluster-0 9200:9200 --namespace=oudsmns & + $ kubectl logs -f oudsm-logstash- -n oudsmns + ``` + + Most errors occur due to misconfiguration of the `logging-override-values.yaml`. This is usually because of an incorrect value set, or the certificate was not pasted with the correct indentation. + + If the pod has errors, view the helm history to find the last working revision, for example: + + ``` + $ helm history oudsm -n oudsmns ``` The output will look similar to the following: ``` - [1] 98458 - bash-4.2$ Forwarding from 127.0.0.1:9200 -> 9200 - Forwarding from [::1]:9200 -> 9200 - ``` - -1. Verify that ElasticSearch is running by interrogating port 9200: - - - ```bash - $ curl http://localhost:9200 + REVISION UPDATED STATUS CHART APP VERSION DESCRIPTION + 1 superseded oudsm-0.1 12.2.1.4.0 Install complete + 2 deployed oudsm-0.1 12.2.1.4.0 Upgrade complete ``` - The output will look similar to the following: + Rollback to the previous working revision by running: - ```bash - { - "name" : "oudsm-es-cluster-0", - "cluster_name" : "OUD-elk", - "cluster_uuid" : "TIKKJuK4QdWcOZrEOA1zeQ", - "version" : { - "number" : "6.8.0", - "build_flavor" : "default", - "build_type" : "docker", - "build_hash" : "65b6179", - "build_date" : "2019-05-15T20:06:13.172855Z", - "build_snapshot" : false, - "lucene_version" : "7.7.0", - "minimum_wire_compatibility_version" : "5.6.0", - "minimum_index_compatibility_version" : "5.0.0" - }, - "tagline" : "You Know, for Search" - } - ``` - - -### Verify using the Kibana application - -1. List the Kibana application service using the following command: - - ```bash - $ kubectl get svc -o wide -n | grep kibana ``` - + $ helm rollback -n + ``` + For example: - - ```bash - $ kubectl get svc -o wide -n oudsmns | grep kibana + ``` + helm rollback oudsm 1 -n oudsmns + ``` + + Once you have resolved the issue in the yaml files, run the `helm upgrade` command outlined earlier to recreate the logstash pod. 
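
For convenience, a sketch of that re-run is shown below, using the same example namespace and release name (`oudsmns`/`oudsm`) as earlier in this section; watching the pods afterwards confirms the recreated logstash pod starts cleanly:

```bash
# Re-run the upgrade with the corrected logging-override-values.yaml
$ helm upgrade --namespace oudsmns --values logging-override-values.yaml oudsm oudsm --reuse-values

# Watch until the new oudsm-logstash pod reports 1/1 Running
$ kubectl get pods -n oudsmns -w
```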
+ - The output will look similar to the following: +### Verify and access the Kibana console - ``` - oudsm-kibana NodePort 10.101.248.248 5601:31195/TCP 7m56s app=kibana - ``` +To access the Kibana console you will need the Kibana URL as per [Installing Elasticsearch (ELK) Stack and Kibana](https://docs.oracle.com/en/middleware/fusion-middleware/12.2.1.4/ikedg/installing-monitoring-and-visualization-software.html#GUID-C0013AA8-B229-4237-A1D8-8F38FA6E2CEC). - In this example, the port to access Kibana application via a Web browser will be `31195`. -1. Access the Kibana console in a browser with: `http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana`. +**For Kibana 7.7.x and below**: -1. From the Kibana portal navigate to `Management`> `Kibana` > `Index Patterns`. +1. Access the Kibana console with `http://:/app/kibana` and login with your username and password. -1. In the **Create Index Pattern** page enter `*` for the **Index pattern** and click **Next Step**. +1. From the Navigation menu, navigate to **Management** > **Kibana** > **Index Patterns**. + +1. In the **Create Index Pattern** page enter `oudsmlogs*` for the **Index pattern** and click **Next Step**. 1. In the **Configure settings** page, from the **Time Filter field name** drop down menu select `@timestamp` and click **Create index pattern**. -1. Once the index pattern is created click on **Discover** in the navigation menu to view the OUDSM logs. +1. Once the index pattern is created click on **Discover** in the navigation menu to view the OIG logs. + +**For Kibana version 7.8.X and above**: +1. Access the Kibana console with `http://:/app/kibana` and login with your username and password. +1. From the Navigation menu, navigate to **Management** > **Stack Management**. +1. Click **Data Views** in the **Kibana** section. + +1. Click **Create Data View** and enter the following information: + + + Name: `oudsmlogs*` + + Timestamp: `@timestamp` + +1. Click **Create Data View**. +1. From the Navigation menu, click **Discover** to view the log file entries. +1. From the drop down menu, select `oudsmlogs*` to view the log file entries. diff --git a/docs-source/content/oudsm/manage-oudsm-containers/monitoring-oudsm-instance.md b/docs-source/content/oudsm/manage-oudsm-containers/monitoring-oudsm-instance.md index 29ed5f959..ead28dc68 100644 --- a/docs-source/content/oudsm/manage-oudsm-containers/monitoring-oudsm-instance.md +++ b/docs-source/content/oudsm/manage-oudsm-containers/monitoring-oudsm-instance.md @@ -88,7 +88,7 @@ After the Oracle Unified Directory Services Manager instance is set up you can m ```bash NAME: monitoring - LAST DEPLOYED: Mon Jul 11 16:29:23 2022 + LAST DEPLOYED: NAMESPACE: monitoring STATUS: deployed REVISION: 1 @@ -118,7 +118,7 @@ The output will look similar to the following: ``` NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 27s 10.244.2.141 -pod/monitoring-grafana-578f79599c-qqdfb 2/3 Running 0 34s 10.244.1.127 +pod/monitoring-grafana-578f79599c-qqdfb 3/3 Running 0 34s 10.244.1.127 pod/monitoring-kube-prometheus-operator-65cdf7995-w6btr 1/1 Running 0 34s 10.244.1.126 pod/monitoring-kube-state-metrics-56bfd4f44f-5ls8t 1/1 Running 0 34s 10.244.2.139 pod/monitoring-prometheus-node-exporter-5b2f6 1/1 Running 0 34s 100.102.48.84 @@ -195,7 +195,7 @@ statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 1. 
Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856. -1. Import the Grafana dashboard by navigating on the left hand menu to **Create** > **Import**. Click **Upload JSON file** and select the json downloaded file. In the `Prometheus` drop down box select `Prometheus`. Click **Import**. The dashboard should be displayed. +1. Import the Grafana dashboard by navigating on the left hand menu to **Dashboards** > **Import**. Click **Upload JSON file** and select the json downloaded file. In the `Prometheus` drop down box select `Prometheus`. Click **Import**. The dashboard should be displayed. 1. Verify your installation by viewing some of the customized dashboard views. diff --git a/docs-source/content/oudsm/patch-and-upgrade/_index.md b/docs-source/content/oudsm/patch-and-upgrade/_index.md new file mode 100644 index 000000000..d063e17d0 --- /dev/null +++ b/docs-source/content/oudsm/patch-and-upgrade/_index.md @@ -0,0 +1,15 @@ ++++ +title = "Patch and upgrade" +weight = 11 +pre = "11. " +description= "This document provides steps to patch or upgrade an OUD image, and Elasticsearch and Kibana." ++++ + +This section shows you how to upgrade the OUDSM image, and how to upgrade the Elasticsearch and Kibana stack. + +The upgrade path taken depends on the version you are upgrading from, and the version you are upgrading to. + +Please refer to the [Release Notes](../release-notes) for information on which upgrade steps are necessary for the version you are upgrading to. + +{{% children style="h4" description="true" %}} + diff --git a/docs-source/content/oudsm/patch-and-upgrade/index.md b/docs-source/content/oudsm/patch-and-upgrade/patch-an-oudsm-image.md similarity index 95% rename from docs-source/content/oudsm/patch-and-upgrade/index.md rename to docs-source/content/oudsm/patch-and-upgrade/patch-an-oudsm-image.md index b2f8aa101..494c91c69 100644 --- a/docs-source/content/oudsm/patch-and-upgrade/index.md +++ b/docs-source/content/oudsm/patch-and-upgrade/patch-an-oudsm-image.md @@ -1,9 +1,7 @@ -+++ -title = "Patch and Upgrade" -weight = 8 -pre = "8. " -description= "This document provides steps to patch or upgrade an OUDSM image" -+++ +--- +title: "a. Patch an image" +description: "Instructions on how to update your OUDSM Kubernetes cluster with a new OUDSM container image." +--- ### Introduction @@ -144,8 +142,8 @@ You can update the deployment with a new OUDSM container image using one of the Name: oudsm-1 Namespace: oudsmns Priority: 0 - Node: prats-crio-worker2/100.102.48.28 - Start Time: Mon, 11 Jul 2022 10:38:20 +0000 + Node: /100.102.48.28 + Start Time: Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm diff --git a/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md new file mode 100644 index 000000000..f1212be04 --- /dev/null +++ b/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md @@ -0,0 +1,79 @@ +--- +title: "b. Upgrade Elasticsearch and Kibana" +description: "Instructions on how to upgrade Elastic Search and Kibana." +--- + +This section shows how to upgrade Elasticsearch and Kibana. + +To determine if this step is required for the version you are upgrading to, refer to the [Release Notes](../../release-notes). + +### Download the latest code repository + +Download the latest code repository as follows: + +1. Create a working directory to setup the source code. 
+ ```bash + $ mkdir + ``` + + For example: + ```bash + $ mkdir /scratch/OUDSMK8Slatest + ``` + +1. Download the latest OUDSM deployment scripts from the OUDSM repository. + + ```bash + $ cd + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + + For example: + + ```bash + $ cd /scratch/OUDSMK8Slatest + $ git clone https://github.com/oracle/fmw-kubernetes.git + ``` + +1. Set the `$WORKDIR` environment variable as follows: + + ```bash + $ export WORKDIR=/fmw-kubernetes/OracleUnifiedDirectorySM + ``` + + For example: + + ```bash + $ export WORKDIR=/scratch/OUDSMK8Slatest/fmw-kubernetes/OracleUnifiedDirectorySM + ``` + +### Undeploy Elasticsearch and Kibana + +From October 22 (22.4.1) onwards, OUDSM logs should be stored on a centralized Elasticsearch and Kibana (ELK) stack. + +Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch and Kibana. + +If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below: + +1. Navigate to the `$WORKDIR/kubernetes/helm` directory and create a `logging-override-values-uninstall.yaml` with the following: + + ``` + elk: + enabled: false + ``` + +1. Run the following command to remove the existing ELK deployment: + + ``` + $ helm upgrade --namespace --values oudsm --reuse-values + ``` + + For example: + + ``` + $ helm upgrade --namespace oudsmns --values logging-override-values-uninstall.yaml oudsm oudsm --reuse-values + ``` + +### Deploy Elasticsearch and Kibana in centralized stack + +1. Follow [Install Elasticsearch stack and Kibana](../../manage-oudsm-containers/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy ElasticSearch and Kibana in a centralized stack. \ No newline at end of file diff --git a/docs-source/content/oudsm/prepare-your-environment/_index.md b/docs-source/content/oudsm/prepare-your-environment/_index.md index 9cf7b1a9c..1fbb7b31e 100644 --- a/docs-source/content/oudsm/prepare-your-environment/_index.md +++ b/docs-source/content/oudsm/prepare-your-environment/_index.md @@ -6,7 +6,6 @@ description = "Prepare your environment" +++ - 1. [Check the Kubernetes cluster is ready](#check-the-kubernetes-cluster-is-ready) 1. [Obtain the OUDSM container image](#obtain-the-oudsm-container-image) 1. [Setup the code repository to deploy OUDSM](#setup-the-code-repository-to-deploy-oudsm) @@ -55,17 +54,15 @@ The Oracle Unified Directory Services Manager (OUDSM) Kubernetes deployment requ #### Prebuilt OUDSM container image -The prebuilt OUDSM July 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Unified Directory Services Manager 12.2.1.4.0, the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.. +The prebuilt OUDSM October 2022 container image can be downloaded from [Oracle Container Registry](https://container-registry.oracle.com). This image is prebuilt by Oracle and includes Oracle Unified Directory Services Manager 12.2.1.4.0, the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.. **Note**: Before using this image you must login to [Oracle Container Registry](https://container-registry.oracle.com), navigate to `Middleware` > `oudsm_cpu` and accept the license agreement. 
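Once the license has been accepted, the image can be pulled using the same Oracle account credentials and, if required, staged in your own registry. The following sketch is illustrative only: `registry.example.com` is a placeholder for your own registry, `OUDSM_TAG` must be set to the `oudsm_cpu` tag shown in Oracle Container Registry, and `docker` can be replaced with `podman` if that is your container CLI:

```bash
# Illustrative only: pull the prebuilt OUDSM image and stage it in a private registry.
# OUDSM_TAG and registry.example.com are placeholders -- substitute your own values.
OUDSM_TAG="<oudsm_cpu-tag-from-oracle-container-registry>"
IMAGE="container-registry.oracle.com/middleware/oudsm_cpu:${OUDSM_TAG}"

# Authenticate with the Oracle account that accepted the license agreement.
docker login container-registry.oracle.com

# Pull the image, then retag and push it to your own registry (optional).
docker pull "${IMAGE}"
docker tag  "${IMAGE}" "registry.example.com/oudsm_cpu:${OUDSM_TAG}"
docker push "registry.example.com/oudsm_cpu:${OUDSM_TAG}"
```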
-Alternatively the same image can also be downloaded from [My Oracle Support](https://support.oracle.com) by referring to the document ID 2723908.1. - You can use this image in the following ways: - Pull the container image from the Oracle Container Registry automatically during the OUDSM Kubernetes deployment. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. -- Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. +- Manually pull the container image from the Oracle Container Registry and then upload it to your own container registry. +- Manually pull the container image from the Oracle Container Registry and manually stage it on the master node and each worker node. #### Build your own OUDSM container image using WebLogic Image Tool diff --git a/docs-source/content/oudsm/release-notes.md b/docs-source/content/oudsm/release-notes.md index 5e6b0c2af..0e6bc9024 100644 --- a/docs-source/content/oudsm/release-notes.md +++ b/docs-source/content/oudsm/release-notes.md @@ -10,6 +10,13 @@ Review the latest changes and known issues for Oracle Unified Directory Services | Date | Version | Change | | --- | --- | --- | +| October, 2022 | 22.4.1 | Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the October 2022 container image which contains the October Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| +| | | Changes to deployment of Logging and Visualization with Elasticsearch and Kibana.| +| | | OUDSM container images are now only available from [container-registry.oracle.com](https://container-registry.oracle.com) and are no longer available from My Oracle Support.| +| | | If upgrading to October 22 (22.4.1) from a previous release, you must upgrade the following in order:| +| | | 1. Patch the OUDSM container image to October 22| +| | | 2. Upgrade Elasticsearch and Kibana.| +| | | See [Patch and Upgrade](../patch-and-upgrade) for these instructions.| | July, 2022 | 22.3.1 | Supports Oracle Unified Directory Services Manager 12.2.1.4 domain deployment using the July 2022 container image which contains the July Patch Set Update (PSU) and other fixes released with the Critical Patch Update (CPU) program.| | April, 2022 | 22.2.1 | Updated for CRI-O support.| | November 2021 | 21.4.2 | Voyager ingress removed as no longer supported.| diff --git a/docs-source/content/oudsm/troubleshooting/_index.md b/docs-source/content/oudsm/troubleshooting/_index.md index 8c9aa1a36..eaac9c4c8 100644 --- a/docs-source/content/oudsm/troubleshooting/_index.md +++ b/docs-source/content/oudsm/troubleshooting/_index.md @@ -89,7 +89,7 @@ Name: oudsm-1 Namespace: oudsmns Priority: 0 Node: /100.102.48.28 -Start Time: Mon, 11 Jul 2022 09:56:11 +0000 +Start Time: Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm @@ -110,7 +110,7 @@ Containers: Ports: 7001/TCP, 7002/TCP Host Ports: 0/TCP, 0/TCP State: Running - Started: Tue, 12 Jul 2022 09:56:12 +0000 + Started: Ready: True Restart Count: 0 Liveness: http-get http://:7001/oudsm delay=1200s timeout=15s period=60s #success=1 #failure=3 @@ -151,7 +151,7 @@ Events: ---- ------ ---- ---- ------- Warning FailedScheduling 39m default-scheduler 0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims. 
Normal Scheduled 39m default-scheduler Successfully assigned oudsmns/oudsm-1 to - Normal Pulled 39m kubelet Container image "container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-" already present on machine + Normal Pulled 39m kubelet Container image "container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-" already present on machine Normal Created 39m kubelet Created container oudsm Normal Started 39m kubelet Started container oudsm From 2a328e8eed4a77636c05559637788d52c6dc8f2f Mon Sep 17 00:00:00 2001 From: Manjunath Hegde Date: Thu, 20 Oct 2022 07:40:43 +0000 Subject: [PATCH 2/4] idm_22.4.1_release --- docs-source/content/oam/patch-and-upgrade/upgrade-elk.md | 4 ++-- docs-source/content/oig/patch-and-upgrade/upgrade-elk.md | 4 ++-- docs-source/content/oudsm/patch-and-upgrade/_index.md | 4 ++-- docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md index e61826075..4c0238520 100644 --- a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md @@ -17,7 +17,7 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or 1. Make sure you have downloaded the latest code repository as per [Download the latest code repository](../upgrade-an-ingress/#download-the-latest-code-repository) -1. If your domain namespace is anything other than `oamns`, edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of `namespace: "oamns"` to your domain namespace. +1. Edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of namespace to correspond to your deployment. 1. Delete the Elasticsearch and Kibana resources using the following command: @@ -27,4 +27,4 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or ### Deploy Elasticsearch and Kibana in centralized stack -1. Follow [Install Elasticsearch stack and Kibana](../../manage-oam-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. \ No newline at end of file +1. Follow [Install Elasticsearch stack and Kibana](../../manage-oam-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. diff --git a/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md index ad7c67115..34e13e016 100644 --- a/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md @@ -19,7 +19,7 @@ Deployments prior to October 22 (22.4.1) used local deployments of Elasticsearch If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or later, you must first undeploy Elasticsearch and Kibana using the steps below: -1. If your domain namespace is anything other than `oigns`, edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of `namespace: "oigns"` to your domain namespace. +1. Edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of namespace to correspond to your deployment. 1. 
Delete the Elasticsearch and Kibana resources using the following command: @@ -29,4 +29,4 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or ### Deploy Elasticsearch and Kibana in centralized stack -1. Follow [Install Elasticsearch stack and Kibana](../manage-oig-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. \ No newline at end of file +1. Follow [Install Elasticsearch stack and Kibana](../../manage-oig-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. diff --git a/docs-source/content/oudsm/patch-and-upgrade/_index.md b/docs-source/content/oudsm/patch-and-upgrade/_index.md index d063e17d0..0dd7947ca 100644 --- a/docs-source/content/oudsm/patch-and-upgrade/_index.md +++ b/docs-source/content/oudsm/patch-and-upgrade/_index.md @@ -1,7 +1,7 @@ +++ title = "Patch and upgrade" -weight = 11 -pre = "11. " +weight = 8 +pre = "8. " description= "This document provides steps to patch or upgrade an OUD image, and Elasticsearch and Kibana." +++ diff --git a/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md index f1212be04..015f45344 100644 --- a/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md @@ -76,4 +76,4 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or ### Deploy Elasticsearch and Kibana in centralized stack -1. Follow [Install Elasticsearch stack and Kibana](../../manage-oudsm-containers/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy ElasticSearch and Kibana in a centralized stack. \ No newline at end of file +1. Follow [Install Elasticsearch stack and Kibana](../../manage-oudsm-containers/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. From 57a0ac8026c4c972588c52dd253cfea8fccd2b01 Mon Sep 17 00:00:00 2001 From: Manjunath Hegde Date: Thu, 20 Oct 2022 13:37:43 +0530 Subject: [PATCH 3/4] Update upgrade-elk.md --- docs-source/content/oam/patch-and-upgrade/upgrade-elk.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md index 4d126b31c..0d8930f42 100644 --- a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md @@ -18,7 +18,6 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or 1. Make sure you have downloaded the latest code repository as per [Download the latest code repository](../upgrade-an-ingress/#download-the-latest-code-repository) 1. Edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of namespace to correspond to your deployment. -======= 1. 
Delete the Elasticsearch and Kibana resources using the following command: From 631460e7f201b5e3eae950a6c55e531f52932193 Mon Sep 17 00:00:00 2001 From: Manjunath Hegde Date: Thu, 20 Oct 2022 08:16:54 +0000 Subject: [PATCH 4/4] idm_22.4.1_release --- docs-source/content/oam/patch-and-upgrade/upgrade-elk.md | 1 - docs-source/content/oig/patch-and-upgrade/upgrade-elk.md | 2 -- docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md | 1 - 3 files changed, 4 deletions(-) diff --git a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md index 0d8930f42..4c0238520 100644 --- a/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oam/patch-and-upgrade/upgrade-elk.md @@ -27,5 +27,4 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or ### Deploy Elasticsearch and Kibana in centralized stack - 1. Follow [Install Elasticsearch stack and Kibana](../../manage-oam-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. diff --git a/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md index bd9c83626..34e13e016 100644 --- a/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oig/patch-and-upgrade/upgrade-elk.md @@ -21,7 +21,6 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or 1. Edit the `$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml` and change all instances of namespace to correspond to your deployment. - 1. Delete the Elasticsearch and Kibana resources using the following command: ``` @@ -30,5 +29,4 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or ### Deploy Elasticsearch and Kibana in centralized stack - 1. Follow [Install Elasticsearch stack and Kibana](../../manage-oig-domains/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. diff --git a/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md b/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md index e922ad8a9..015f45344 100644 --- a/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md +++ b/docs-source/content/oudsm/patch-and-upgrade/upgrade-elk.md @@ -77,4 +77,3 @@ If you are upgrading from July 22 (22.3.1) or earlier, to October 22 (22.4.1) or ### Deploy Elasticsearch and Kibana in centralized stack 1. Follow [Install Elasticsearch stack and Kibana](../../manage-oudsm-containers/logging-and-visualization/#install-elasticsearch-stack-and-kibana) to deploy Elasticsearch and Kibana in a centralized stack. -
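For reference, the undeploy step described in the `upgrade-elk.md` pages above (edit the namespaces in `elasticsearch_and_kibana.yaml`, then delete the resources) can be scripted as shown below. The exact delete command is not visible in the hunks above, so this is a sketch of a typical approach rather than the documented command; the default namespace string being replaced is `oamns` for OAM and `oigns` for OIG, and `mydomainns` is a hypothetical target namespace:

```bash
# Illustrative sketch of the local ELK undeploy step described above (OIG shown).
# "mydomainns" is a hypothetical namespace -- use your own domain namespace, and
# replace "oigns" with "oamns" when working against an OAM deployment.
NAMESPACE=mydomainns
ELK_YAML="$WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml"

# Point every namespace reference in the manifest at your deployment's namespace.
sed -i "s/namespace: \"oigns\"/namespace: \"${NAMESPACE}\"/g" "$ELK_YAML"

# Remove the locally deployed Elasticsearch and Kibana resources defined in the manifest.
kubectl delete -f "$ELK_YAML"
```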