diff --git a/main.jsonnet b/main.jsonnet index e6d5060..0db56fc 100644 --- a/main.jsonnet +++ b/main.jsonnet @@ -1,5 +1,9 @@ +// `src/` shall be included into Jsonnet library path +local obstmpl = import 'observer-template.jsonnet'; local regtmpl = import 'registry-template.jsonnet'; +// the following files will be generated by `jsonnet -J src -m . main.jsonnet` { - 'registry/registry-template-test.json': regtmpl {}, + 'registry/ocp-template.json': regtmpl {}, + 'observer/ocp-template.json': obstmpl {}, } diff --git a/observer/ocp-template.json b/observer/ocp-template.json index 6748fbe..180cd55 100644 --- a/observer/ocp-template.json +++ b/observer/ocp-template.json @@ -1,562 +1,509 @@ { - "apiVersion": "template.openshift.io/v1", - "kind": "Template", - "message": "The vsystem-app observer and patcher will be started. You can watch the", - "metadata": { - "annotations": { - "description": "The template spawns the \"sdi-observer\" pod that observes the particular namespace where SAP Data Intelligence runs and modifies its deployments and configuration to enable its pods to run.\nOn Red Hat Enterprise Linux CoreOS, SAP Data Intelligence's vsystem-vrep statefulset needs to be patched to mount `emptyDir` volume at `/exports` directory in order to enable NFS exports in the container running on top of overlayfs which is the default filesystem in RHCOS.\nThe \"sdi-observer\" pod modifies vsystem-vrep statefulset as soon as it appears to enable the NFS exports.\nThe observer also allows to patch pipeline-modeler (aka \"vflow\") pods to mark registry as insecure.\nAdditionally, it patches diagnostics-fluentd daemonset to allow its pods to access log files on the host system. It also modifies it to parse plain text log files instead of preconfigured json.\nOn Red Hat Enterprise Linux CoreOS, \"vsystem-iptables\" containers need to be run as privileged in order to load iptables-related kernel modules. SAP Data Hub containers named \"vsystem-iptables\" deployed as part of every \"vsystem-app\" deployment attempt to modify iptables rules without having the necessary permissions. The ideal solution is to pre-load these modules during node's startup. When not feasable, this template can also fix the permissions on-the-fly as the deployments are created.\nThe template must be instantiated before the installation of SAP Data Hub. Also the namespace, where SAP Data Hub will be installed, must exist before the instantiation.\nTODO: document admin project role requirement.\nUsage:\n If running in the same namespace as Data Intelligence, instantiate the\n template as is in the desired namespace:\n\n oc project $SDI_NAMESPACE\n oc process -n $SDI_NAMESPACE sdi-observer NAMESPACE=$SDI_NAMESPACE | \\\n oc create -f -\n\n If running in a different/new namespace/project, instantiate the\n template with parameters SDI_NAMESPACE and NAMESPACE, e.g.:\n\n oc new-project $SDI_NAMESPACE\n oc new-project sapdatahub-admin\n oc process sdi-observer \\\n SDI_NAMESPACE=$SDI_NAMESPACE \\\n NAMESPACE=sapdatahub-admin | oc create -f -\n", - "openshift.io/display-name": "\"OpenShift enabler and observer for SAP Data intelligence\"\n", - "openshift.io/documentation-url": "https://access.redhat.com/articles/4324391", - "openshift.io/provider-display-name": "Red Hat, Inc." 
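Note: the JSON templates under observer/ and registry/ are generated from these Jsonnet sources. As a rough sketch of the regeneration and local sanity-check workflow (the `oc process --local` step and the NAMESPACE/REDHAT_REGISTRY_SECRET_NAME values are only illustrative, not part of this change):

    jsonnet -J src -m . main.jsonnet
    oc process --local -f observer/ocp-template.json \
        NAMESPACE=sdi-observer REDHAT_REGISTRY_SECRET_NAME=rht-pull-secret \
      | oc create --dry-run -f -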
- }, - "name": "sdi-observer" - }, - "objects": [ - { - "apiVersion": "v1", - "kind": "ServiceAccount", - "metadata": { - "labels": { - "deploymentconfig": "sdi-observer" - }, - "name": "sdi-observer", - "namespace": "${NAMESPACE}" - } - }, - { - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "Role", - "metadata": { - "labels": { - "deploymentconfig": "sdi-observer" - }, - "name": "sdi-observer", - "namespace": "${SDI_NAMESPACE}" - }, - "rules": [ - { - "apiGroups": [ - "apps", - "extensions" - ], - "resources": [ - "deployments", - "deployments/scale", - "statefulsets", - "statefulsets/scale" - ], - "verbs": [ - "get", - "list", - "patch", - "watch" - ] - }, - { - "apiGroups": [ - "apps", - "extensions" - ], - "resources": [ - "daemonsets" - ], - "verbs": [ - "get", - "list", - "patch", - "update", - "watch" - ] - }, - { - "apiGroups": [ - "" - ], - "resources": [ - "secrets" - ], - "verbs": [ - "get" - ] - }, - { - "apiGroups": [ - "" - ], - "resources": [ - "configmaps" - ], - "verbs": [ - "get", - "list", - "watch", - "patch" - ] - }, - { - "apiGroups": [ - "" - ], - "resources": [ - "namespaces", - "namespaces/status" - ], - "verbs": [ - "get", - "list", - "watch" - ] - }, - { - "apiGroups": [ - "", - "project.openshift.io" - ], - "resources": [ - "projects" - ], - "verbs": [ - "get" - ] - }, - { - "apiGroups": [ - "apps", - "deploymentconfigs.apps.openshift.io" - ], - "resources": [ - "deploymentconfigs" - ], - "verbs": [ - "get", - "list", - "delete" - ] - }, - { - "apiGroups": [ - "", - "authorization.openshift.io", - "rbac.authorization.k8s.io" - ], - "resources": [ - "roles", - "rolebindings", - "serviceaccounts" - ], - "verbs": [ - "get", - "list", - "delete" - ] - } - ] - }, - { - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "RoleBinding", - "metadata": { - "labels": { - "deploymentconfig": "sdi-observer" - }, - "name": "sdi-observer-${ROLE_BINDING_SUFFIX}", - "namespace": "${SDI_NAMESPACE}" - }, - "roleRef": { - "apiGroup": "rbac.authorization.k8s.io", - "kind": "Role", - "name": "sdi-observer", - "namespace": "${SDI_NAMESPACE}" - }, - "subjects": [ - { - "kind": "ServiceAccount", - "name": "sdi-observer", - "namespace": "${NAMESPACE}" - } - ] - }, - { - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "ClusterRoleBinding", - "metadata": { - "labels": { - "deploymentconfig": "sdi-observer" - }, - "name": "sdi-observer-2-node-reader-${ROLE_BINDING_SUFFIX}" - }, - "roleRef": { - "apiGroup": "rbac.authorization.k8s.io", - "kind": "ClusterRole", - "name": "system:node-reader" - }, - "subjects": [ - { - "kind": "ServiceAccount", - "name": "sdi-observer", - "namespace": "${NAMESPACE}" - } - ] - }, - { - "apiVersion": "v1", - "kind": "ImageStream", - "metadata": { - "name": "sdi-observer", - "namespace": "${NAMESPACE}" - }, - "spec": null, - "status": { - "dockerImageRepository": "" - } - }, - { - "apiVersion": "image.openshift.io/v1", - "kind": "ImageStream", - "metadata": { - "name": "ubi8", - "namespace": "${NAMESPACE}" - }, - "spec": { - "lookupPolicy": { - "local": false - }, - "tags": [ - { - "from": { - "kind": "DockerImage", - "name": "registry.redhat.io/ubi8/ubi:latest" + "apiVersion": "template.openshift.io/v1", + "kind": "Template", + "message": "The vsystem-app observer and patcher will be started. 
You can watch the progress with the\nfollowing command: oc logs -f dc/sdi-observer\n", + "metadata": { + "annotations": { + "description": "The template spawns the \"sdi-observer\" pod that observes the particular\nnamespace where SAP Data Intelligence runs and modifies its deployments\nand configuration to enable its pods to run.\n\nOn Red Hat Enterprise Linux CoreOS, SAP Data Intelligence's vsystem-vrep\nstatefulset needs to be patched to mount `emptyDir` volume at `/exports`\ndirectory in order to enable NFS exports in the container running on top\nof overlayfs which is the default filesystem in RHCOS.\n\nThe \"sdi-observer\" pod modifies vsystem-vrep statefulset as soon as it\nappears to enable the NFS exports.\n\nThe observer also allows to patch pipeline-modeler (aka \"vflow\") pods to\nmark registry as insecure.\n\nAdditionally, it patches diagnostics-fluentd daemonset to allow its pods\nto access log files on the host system. It also modifies it to parse\nplain text log files instead of preconfigured json.\n\nOn Red Hat Enterprise Linux CoreOS, \"vsystem-iptables\" containers need to\nbe run as privileged in order to load iptables-related kernel modules.\nSAP Data Hub containers named \"vsystem-iptables\" deployed as part of\nevery \"vsystem-app\" deployment attempt to modify iptables rules without\nhaving the necessary permissions. The ideal solution is to pre-load these\nmodules during node's startup. When not feasable, this template can also\nfix the permissions on-the-fly as the deployments are created.\n\nThe template must be instantiated before the installation of SAP Data\nHub. Also the namespace, where SAP Data Hub will be installed, must exist\nbefore the instantiation.\n\nTODO: document admin project role requirement.\n\nUsage:\n If running in the same namespace as Data Intelligence, instantiate the\n template as is in the desired namespace:\n\n oc project $SDI_NAMESPACE\n oc process -n $SDI_NAMESPACE sdi-observer NAMESPACE=$SDI_NAMESPACE | \\\n oc create -f -\n\n If running in a different/new namespace/project, instantiate the\n template with parameters SDI_NAMESPACE and NAMESPACE, e.g.:\n\n oc new-project $SDI_NAMESPACE\n oc new-project sapdatahub-admin\n oc process sdi-observer \\\n SDI_NAMESPACE=$SDI_NAMESPACE \\\n NAMESPACE=sapdatahub-admin | oc create -f -\n", + "openshift.io/display-name": "OpenShift enabler and observer for SAP Data intelligence\n", + "openshift.io/documentation-url": "https://access.redhat.com/articles/4324391", + "openshift.io/provider-display-name": "Red Hat, Inc." 
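For illustration only: the vsystem-vrep change described above boils down to adding an emptyDir volume mounted at /exports. A rough manual equivalent (the observer script may apply the patch differently, and the volume name "exports" is just a placeholder):

    oc set volume statefulset/vsystem-vrep -n "$SDI_NAMESPACE" \
        --add --name=exports --type=emptyDir --mount-path=/exports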
+ }, + "name": "sdi-observer" + }, + "objects": [ + { + "apiVersion": "v1", + "kind": "DeploymentConfig", + "metadata": { + "labels": { + "deploymentconfig": "sdi-observer" + }, + "name": "sdi-observer", + "namespace": "${NAMESPACE}" + }, + "spec": { + "replicas": 1, + "selector": { + "deploymentconfig": "sdi-observer" + }, + "strategy": { + "type": "Rolling" + }, + "template": { + "metadata": { + "labels": { + "deploymentconfig": "sdi-observer" + } + }, + "spec": { + "containers": [ + { + "env": [ ], + "image": " ", + "name": "sdi-observer" + } + ], + "restartPolicy": "Always", + "serviceAccount": "sdi-observer", + "serviceAccountName": "sdi-observer" + } + }, + "triggers": [ + { + "type": "ConfigChange" + }, + { + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "sdi-observer" + ], + "from": { + "kind": "ImageStreamTag", + "name": "sdi-observer:${OCP_MINOR_RELEASE}" + } + }, + "type": "ImageChange" + } + ] + } + }, + { + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": { + "labels": { + "deploymentconfig": "sdi-observer" + }, + "name": "sdi-observer", + "namespace": "${NAMESPACE}" + } + }, + { + "apiVersion": "build.openshift.io/v1", + "kind": "BuildConfig", + "metadata": { + "labels": { + "deploymentconfig": "sdi-observer" + }, + "name": "sdi-observer", + "namespace": "${NAMESPACE}" + }, + "spec": { + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "sdi-observer:${OCP_MINOR_RELEASE}" + } + }, + "runPolicy": "Serial", + "source": { + "dockerfile": "FROM openshift/cli:latest\nRUN dnf update -y\n# TODO: jq is not yet available in EPEL-8\nRUN dnf install -y \\\n https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \\\n dnf install -y jq\nRUN dnf install -y \\\n https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \\\n dnf install -y parallel procps-ng bc git && dnf clean all -y\n# TODO: determine OCP version from environment\nCOPY https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/openshift-client-linux.tar.gz /tmp/\nCOPY https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/sha256sum.txt /tmp/\n# verify the downloaded tar\nRUN /bin/bash -c 'grep \"$(awk '\"'\"'{print $1}'\"'\"' \\\n <(sha256sum /tmp/openshift-client-linux.tar.gz))[[:space:]]\\+openshift-client-linux-.\" \\\n /tmp/sha256sum.txt'\nRUN /bin/bash -c 'tar -C /usr/local/bin/ -xzvf /tmp/openshift-client-linux.tar.gz -T <(printf oc)'\n# TODO: verify signatures as well\nRUN mkdir -p /usr/local/bin \\\n /usr/local/share/{sdi-observer,openshift-acme}\nRUN git clone --depth 5 --single-branch \\\n --branch ${LETSENCRYPT_REVISION} \\\n ${LETSENCRYPT_REPOSITORY} /usr/local/share/openshift-acme\nRUN git clone --depth 5 --single-branch \\\n --branch ${SDI_OBSERVER_GIT_REVISION} \\\n ${SDI_OBSERVER_REPOSITORY} /usr/local/share/sap-data-intelligence\nRUN for bin in observer.sh deploy-registry.sh deploy-letsencrypt.sh; do \\\n cp -lv /usr/local/share/sap-data-intelligence/$bin \\\n /usr/local/bin/$bin; \\\n chmod a+rx /usr/local/bin/$bin; \\\n done\nRUN ln -s /usr/local/share/sap-data-intelligence/observer \\\n /usr/local/share/sdi-observer\nWORKDIR /usr/local/share/sdi-observer\n" + }, + "strategy": { + "dockerStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "ubi8:latest" + }, + "pullSecret": { + "name": "${REDHAT_REGISTRY_SECRET_NAME}" + } + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "apiVersion": 
"rbac.authorization.k8s.io/v1", + "kind": "Role", + "metadata": { + "labels": { + "deploymentconfig": "sdi-observer" + }, + "name": "sdi-observer", + "namespace": "${SDI_NAMESPACE}" + }, + "rules": [ + { + "apiGroups": [ + "apps", + "extensions" + ], + "resources": [ + "deployments", + "deployments/scale", + "statefulsets", + "statefulsets/scale" + ], + "verbs": [ + "get", + "list", + "patch", + "watch" + ] + }, + { + "apiGroups": [ + "apps", + "extensions" + ], + "resources": [ + "daemonsets" + ], + "verbs": [ + "get", + "list", + "patch", + "update", + "watch" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "secrets" + ], + "verbs": [ + "get" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "configmaps" + ], + "verbs": [ + "get", + "list", + "watch", + "patch" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "namespaces", + "namespaces/status" + ], + "verbs": [ + "get", + "list", + "watch" + ] }, - "name": "latest", - "referencePolicy": { - "type": "Source" + { + "apiGroups": [ + "", + "project.openshift.io" + ], + "resources": [ + "projects" + ], + "verbs": [ + "get" + ] + }, + { + "apiGroups": [ + "apps", + "deploymentconfigs.apps.openshift.io" + ], + "resources": [ + "deploymentconfigs" + ], + "verbs": [ + "get", + "list", + "delete" + ] + }, + { + "apiGroups": [ + "", + "authorization.openshift.io", + "rbac.authorization.k8s.io" + ], + "resources": [ + "roles", + "rolebindings", + "serviceaccounts" + ], + "verbs": [ + "get", + "list", + "delete" + ] } - } - ] - } - }, - { - "apiVersion": "build.openshift.io/v1", - "kind": "BuildConfig", - "metadata": { - "labels": { - "deploymentconfig": "sdi-observer" - }, - "name": "sdi-observer", - "namespace": "${NAMESPACE}" - }, - "spec": { - "output": { - "to": { - "kind": "ImageStreamTag", - "name": "sdi-observer:${OCP_MINOR_RELEASE}" - } - }, - "runPolicy": "Serial", - "source": { - "dockerfile": "FROM openshift/cli:latest\nRUN dnf update -y\n# TODO: jq is not yet available in EPEL-8\nRUN dnf install -y \\\n https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \\\n dnf install -y jq\nRUN dnf install -y \\\n https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \\\n dnf install -y parallel procps-ng bc git && dnf clean all -y\n# TODO: determine OCP version from environment\nCOPY https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/openshift-client-linux.tar.gz /tmp/\nCOPY https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/sha256sum.txt /tmp/\n# verify the downloaded tar\nRUN /bin/bash -c 'grep \"$(awk '\"'\"'{print $1}'\"'\"' <(sha256sum /tmp/openshift-client-linux.tar.gz))[[:space:]]\\+openshift-client-linux-.\" /tmp/sha256sum.txt'\nRUN /bin/bash -c 'tar -C /usr/local/bin/ -xzvf /tmp/openshift-client-linux.tar.gz -T <(printf oc)'\n# TODO: verify signatures as well\nRUN mkdir -p /usr/local/bin \\\n /usr/local/share/{sdi-observer,openshift-acme}\nRUN git clone --depth 5 --single-branch \\\n --branch ${LETSENCRYPT_REVISION} \\\n ${LETSENCRYPT_REPOSITORY} /usr/local/share/openshift-acme\nRUN git clone --depth 5 --single-branch \\\n --branch ${SDI_OBSERVER_GIT_REVISION} \\\n ${SDI_OBSERVER_REPOSITORY} /usr/local/share/sap-data-intelligence\nRUN for bin in observer.sh deploy-registry.sh deploy-letsencrypt.sh; do \\\n cp -lv /usr/local/share/sap-data-intelligence/$bin \\\n /usr/local/bin/$bin; \\\n chmod a+rx /usr/local/bin/$bin; \\\n done\nRUN ln -s /usr/local/share/sap-data-intelligence/observer \\\n 
/usr/local/share/sdi-observer\nWORKDIR /usr/local/share/sdi-observer\n" - }, - "strategy": { - "dockerStrategy": { - "from": { - "kind": "ImageStreamTag", - "name": "ubi8:latest" + ] + }, + { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "RoleBinding", + "metadata": { + "labels": { + "deploymentconfig": "sdi-observer" }, - "pullSecret": { - "name": "${REGISTRY_SECRET_NAME}" + "name": "sdi-observer-${ROLE_BINDING_SUFFIX}", + "namespace": "${SDI_NAMESPACE}" + }, + "roleRef": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Role", + "name": "sdi-observer", + "namespace": "${SDI_NAMESPACE}" + }, + "subjects": [ + { + "kind": "ServiceAccount", + "name": "sdi-observer", + "namespace": "${NAMESPACE}" } - } - }, - "triggers": [ - { - "type": "ImageChange" - }, - { - "type": "ConfigChange" - } - ] - } - }, - { - "apiVersion": "v1", - "kind": "DeploymentConfig", - "metadata": { - "labels": { - "deploymentconfig": "sdi-observer" - }, - "name": "sdi-observer", - "namespace": "${NAMESPACE}" - }, - "spec": { - "replicas": 1, - "selector": { - "deploymentconfig": "sdi-observer" - }, - "strategy": { - "type": "Rolling" - }, - "template": { - "metadata": { + ] + }, + { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": { "labels": { - "deploymentconfig": "sdi-observer" + "deploymentconfig": "sdi-observer" + }, + "name": "sdi-observer-node-reader-${ROLE_BINDING_SUFFIX}" + }, + "roleRef": { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": "system:node-reader" + }, + "subjects": [ + { + "kind": "ServiceAccount", + "name": "sdi-observer", + "namespace": "${NAMESPACE}" } - }, - "spec": { - "containers": [ - { - "command": [ - "/usr/local/bin/observer.sh" - ], - "env": [ - { - "name": "SDI_NAMESPACE", - "value": "${SDI_NAMESPACE}" - }, - { - "name": "DRY_RUN", - "value": "${DRY_RUN}" - }, - { - "name": "REGISTRY", - "value": "${REGISTRY}" - }, - { - "name": "MARK_REGISTRY_INSECURE", - "value": "${MARK_REGISTRY_INSECURE}" - }, - { - "name": "NODE_LOG_FORMAT", - "value": "${NODE_LOG_FORMAT}" - }, - { - "name": "DEPLOY_SDI_REGISTRY", - "value": "${DEPLOY_SDI_REGISTRY}" - }, - { - "name": "DEPLOY_LETSENCRYPT", - "value": "${DEPLOY_LETSENCRYPT}" - }, - { - "name": "EXPOSE_WITH_LETSENCRYPT", - "value": "${EXPOSE_WITH_LETSENCRYPT}" - }, - { - "name": "LETSENCRYPT_ENVIRONMENT", - "value": "${LETSENCRYPT_ENVIRONMENT}" - }, - { - "name": "SDI_REGISTRY_STORAGE_CLASS_NAME", - "value": "${SDI_REGISTRY_STORAGE_CLASS_NAME}" - }, - { - "name": "SDI_REGISTRY_HTPASSWD_SECRET_NAME", - "value": "${SDI_REGISTRY_HTPASSWD_SECRET_NAME}" - }, - { - "name": "SDI_REGISTRY_USERNAME", - "value": "${SDI_REGISTRY_USERNAME}" - }, - { - "name": "SDI_REGISTRY_PASSWORD", - "value": "${SDI_REGISTRY_PASSWORD}" - }, - { - "name": "FORCE_REDEPLOY", - "value": "${FORCE_REDEPLOY}" - }, - { - "name": "RECREATE_SECRETS", - "value": "${RECREATE_SECRETS}" - }, - { - "name": "SDI_REGISTRY_NAMESPACE", - "value": "${SDI_REGISTRY_NAMESPACE}" - }, - { - "name": "REDHAT_REGISTRY_SECRET_NAMESPACE", - "value": "${REDHAT_REGISTRY_SECRET_NAMESPACE}" - } - ], - "image": " ", - "name": "sdi-observer" - } + ] + }, + { + "apiVersion": "v1", + "kind": "ImageStream", + "metadata": { + "name": "sdi-observer", + "namespace": "${NAMESPACE}" + }, + "spec": null, + "status": { + "dockerImageRepository": "" + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "annotations": { + "template.openshift.io/expose-uri": 
"https://{.spec.clusterIP}:{.spec.ports[?(.name==\"registry\")].port}\n" + }, + "name": "sdi-observer", + "namespace": "${NAMESPACE}" + }, + "spec": { + "ports": [ + { + "name": "registry", + "port": 5000 + } ], - "restartPolicy": "Always", - "serviceAccount": "sdi-observer", - "serviceAccountName": "sdi-observer" - } - }, - "triggers": [ - { - "type": "ConfigChange" - }, - { - "imageChangeParams": { - "automatic": true, - "containerNames": [ - "sdi-observer" - ], - "from": { - "kind": "ImageStreamTag", - "name": "sdi-observer:${OCP_MINOR_RELEASE}" - } + "selector": { + "deploymentconfig": "sdi-observer" + }, + "sessionAffinity": "ClientIP", + "type": "ClusterIP" + } + }, + { + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "annotations": { + "template.openshift.io/expose-uri": "https://{.spec.host}{.spec.path}" }, - "type": "ImageChange" - } - ] + "name": "sdi-observer", + "namespace": "${NAMESPACE}" + }, + "spec": { + "host": "${HOSTNAME}", + "port": { + "targetPort": "registry" + }, + "subdomain": "", + "tls": { + "insecureEdgeTerminationPolicy": "Redirect", + "termination": "edge" + }, + "to": { + "kind": "Service", + "name": "sdi-observer" + } + } + } + ], + "parameters": [ + { + "description": "If set to true, no action will be performed. The pod will just print\nwhat would have been executed.\n", + "name": "DRY_RUN", + "required": false, + "value": "false" + }, + { + "description": "The desired namespace to deploy resources to. Defaults to the current\none.\n", + "name": "NAMESPACE", + "required": true + }, + { + "description": "Name of the secret with credentials for registry.redhat.io registry. Please visit\nhttps://access.redhat.com/terms-based-registry/ to obtain the OpenShift secret. For\nmore details, please refer to https://access.redhat.com/RegistryAuthentication.'\n", + "name": "REDHAT_REGISTRY_SECRET_NAME", + "required": true + }, + { + "description": "Minor release of OpenShift Container Platform (e.g. 4.2). This value must match the OCP\nserver version. The biggest tolerated difference between the versions is 1 in the second\ndigit.\n", + "name": "OCP_MINOR_RELEASE", + "required": true, + "value": "4.2" + }, + { + "description": "The name of the SAP Data Hub namespace to manage. Defaults to the current one. It must be\nset only in case the observer is running in a differnt namespace (see NAMESPACE).\n", + "name": "SDI_NAMESPACE" + }, + { + "description": "TODO\n", + "name": "SDI_OBSERVER_REPOSITORY", + "required": true, + "value": "https://github.com/redhat-sap/sap-data-intelligence" + }, + { + "description": "Revision (e.g. tag, commit or branch) of git repository where SDI Observer's source\nreside.\n", + "name": "SDI_OBSERVER_GIT_REVISION", + "required": true, + "value": "master" + }, + { + "description": "A random suffix for the new RoleBinding's name. No need to edit.\n", + "from": "[a-z0-9]{5}", + "generate": "expression", + "name": "ROLE_BINDING_SUFFIX" + }, + { + "description": "Set to true if the given or configured VFLOW_REGISTRY shall be marked as insecure in all\ninstances of Pipeline Modeler.\n", + "name": "MARK_REGISTRY_INSECURE", + "required": true, + "value": "false" + }, + { + "description": "Patch deployments with vsystem-iptables container to make them privileged in order to load\nkernel modules they need. Unless true, it is assumed that the modules have been pre-loaded\non the worker nodes. 
This will make also vsystem-vrep-* pod privileged.\n", + "name": "MAKE_VSYSTEM_IPTABLES_PODS_PRIVILEGED", + "required": true, + "value": "false" + }, + { + "description": "Format of the logging files on the nodes. Allowed values are \"json\" and \"text\".\nInitially, SDI fluentd pods are configured to parse \"json\" while OpenShift 4 uses\n\"text\" format by default. If not given, the default is \"text\".\n", + "name": "NODE_LOG_FORMAT", + "required": false + }, + { + "description": "The registry to mark as insecure. If not given, it will be determined from the\ninstaller-config secret in the SDI_NAMESPACE. If DEPLOY_SDI_REGISTRY is set to \"true\",\nthis variable will be used as the container image registry's hostname when creating the\ncorresponding route.\n", + "name": "REGISTRY" + }, + { + "description": "Whether to deploy container image registry for the purpose of SAP Data Intelligence.\nRequires project admin role attached to the sdi-observer service account. If enabled,\nREDHAT_REGISTRY_SECRET_NAME must be provided.\n", + "name": "DEPLOY_SDI_REGISTRY", + "required": false, + "value": "false" + }, + { + "description": "Whether to deploy letsencrypt controller. Requires project admin role attached to the\nsdi-observer service account.\n", + "name": "DEPLOY_LETSENCRYPT", + "required": false, + "value": "false" + }, + { + "description": "Whether to expose routes annotated for letsencrypt controller. Requires project admin role\nattached to the sdi-observer service account. Letsencrypt controller must be deployed\neither via this observer or cluster-wide for this to have an effect. Defaults to\nDEPLOY_LETSENCRYPT.\n", + "name": "EXPOSE_WITH_LETSENCRYPT" + }, + { + "description": "Unless given, a local copy will be used.\n", + "name": "LETSENCRYPT_REPOSITORY", + "required": false, + "value": "https://github.com/tnozicka/openshift-acme" + }, + { + "description": "Revision of letsencrypt repository to check out.\n", + "name": "LETSENCRYPT_REVISION", + "required": false, + "value": "2cfefc7388102408a334ada90933531c7e5e11c2" + }, + { + "description": "Either \"live\" or \"staging\". Use the latter when debugging SDI Observer's deployment.\n", + "name": "LETSENCRYPT_ENVIRONMENT", + "required": true, + "value": "live" + }, + { + "description": "Whether to forcefully replace existing registry and/or letsencrypt deployments and\nconfiguration files.\n", + "name": "FORCE_REDEPLOY", + "required": false, + "value": "false" + }, + { + "description": "Whether to replace secrets like SDI Registry's htpasswd file if they exist already.\n", + "name": "RECREATE_SECRETS", + "required": false, + "value": "false" + }, + { + "description": "Volume space available for container images (e.g. 
75Gi).\n", + "name": "SDI_REGISTRY_VOLUME_CAPACITY", + "required": true, + "value": "75Gi" + }, + { + "description": "Unless given, the default storage class will be used.\n", + "name": "SDI_REGISTRY_STORAGE_CLASS_NAME", + "required": false + }, + { + "description": "A secret with htpasswd file with authentication data for the sdi image container If given\nand the secret exists, it will be used instead of SDI_REGISTRY_USERNAME and\nSDI_REGISTRY_PASSWORD.\n", + "name": "SDI_REGISTRY_HTPASSWD_SECRET_NAME", + "required": false + }, + { + "from": "user-[a-z0-9]{6}", + "generage": "expression", + "name": "SDI_REGISTRY_USERNAME", + "required": false + }, + { + "from": "user-[a-zA-Z0-9]{32}", + "generage": "expression", + "name": "SDI_REGISTRY_PASSWORD", + "required": false + }, + { + "description": "Namespace where the secret with credentials for registry.redhat.io registry resides.\n", + "name": "REDHAT_REGISTRY_SECRET_NAMESPACE", + "require": false } - } - ], - "parameters": [ - { - "description": "Minor release of OpenShift Container Platform (e.g. 4.2). This value must match the OCP server version. The biggest tolerated difference between the versions is 1 in the second digit.\n", - "name": "OCP_MINOR_RELEASE", - "required": true, - "value": "4.2" - }, - { - "description": "If set to true, no action will be performed. The pod will just print what would have been executed.\n", - "name": "DRY_RUN", - "required": false, - "value": "false" - }, - { - "description": "The name of the SAP Data Hub namespace to manage. Defaults to the current one. It must be set only in case the observer is running in a differnt namespace (see NAMESPACE).\n", - "name": "SDI_NAMESPACE" - }, - { - "description": "The desired namespace, where the vsystem-app observer shall run. Defaults to the current one. Needs to be set only if running the observer outside of SDI_NAMESPACE.\n", - "name": "NAMESPACE", - "required": true - }, - { - "description": "TODO\n", - "name": "SDI_OBSERVER_REPOSITORY", - "required": true, - "value": "https://github.com/redhat-sap/sap-data-intelligence" - }, - { - "description": "Revision (e.g. tag, commit or branch) of git repository where SDI Observer's source reside.\n", - "name": "SDI_OBSERVER_GIT_REVISION", - "required": true, - "value": "master" - }, - { - "description": "A random suffix for the new RoleBinding's name. No need to edit.\n", - "from": "[a-z0-9]{5}", - "generate": "expression", - "name": "ROLE_BINDING_SUFFIX" - }, - { - "description": "Set to true if the given or configured VFLOW_REGISTRY shall be marked as insecure in all instances of Pipeline Modeler.\n", - "name": "MARK_REGISTRY_INSECURE", - "required": true, - "value": "false" - }, - { - "description": "Patch deployments with vsystem-iptables container to make them privileged in order to load kernel modules they need. Unless true, it is assumed that the modules have been pre-loaded on the worker nodes. This will make also vsystem-vrep-* pod privileged.\n", - "name": "MAKE_VSYSTEM_IPTABLES_PODS_PRIVILEGED", - "required": true, - "value": "false" - }, - { - "description": "Format of the logging files on the nodes. Allowed values are \"json\" and \"text\". Initially, SDI fluentd pods are configured to parse \"json\" while OpenShift 4 uses \"text\" format by default. If not given, the default is \"text\".\n", - "name": "NODE_LOG_FORMAT", - "required": false - }, - { - "description": "The registry to mark as insecure. If not given, it will be determined from the installer-config secret in the SDI_NAMESPACE. 
If DEPLOY_SDI_REGISTRY is set to \"true\", this variable will be used as the container image registry's hostname when creating the corresponding route.\n", - "name": "REGISTRY" - }, - { - "description": "Target namespace where to deploy container image registry.\n", - "name": "SDI_REGISTRY_NAMESPACE", - "required": false - }, - { - "description": "Whether to deploy container image registry for the purpose of SAP Data Intelligence. Requires project admin role attached to the sdi-observer service account. If enabled, REDHAT_REGISTRY_SECRET_NAME must be provided.\n", - "name": "DEPLOY_SDI_REGISTRY", - "required": false, - "value": "false" - }, - { - "description": "Whether to deploy letsencrypt controller. Requires project admin role attached to the sdi-observer service account.\n", - "name": "DEPLOY_LETSENCRYPT", - "required": false, - "value": "false" - }, - { - "description": "Whether to expose routes annotated for letsencrypt controller. Requires project admin role attached to the sdi-observer service account. Letsencrypt controller must be deployed either via this observer or cluster-wide for this to have an effect. Defaults to DEPLOY_LETSENCRYPT.\n", - "name": "EXPOSE_WITH_LETSENCRYPT" - }, - { - "description": "Unless given, a local copy will be used.\n", - "name": "LETSENCRYPT_REPOSITORY", - "required": false, - "value": "https://github.com/tnozicka/openshift-acme" - }, - { - "description": "Revision of letsencrypt repository to check out.\n", - "name": "LETSENCRYPT_REVISION", - "required": false, - "value": "2cfefc7388102408a334ada90933531c7e5e11c2" - }, - { - "description": "Either \"live\" or \"staging\". Use the latter when debugging SDI Observer's deployment.\n", - "name": "LETSENCRYPT_ENVIRONMENT", - "required": true, - "value": "live" - }, - { - "description": "Whether to forcefully replace existing registry and/or letsencrypt deployments and configuration files.\n", - "name": "FORCE_REDEPLOY", - "required": false, - "value": "false" - }, - { - "description": "Whether to replace secrets like SDI Registry's htpasswd file if they exist already.\n", - "name": "RECREATE_SECRETS", - "required": false, - "value": "false" - }, - { - "description": "Volume space available for container images (e.g. 75Gi).", - "name": "SDI_REGISTRY_VOLUME_CAPACITY", - "required": true, - "value": "75Gi" - }, - { - "description": "Unless given, the default storage class will be used.\n", - "name": "SDI_REGISTRY_STORAGE_CLASS_NAME", - "required": false - }, - { - "description": "A secret with htpasswd file with authentication data for the sdi image container If given and the secret exists, it will be used instead of SDI_REGISTRY_USERNAME and SDI_REGISTRY_PASSWORD.\n", - "name": "SDI_REGISTRY_HTPASSWD_SECRET_NAME", - "required": false - }, - { - "from": "user-[a-z0-9]{6}", - "generage": "expression", - "name": "SDI_REGISTRY_USERNAME", - "required": false - }, - { - "from": "user-[a-zA-Z0-9]{32}", - "generage": "expression", - "name": "SDI_REGISTRY_PASSWORD", - "required": false - }, - { - "description": "Name of the secret with credentials for registry.redhat.io registry. Please visit https://access.redhat.com/terms-based-registry/ to obtain the OpenShift secret. For more details, please refer to https://access.redhat.com/RegistryAuthentication. 
Required if DEPLOY_SDI_REGISTRY is enabled.", - "name": "REDHAT_REGISTRY_SECRET_NAME", - "required": false - }, - { - "description": "Namespace where the secret with credentials for registry.redhat.io registry resides.", - "name": "REDHAT_REGISTRY_SECRET_NAMESPACE", - "require": false - } - ], - "progress with the following command": "oc logs -f dc/sdi-observer" + ] } diff --git a/observer/ocp-template.yaml b/observer/ocp-template.yaml deleted file mode 100644 index 13b1c88..0000000 --- a/observer/ocp-template.yaml +++ /dev/null @@ -1,504 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: sdi-observer - annotations: - openshift.io/display-name: > - "OpenShift enabler and observer for SAP Data intelligence" - description: > - The template spawns the "sdi-observer" pod that observes the particular - namespace where SAP Data Intelligence runs and modifies its deployments - and configuration to enable its pods to run. - - On Red Hat Enterprise Linux CoreOS, SAP Data Intelligence's vsystem-vrep - statefulset needs to be patched to mount `emptyDir` volume at `/exports` - directory in order to enable NFS exports in the container running on top - of overlayfs which is the default filesystem in RHCOS. - - The "sdi-observer" pod modifies vsystem-vrep statefulset as soon as it - appears to enable the NFS exports. - - The observer also allows to patch pipeline-modeler (aka "vflow") pods to - mark registry as insecure. - - Additionally, it patches diagnostics-fluentd daemonset to allow its pods - to access log files on the host system. It also modifies it to parse - plain text log files instead of preconfigured json. - - On Red Hat Enterprise Linux CoreOS, "vsystem-iptables" containers need to - be run as privileged in order to load iptables-related kernel modules. - SAP Data Hub containers named "vsystem-iptables" deployed as part of - every "vsystem-app" deployment attempt to modify iptables rules without - having the necessary permissions. The ideal solution is to pre-load these - modules during node's startup. When not feasable, this template can also - fix the permissions on-the-fly as the deployments are created. - - The template must be instantiated before the installation of SAP Data - Hub. Also the namespace, where SAP Data Hub will be installed, must exist - before the instantiation. - - TODO: document admin project role requirement. - - Usage: - If running in the same namespace as Data Intelligence, instantiate the - template as is in the desired namespace: - - oc project $SDI_NAMESPACE - oc process -n $SDI_NAMESPACE sdi-observer NAMESPACE=$SDI_NAMESPACE | \ - oc create -f - - - If running in a different/new namespace/project, instantiate the - template with parameters SDI_NAMESPACE and NAMESPACE, e.g.: - - oc new-project $SDI_NAMESPACE - oc new-project sapdatahub-admin - oc process sdi-observer \ - SDI_NAMESPACE=$SDI_NAMESPACE \ - NAMESPACE=sapdatahub-admin | oc create -f - - - openshift.io/provider-display-name: "Red Hat, Inc." - openshift.io/documentation-url: "https://access.redhat.com/articles/4324391" -message: >- - The vsystem-app observer and patcher will be started. 
You can watch the - progress with the following command: oc logs -f dc/sdi-observer -objects: - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: sdi-observer - namespace: ${NAMESPACE} - labels: - deploymentconfig: sdi-observer - - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: sdi-observer - namespace: ${SDI_NAMESPACE} - labels: - deploymentconfig: sdi-observer - rules: - - apiGroups: - - apps - - extensions - resources: - - deployments - - deployments/scale - - statefulsets - - statefulsets/scale - verbs: - - get - - list - - patch - - watch - - apiGroups: - - apps - - extensions - resources: - - daemonsets - verbs: - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - # necessary to get the configured registry out of secrets/installer-config - - secrets - verbs: - - get - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - patch - # mandatory permissions if running in a different namespace - - apiGroups: - - "" - resources: - - namespaces - - namespaces/status - verbs: - - get - - list - - watch - - apiGroups: - - "" - - project.openshift.io - resources: - - projects - verbs: - - get - # necessary to cleanup obsolete sdi helpers - - apiGroups: - - apps - - deploymentconfigs.apps.openshift.io - resources: - - deploymentconfigs - verbs: - - get - - list - - delete - - apiGroups: - - "" - - authorization.openshift.io - - rbac.authorization.k8s.io - resources: - - roles - - rolebindings - - serviceaccounts - verbs: - - get - - list - - delete - - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - namespace: ${SDI_NAMESPACE} - name: sdi-observer-${ROLE_BINDING_SUFFIX} - labels: - deploymentconfig: sdi-observer - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: sdi-observer - namespace: ${SDI_NAMESPACE} - subjects: - - kind: ServiceAccount - name: sdi-observer - namespace: ${NAMESPACE} - - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: sdi-observer-2-node-reader-${ROLE_BINDING_SUFFIX} - labels: - deploymentconfig: sdi-observer - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:node-reader - subjects: - - kind: ServiceAccount - name: sdi-observer - namespace: ${NAMESPACE} - - - apiVersion: v1 - kind: ImageStream - metadata: - name: sdi-observer - namespace: ${NAMESPACE} - spec: - status: - dockerImageRepository: "" - - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - name: ubi8 - namespace: ${NAMESPACE} - spec: - lookupPolicy: - local: false - tags: - - from: - kind: DockerImage - name: registry.redhat.io/ubi8/ubi:latest - name: latest - referencePolicy: - type: Source - - - kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: sdi-observer - namespace: ${NAMESPACE} - labels: - deploymentconfig: sdi-observer - spec: - runPolicy: "Serial" - triggers: - - type: "ImageChange" - - type: "ConfigChange" - source: - dockerfile: | - FROM openshift/cli:latest - RUN dnf update -y - # TODO: jq is not yet available in EPEL-8 - RUN dnf install -y \ - https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ - dnf install -y jq - RUN dnf install -y \ - https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ - dnf install -y parallel procps-ng bc git && dnf clean all -y - # TODO: determine OCP version from environment - COPY 
https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/openshift-client-linux.tar.gz /tmp/ - COPY https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/sha256sum.txt /tmp/ - # verify the downloaded tar - RUN /bin/bash -c 'grep "$(awk '"'"'{print $1}'"'"' <(sha256sum /tmp/openshift-client-linux.tar.gz))[[:space:]]\+openshift-client-linux-." /tmp/sha256sum.txt' - RUN /bin/bash -c 'tar -C /usr/local/bin/ -xzvf /tmp/openshift-client-linux.tar.gz -T <(printf oc)' - # TODO: verify signatures as well - RUN mkdir -p /usr/local/bin \ - /usr/local/share/{sdi-observer,openshift-acme} - RUN git clone --depth 5 --single-branch \ - --branch ${LETSENCRYPT_REVISION} \ - ${LETSENCRYPT_REPOSITORY} /usr/local/share/openshift-acme - RUN git clone --depth 5 --single-branch \ - --branch ${SDI_OBSERVER_GIT_REVISION} \ - ${SDI_OBSERVER_REPOSITORY} /usr/local/share/sap-data-intelligence - RUN for bin in observer.sh deploy-registry.sh deploy-letsencrypt.sh; do \ - cp -lv /usr/local/share/sap-data-intelligence/$bin \ - /usr/local/bin/$bin; \ - chmod a+rx /usr/local/bin/$bin; \ - done - RUN ln -s /usr/local/share/sap-data-intelligence/observer \ - /usr/local/share/sdi-observer - WORKDIR /usr/local/share/sdi-observer - strategy: - dockerStrategy: - from: - kind: "ImageStreamTag" - name: "ubi8:latest" - pullSecret: - name: ${REGISTRY_SECRET_NAME} - output: - to: - kind: "ImageStreamTag" - name: "sdi-observer:${OCP_MINOR_RELEASE}" - - - apiVersion: v1 - kind: DeploymentConfig - metadata: - name: sdi-observer - namespace: ${NAMESPACE} - labels: - deploymentconfig: sdi-observer - spec: - selector: - deploymentconfig: sdi-observer - replicas: 1 - strategy: - type: Rolling - triggers: - - type: "ConfigChange" - - type: ImageChange - imageChangeParams: - automatic: true - containerNames: - - sdi-observer - from: - kind: ImageStreamTag - name: "sdi-observer:${OCP_MINOR_RELEASE}" - template: - metadata: - labels: - deploymentconfig: sdi-observer - spec: - containers: - - env: - - name: SDI_NAMESPACE - value: ${SDI_NAMESPACE} - - name: DRY_RUN - value: ${DRY_RUN} - - name: REGISTRY - value: ${REGISTRY} - - name: MARK_REGISTRY_INSECURE - value: ${MARK_REGISTRY_INSECURE} - - name: NODE_LOG_FORMAT - value: ${NODE_LOG_FORMAT} - - name: DEPLOY_SDI_REGISTRY - value: ${DEPLOY_SDI_REGISTRY} - - name: DEPLOY_LETSENCRYPT - value: ${DEPLOY_LETSENCRYPT} - - name: EXPOSE_WITH_LETSENCRYPT - value: ${EXPOSE_WITH_LETSENCRYPT} - - name: LETSENCRYPT_ENVIRONMENT - value: ${LETSENCRYPT_ENVIRONMENT} - - name: SDI_REGISTRY_STORAGE_CLASS_NAME - value: ${SDI_REGISTRY_STORAGE_CLASS_NAME} - - name: SDI_REGISTRY_HTPASSWD_SECRET_NAME - value: ${SDI_REGISTRY_HTPASSWD_SECRET_NAME} - - name: SDI_REGISTRY_USERNAME - value: ${SDI_REGISTRY_USERNAME} - - name: SDI_REGISTRY_PASSWORD - value: ${SDI_REGISTRY_PASSWORD} - - name: FORCE_REDEPLOY - value: ${FORCE_REDEPLOY} - - name: RECREATE_SECRETS - value: ${RECREATE_SECRETS} - - name: SDI_REGISTRY_NAMESPACE - value: ${SDI_REGISTRY_NAMESPACE} - - name: REDHAT_REGISTRY_SECRET_NAMESPACE - value: ${REDHAT_REGISTRY_SECRET_NAMESPACE} - image: " " - name: sdi-observer - command: - - /usr/local/bin/observer.sh - restartPolicy: Always - serviceAccount: sdi-observer - serviceAccountName: sdi-observer - -parameters: - - name: OCP_MINOR_RELEASE - description: > - Minor release of OpenShift Container Platform (e.g. 4.2). This value must - match the OCP server version. The biggest tolerated difference between - the versions is 1 in the second digit. 
- required: true - value: "4.2" - - name: DRY_RUN - description: > - If set to true, no action will be performed. The pod will just print what - would have been executed. - required: false - value: "false" - - name: SDI_NAMESPACE - description: > - The name of the SAP Data Hub namespace to manage. Defaults to the current - one. It must be set only in case the observer is running in a differnt - namespace (see NAMESPACE). - - name: NAMESPACE - description: > - The desired namespace, where the vsystem-app observer shall run. Defaults - to the current one. Needs to be set only if running the observer outside - of SDI_NAMESPACE. - required: true - - name: SDI_OBSERVER_REPOSITORY - description: > - TODO - value: https://github.com/redhat-sap/sap-data-intelligence - required: true - - name: SDI_OBSERVER_GIT_REVISION - description: > - Revision (e.g. tag, commit or branch) of git repository where SDI - Observer's source reside. - required: true - value: master - - name: ROLE_BINDING_SUFFIX - description: > - A random suffix for the new RoleBinding's name. No need to edit. - generate: expression - from: "[a-z0-9]{5}" - - name: MARK_REGISTRY_INSECURE - description: > - Set to true if the given or configured VFLOW_REGISTRY shall be marked as - insecure in all instances of Pipeline Modeler. - required: true - value: "false" - - name: MAKE_VSYSTEM_IPTABLES_PODS_PRIVILEGED - description: > - Patch deployments with vsystem-iptables container to make them privileged - in order to load kernel modules they need. Unless true, it is assumed - that the modules have been pre-loaded on the worker nodes. - This will make also vsystem-vrep-* pod privileged. - required: true - value: "false" - - name: NODE_LOG_FORMAT - description: > - Format of the logging files on the nodes. Allowed values are "json" and - "text". Initially, SDI fluentd pods are configured to parse "json" while - OpenShift 4 uses "text" format by default. If not given, the default is - "text". - required: false - - name: REGISTRY - description: > - The registry to mark as insecure. If not given, it will be determined - from the installer-config secret in the SDI_NAMESPACE. If - DEPLOY_SDI_REGISTRY is set to "true", this variable will be used as the - container image registry's hostname when creating the corresponding - route. - - name: SDI_REGISTRY_NAMESPACE - description: > - Target namespace where to deploy container image registry. - required: false - - name: DEPLOY_SDI_REGISTRY - description: > - Whether to deploy container image registry for the purpose of SAP Data - Intelligence. Requires project admin role attached to the sdi-observer - service account. If enabled, REDHAT_REGISTRY_SECRET_NAME must be - provided. - required: false - value: "false" - - name: DEPLOY_LETSENCRYPT - description: > - Whether to deploy letsencrypt controller. Requires project admin role - attached to the sdi-observer service account. - required: false - value: "false" - - name: EXPOSE_WITH_LETSENCRYPT - description: > - Whether to expose routes annotated for letsencrypt controller. Requires - project admin role attached to the sdi-observer service account. - Letsencrypt controller must be deployed either via this observer or - cluster-wide for this to have an effect. Defaults to DEPLOY_LETSENCRYPT. - - name: LETSENCRYPT_REPOSITORY - description: > - Unless given, a local copy will be used. - value: https://github.com/tnozicka/openshift-acme - required: false - - name: LETSENCRYPT_REVISION - description: > - Revision of letsencrypt repository to check out. 
- value: 2cfefc7388102408a334ada90933531c7e5e11c2 - required: false - - name: LETSENCRYPT_ENVIRONMENT - description: > - Either "live" or "staging". Use the latter when debugging SDI Observer's - deployment. - required: true - value: "live" - - name: FORCE_REDEPLOY - description: > - Whether to forcefully replace existing registry and/or letsencrypt - deployments and configuration files. - required: false - value: "false" - - name: RECREATE_SECRETS - description: > - Whether to replace secrets like SDI Registry's htpasswd file if they - exist already. - required: false - value: "false" - - name: SDI_REGISTRY_VOLUME_CAPACITY - description: Volume space available for container images (e.g. 75Gi). - required: true - value: 75Gi - - name: SDI_REGISTRY_STORAGE_CLASS_NAME - description: > - Unless given, the default storage class will be used. - required: false - - name: SDI_REGISTRY_HTPASSWD_SECRET_NAME - required: false - description: > - A secret with htpasswd file with authentication data for the sdi image - container If given and the secret exists, it will be used instead of - SDI_REGISTRY_USERNAME and SDI_REGISTRY_PASSWORD. - - name: SDI_REGISTRY_USERNAME - from: "user-[a-z0-9]{6}" - generage: expression - required: false - - name: SDI_REGISTRY_PASSWORD - from: "user-[a-zA-Z0-9]{32}" - generage: expression - required: false - - name: REDHAT_REGISTRY_SECRET_NAME - description: >- - Name of the secret with credentials for registry.redhat.io registry. - Please visit https://access.redhat.com/terms-based-registry/ to obtain - the OpenShift secret. For more details, please refer to - https://access.redhat.com/RegistryAuthentication. Required if - DEPLOY_SDI_REGISTRY is enabled. - required: false - - name: REDHAT_REGISTRY_SECRET_NAMESPACE - description: >- - Namespace where the secret with credentials for registry.redhat.io - registry resides. - require: false diff --git a/registry/ocp-template.json b/registry/ocp-template.json new file mode 100644 index 0000000..f0b01e5 --- /dev/null +++ b/registry/ocp-template.json @@ -0,0 +1,245 @@ +{ + "apiVersion": "template.openshift.io/v1", + "kind": "Template", + "message": "TODO", + "metadata": { + "annotations": { + "description": "TODO", + "openshift.io/display-name": "TODO", + "openshift.io/documentation-url": "https://access.redhat.com/articles/4324391", + "openshift.io/provider-display-name": "Red Hat, Inc." 
+ }, + "name": "container-image-registry" + }, + "objects": [ + { + "apiVersion": "v1", + "kind": "DeploymentConfig", + "metadata": { + "labels": { + "deploymentconfig": "container-image-registry" + }, + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + }, + "spec": { + "replicas": 1, + "selector": { + "deploymentconfig": "container-image-registry" + }, + "strategy": { + "type": "Rolling" + }, + "template": { + "metadata": { + "labels": { + "deploymentconfig": "container-image-registry" + } + }, + "spec": { + "containers": [ + { + "env": [ ], + "image": " ", + "name": "container-image-registry" + } + ], + "restartPolicy": "Always", + "serviceAccount": "container-image-registry", + "serviceAccountName": "container-image-registry" + } + }, + "triggers": [ + { + "type": "ConfigChange" + }, + { + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "container-image-registry" + ], + "from": { + "kind": "ImageStreamTag", + "name": "container-image-registry:latest" + } + }, + "type": "ImageChange" + } + ] + } + }, + { + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": { + "labels": { + "deploymentconfig": "container-image-registry" + }, + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + } + }, + { + "apiVersion": "build.openshift.io/v1", + "kind": "BuildConfig", + "metadata": { + "labels": { + "deploymentconfig": "container-image-registry" + }, + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + }, + "spec": { + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "container-image-registry:latest" + } + }, + "runPolicy": "Serial", + "source": { + "dockerfile": "FROM openshift/ubi8:latest\n# docker-distribution is not yet available on UBI - install from fedora repo\n# RHEL8 / UBI8 is based on fedora 28\nENV FEDORA_BASE_RELEASE=28\nRUN curl -L -o /etc/pki/rpms-fedora.gpg \\\n https://getfedora.org/static/fedora.gpg\nRUN /bin/bash -c 'for repo in base updates; do printf \"%s\\n\" \\\n \"[fedora-$repo]\" \\\n \"name=Fedora $FEDORA_BASE_RELEASE - $(uname -m) - ${repo^}\" \\\n \"metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-$FEDORA_BASE_RELEASE&arch=$(uname -m)\" \\\n \"enabled=0\" \\\n \"countme=1\" \\\n \"type=rpm\" \\\n \"gpgcheck=0\" \\\n \"priority=99\" \\\n \"gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$FEDORA_BASE_RELEASE-$(uname -m)\" \\\n \"skip_if_unavailable=False\" >/etc/yum.repos.d/fedora-$repo.repo; \\\n done'\nRUN dnf update -y\n# install the GPG keys first, so we can enable GPG keys checking for\n# the package in question\nRUN dnf install -y \\\n --enablerepo=fedora-base \\\n --enablerepo=fedora-updates \\\n fedora-gpg-keys\nRUN sed -i 's/^\\(gpgcheck=\\)0/\\11/' /etc/yum.repos.d/fedora-*.repo\nRUN dnf install -y \\\n --enablerepo=fedora-base \\\n --enablerepo=fedora-updates \\\n docker-distribution\nRUN dnf clean all -y\nEXPOSE 5000\nENTRYPOINT [ \\\n \"/usr/bin/registry\", \\\n \"serve\", \"/etc/docker-distribution/registry/config.yml\"]\n" + }, + "strategy": { + "dockerStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "ubi8:latest" + }, + "pullSecret": { + "name": "${REDHAT_REGISTRY_SECRET_NAME}" + } + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "ConfigChange" + } + ] + } + }, + { + "apiVersion": "v1", + "kind": "ImageStream", + "metadata": { + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + }, + "spec": null, + "status": { + "dockerImageRepository": "" + } + }, + { + "apiVersion": "v1", + "kind": "Service", + 
"metadata": { + "annotations": { + "template.openshift.io/expose-uri": "\"https://{.spec.clusterIP}:{.spec.ports[?(.name==\\\"registry\\\")].port)}\"" + }, + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + }, + "spec": { + "ports": [ + { + "name": "registry", + "port": 5000 + } + ], + "selector": { + "deploymentconfig": "container-image-registry" + }, + "sessionAffinity": "ClientIP", + "type": "ClusterIP" + } + }, + { + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "annotations": { + "template.openshift.io/expose-uri": "https://{.spec.host}{.spec.path}" + }, + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + }, + "spec": { + "host": "${HOSTNAME}", + "port": { + "targetPort": "registry" + }, + "subdomain": "", + "tls": { + "insecureEdgeTerminationPolicy": "Redirect", + "termination": "edge" + }, + "to": { + "kind": "Service", + "name": "container-image-registry" + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "container-image-registry", + "namespace": "${NAMESPACE}" + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ], + "parameters": [ + { + "description": "If set to true, no action will be performed. The pod will just print\nwhat would have been executed.\n", + "name": "DRY_RUN", + "required": false, + "value": "false" + }, + { + "description": "The desired namespace to deploy resources to. Defaults to the current\none.\n", + "name": "NAMESPACE", + "required": true + }, + { + "description": "Name of the secret with credentials for registry.redhat.io registry. Please visit\nhttps://access.redhat.com/terms-based-registry/ to obtain the OpenShift secret. For\nmore details, please refer to https://access.redhat.com/RegistryAuthentication.'\n", + "name": "REDHAT_REGISTRY_SECRET_NAME", + "required": true + }, + { + "description": "Volume space available for container images (e.g. 75Gi).", + "name": "VOLUME_CAPACITY", + "required": true, + "value": "75Gi" + }, + { + "name": "HTPASSWD_SECRET_NAME", + "required": true, + "value": "container-image-registry-htpasswd" + }, + { + "from": "[a-zA-Z0-9]{32}", + "generage": "expression", + "name": "REGISTRY_HTTP_SECRET" + }, + { + "description": "Desired domain name of the exposed registry service.'\n", + "name": "HOSTNAME", + "required": false + } + ] +} diff --git a/registry/registry-template.json b/registry/registry-template.json deleted file mode 100644 index 7e9c0d9..0000000 --- a/registry/registry-template.json +++ /dev/null @@ -1,323 +0,0 @@ -{ - "apiVersion": "template.openshift.io/v1", - "kind": "Template", - "message": "TODO", - "metadata": { - "annotations": { - "description": "TODO\n", - "openshift.io/display-name": "TODO", - "openshift.io/documentation-url": "https://access.redhat.com/articles/4324391", - "openshift.io/provider-display-name": "Red Hat, Inc." 
- }, - "name": "container-image-registry" - }, - "objects": [ - { - "apiVersion": "v1", - "kind": "ServiceAccount", - "metadata": { - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - } - }, - { - "apiVersion": "v1", - "kind": "ImageStream", - "metadata": { - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - }, - "spec": null, - "status": { - "dockerImageRepository": "" - } - }, - { - "apiVersion": "image.openshift.io/v1", - "kind": "ImageStream", - "metadata": { - "name": "ubi8", - "namespace": "${NAMESPACE}" - }, - "spec": { - "lookupPolicy": { - "local": false - }, - "tags": [ - { - "from": { - "kind": "DockerImage", - "name": "registry.redhat.io/ubi8/ubi:latest" - }, - "name": "latest", - "referencePolicy": { - "type": "Source" - } - } - ] - } - }, - { - "apiVersion": "build.openshift.io/v1", - "kind": "BuildConfig", - "metadata": { - "labels": { - "deploymentconfig": "container-image-registry" - }, - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - }, - "spec": { - "output": { - "to": { - "kind": "ImageStreamTag", - "name": "container-image-registry:latest" - } - }, - "runPolicy": "Serial", - "source": { - "dockerfile": "FROM openshift/ubi8:latest\n# docker-distribution is not yet available on UBI - install from fedora repo\n# RHEL8 / UBI8 is based on fedora 28\nENV FEDORA_BASE_RELEASE=28\nRUN curl -L -o /etc/pki/rpms-fedora.gpg \\\n https://getfedora.org/static/fedora.gpg\nRUN /bin/bash -c 'for repo in base updates; do printf \"%s\\n\" \\\n \"[fedora-$repo]\" \\\n \"name=Fedora $FEDORA_BASE_RELEASE - $(uname -m) - ${repo^}\" \\\n \"metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-$FEDORA_BASE_RELEASE&arch=$(uname -m)\" \\\n \"enabled=0\" \\\n \"countme=1\" \\\n \"type=rpm\" \\\n \"gpgcheck=0\" \\\n \"priority=99\" \\\n \"gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$FEDORA_BASE_RELEASE-$(uname -m)\" \\\n \"skip_if_unavailable=False\" >/etc/yum.repos.d/fedora-$repo.repo; \\\n done'\nRUN dnf update -y\n# install the GPG keys first, so we can enable GPG keys checking for\n# the package in question\nRUN dnf install -y \\\n --enablerepo=fedora-base \\\n --enablerepo=fedora-updates \\\n fedora-gpg-keys\nRUN sed -i 's/^\\(gpgcheck=\\)0/\\11/' /etc/yum.repos.d/fedora-*.repo\nRUN dnf install -y \\\n --enablerepo=fedora-base \\\n --enablerepo=fedora-updates \\\n docker-distribution\nRUN dnf clean all -y\nEXPOSE 5000\nENTRYPOINT [ \\\n \"/usr/bin/registry\", \\\n \"serve\", \"/etc/docker-distribution/registry/config.yml\"]\n" - }, - "strategy": { - "dockerStrategy": { - "from": { - "kind": "ImageStreamTag", - "name": "ubi8:latest" - }, - "pullSecret": { - "name": "${REDHAT_REGISTRY_SECRET_NAME}" - } - } - }, - "triggers": [ - { - "type": "ImageChange" - }, - { - "type": "ConfigChange" - } - ] - } - }, - { - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "annotations": { - "template.openshift.io/expose-uri": "\"https://{.spec.clusterIP}:{.spec.ports[?(.name==\\\"registry\\\")].port)}\"" - }, - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - }, - "spec": { - "ports": [ - { - "name": "registry", - "port": 5000 - } - ], - "selector": { - "deploymentconfig": "container-image-registry" - }, - "sessionAffinity": "ClientIP", - "type": "ClusterIP" - } - }, - { - "apiVersion": "route.openshift.io/v1", - "kind": "Route", - "metadata": { - "annotations": { - "template.openshift.io/expose-uri": "https://{.spec.host}{.spec.path}" - }, - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - 
}, - "spec": { - "host": "${HOSTNAME}", - "port": { - "targetPort": "registry" - }, - "subdomain": "", - "tls": { - "insecureEdgeTerminationPolicy": "Redirect", - "termination": "edge" - }, - "to": { - "kind": "Service", - "name": "container-image-registry" - } - } - }, - { - "apiVersion": "v1", - "kind": "DeploymentConfig", - "metadata": { - "labels": { - "deploymentconfig": "container-image-registry" - }, - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - }, - "spec": { - "replicas": 1, - "selector": { - "deploymentconfig": "container-image-registry" - }, - "strategy": { - "type": "Rolling" - }, - "template": { - "metadata": { - "labels": { - "deploymentconfig": "container-image-registry" - } - }, - "spec": { - "containers": [ - { - "env": [ - { - "name": "REGISTRY_AUTH_HTPASSWD_REALM", - "value": "basic-realm" - }, - { - "name": "REGISTRY_AUTH_HTPASSWD_PATH", - "value": "/etc/docker-distribution/htpasswd" - }, - { - "name": "REGISTRY_HTTP_SECRET", - "value": "${REGISTRY_HTTP_SECRET}" - } - ], - "image": " ", - "name": "container-image-registry", - "volumeMounts": [ - { - "mountPath": "/var/lib/registry", - "name": "storage" - }, - { - "mountPath": "/etc/docker-distribution/htpasswd", - "name": "htpasswd", - "readonly": true, - "subPath": "htpasswd" - } - ] - } - ], - "livenessProbe": { - "httpGet": { - "path": "/v2/", - "port": 5000, - "scheme": "HTTPS" - } - }, - "readinessProbe": { - "httpGet": { - "path": "/v2/", - "port": 5000, - "scheme": "HTTPS" - } - }, - "resources": { - "limits": { - "memory": "512Mi" - }, - "requests": { - "cpu": "100m", - "memory": "256Mi" - } - }, - "restartPolicy": "Always", - "serviceAccount": "container-image-registry", - "serviceAccountName": "container-image-registry", - "volumes": [ - { - "name": "storage", - "persistentVolumeClaim": { - "claimName": "container-image-registry" - } - }, - { - "name": "htpasswd", - "readonly": true, - "secret": { - "secretName": "${HTPASSWD_SECRET_NAME}" - } - } - ] - } - }, - "triggers": [ - { - "type": "ConfigChange" - }, - { - "imageChangeParams": { - "automatic": true, - "containerNames": [ - "container-image-registry" - ], - "from": { - "kind": "ImageStreamTag", - "name": "container-image-registry:latest" - } - }, - "type": "ImageChange" - } - ] - } - }, - { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "name": "container-image-registry", - "namespace": "${NAMESPACE}" - }, - "spec": { - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "${VOLUME_CAPACITY}" - } - } - } - } - ], - "parameters": [ - { - "description": "Volume space available for container images (e.g. 75Gi).", - "name": "VOLUME_CAPACITY", - "required": true, - "value": "75Gi" - }, - { - "description": "Name of the secret with credentials for registry.redhat.io registry. Please visit https://access.redhat.com/terms-based-registry/ to obtain the OpenShift secret. 
For more details, please refer to https://access.redhat.com/RegistryAuthentication.", - "name": "REDHAT_REGISTRY_SECRET_NAME", - "required": true - }, - { - "name": "HTPASSWD_SECRET_NAME", - "required": true, - "value": "container-image-registry-htpasswd" - }, - { - "from": "[a-zA-Z0-9]{32}", - "generage": "expression", - "name": "REGISTRY_HTTP_SECRET" - }, - { - "description": "Desired domain name of the exposed registry service.\n", - "name": "HOSTNAME", - "required": false - }, - { - "description": "Target namespace where to deploy the registry.\n", - "name": "NAMESPACE" - } - ] -} diff --git a/registry/registry-template.yaml b/registry/registry-template.yaml deleted file mode 100644 index a184e50..0000000 --- a/registry/registry-template.yaml +++ /dev/null @@ -1,255 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - name: container-image-registry - annotations: - openshift.io/display-name: "TODO" - description: > - TODO - openshift.io/provider-display-name: "Red Hat, Inc." - openshift.io/documentation-url: "https://access.redhat.com/articles/4324391" -message: >- - TODO -objects: - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: container-image-registry - namespace: ${NAMESPACE} - - - apiVersion: v1 - kind: ImageStream - metadata: - name: container-image-registry - namespace: ${NAMESPACE} - spec: - status: - dockerImageRepository: "" - - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - name: ubi8 - namespace: ${NAMESPACE} - spec: - lookupPolicy: - local: false - tags: - - from: - kind: DockerImage - name: registry.redhat.io/ubi8/ubi:latest - name: latest - referencePolicy: - type: Source - - - kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: "container-image-registry" - namespace: ${NAMESPACE} - labels: - deploymentconfig: container-image-registry - spec: - runPolicy: "Serial" - triggers: - - type: "ImageChange" - - type: "ConfigChange" - source: - dockerfile: | - FROM openshift/ubi8:latest - # docker-distribution is not yet available on UBI - install from fedora repo - # RHEL8 / UBI8 is based on fedora 28 - ENV FEDORA_BASE_RELEASE=28 - RUN curl -L -o /etc/pki/rpms-fedora.gpg \ - https://getfedora.org/static/fedora.gpg - RUN /bin/bash -c 'for repo in base updates; do printf "%s\n" \ - "[fedora-$repo]" \ - "name=Fedora $FEDORA_BASE_RELEASE - $(uname -m) - ${repo^}" \ - "metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-$FEDORA_BASE_RELEASE&arch=$(uname -m)" \ - "enabled=0" \ - "countme=1" \ - "type=rpm" \ - "gpgcheck=0" \ - "priority=99" \ - "gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$FEDORA_BASE_RELEASE-$(uname -m)" \ - "skip_if_unavailable=False" >/etc/yum.repos.d/fedora-$repo.repo; \ - done' - RUN dnf update -y - # install the GPG keys first, so we can enable GPG keys checking for - # the package in question - RUN dnf install -y \ - --enablerepo=fedora-base \ - --enablerepo=fedora-updates \ - fedora-gpg-keys - RUN sed -i 's/^\(gpgcheck=\)0/\11/' /etc/yum.repos.d/fedora-*.repo - RUN dnf install -y \ - --enablerepo=fedora-base \ - --enablerepo=fedora-updates \ - docker-distribution - RUN dnf clean all -y - EXPOSE 5000 - ENTRYPOINT [ \ - "/usr/bin/registry", \ - "serve", "/etc/docker-distribution/registry/config.yml"] - strategy: - dockerStrategy: - from: - kind: "ImageStreamTag" - name: "ubi8:latest" - pullSecret: - name: ${REDHAT_REGISTRY_SECRET_NAME} - output: - to: - kind: "ImageStreamTag" - name: "container-image-registry:latest" - - - apiVersion: v1 - kind: Service - 
metadata: - annotations: - template.openshift.io/expose-uri: >- - "https://{.spec.clusterIP}:{.spec.ports[?(.name==\"registry\")].port)}" - name: container-image-registry - namespace: ${NAMESPACE} - spec: - ports: - - name: registry - port: 5000 - selector: - deploymentconfig: container-image-registry - # in case there are multiple replicas, make sure the same client talks to - # the same replica each time - sessionAffinity: ClientIP - type: ClusterIP - - - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - annotations: - template.openshift.io/expose-uri: "https://{.spec.host}{.spec.path}" - name: container-image-registry - namespace: ${NAMESPACE} - spec: - host: ${HOSTNAME} - port: - targetPort: registry - subdomain: "" - tls: - termination: edge - insecureEdgeTerminationPolicy: Redirect - to: - kind: Service - name: container-image-registry - - - apiVersion: v1 - kind: DeploymentConfig - metadata: - name: container-image-registry - namespace: ${NAMESPACE} - labels: - deploymentconfig: container-image-registry - spec: - selector: - deploymentconfig: container-image-registry - replicas: 1 - strategy: - type: Rolling - triggers: - - type: "ConfigChange" - - type: ImageChange - imageChangeParams: - automatic: true - containerNames: - - container-image-registry - from: - kind: ImageStreamTag - name: "container-image-registry:latest" - template: - metadata: - labels: - deploymentconfig: container-image-registry - spec: - containers: - - env: - - name: REGISTRY_AUTH_HTPASSWD_REALM - value: basic-realm - - name: REGISTRY_AUTH_HTPASSWD_PATH - value: /etc/docker-distribution/htpasswd - - name: REGISTRY_HTTP_SECRET - value: "${REGISTRY_HTTP_SECRET}" - # will be replaced by URL to the built image once it is built - image: " " - name: container-image-registry - volumeMounts: - - name: storage - mountPath: /var/lib/registry - - name: htpasswd - mountPath: /etc/docker-distribution/htpasswd - readonly: true - subPath: htpasswd - livenessProbe: - httpGet: - path: /v2/ - port: 5000 - scheme: HTTPS - readinessProbe: - httpGet: - path: /v2/ - port: 5000 - scheme: HTTPS - resources: - requests: - cpu: 100m - memory: 256Mi - limits: - memory: 512Mi - restartPolicy: Always - serviceAccount: container-image-registry - serviceAccountName: container-image-registry - volumes: - - name: storage - persistentVolumeClaim: - claimName: container-image-registry - - name: htpasswd - secret: - secretName: "${HTPASSWD_SECRET_NAME}" - readonly: true - - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: container-image-registry - namespace: ${NAMESPACE} - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: "${VOLUME_CAPACITY}" - -parameters: - - name: VOLUME_CAPACITY - description: Volume space available for container images (e.g. 75Gi). - required: true - value: 75Gi - - name: REDHAT_REGISTRY_SECRET_NAME - description: >- - Name of the secret with credentials for registry.redhat.io registry. - Please visit https://access.redhat.com/terms-based-registry/ to obtain - the OpenShift secret. For more details, please refer to - https://access.redhat.com/RegistryAuthentication. - required: true - - name: HTPASSWD_SECRET_NAME - required: true - value: container-image-registry-htpasswd - - name: REGISTRY_HTTP_SECRET - from: "[a-zA-Z0-9]{32}" - generage: expression - - name: HOSTNAME - required: false - description: > - Desired domain name of the exposed registry service. - - name: NAMESPACE - description: > - Target namespace where to deploy the registry. 
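Both deleted variants of the registry template expect an existing htpasswd secret (HTPASSWD_SECRET_NAME, defaulting to container-image-registry-htpasswd) that the deployment mounts at /etc/docker-distribution/htpasswd. A minimal instantiation sketch follows; it assumes the Jsonnet-generated replacement is available as registry/ocp-template.json and that the registry.redhat.io pull secret is named rht-registry-secret (both names are assumptions), while the parameter names come from the template above:

    # create the htpasswd secret mounted by the registry at /etc/docker-distribution/htpasswd;
    # docker-distribution accepts only bcrypt entries, hence -B
    htpasswd -Bbc registry.htpasswd user1 changeit
    oc create secret generic container-image-registry-htpasswd \
      --from-file=htpasswd=registry.htpasswd -n "$NAMESPACE"

    # instantiate the generated registry template (output path is an assumption)
    oc process -f registry/ocp-template.json \
      NAMESPACE="$NAMESPACE" \
      REDHAT_REGISTRY_SECRET_NAME=rht-registry-secret \
      VOLUME_CAPACITY=75Gi | oc create -f -
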
diff --git a/src/observer-template.jsonnet b/src/observer-template.jsonnet
new file mode 100644
index 0000000..ff9daca
--- /dev/null
+++ b/src/observer-template.jsonnet
@@ -0,0 +1,555 @@
+local base = import 'dc-template.libsonnet';
+local bctmpl = import 'ubi-buildconfig.libsonnet';
+
+base.DCTemplate {
+  local obstmpl = self,
+  resourceName: 'sdi-observer',
+  imageStreamTag: obstmpl.resourceName + ':${OCP_MINOR_RELEASE}',
+
+  local bc = bctmpl.BuildConfigTemplate {
+    resourceName: obstmpl.resourceName,
+    imageStreamTag: obstmpl.imageStreamTag,
+    dockerfile: |||
+      FROM openshift/cli:latest
+      RUN dnf update -y
+      # TODO: jq is not yet available in EPEL-8
+      RUN dnf install -y \
+        https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
+        dnf install -y jq
+      RUN dnf install -y \
+        https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \
+        dnf install -y parallel procps-ng bc git && dnf clean all -y
+      # TODO: determine OCP version from environment
+      ADD https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/openshift-client-linux.tar.gz /tmp/
+      ADD https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-${OCP_MINOR_RELEASE}/sha256sum.txt /tmp/
+      # verify the downloaded tar
+      RUN /bin/bash -c 'grep "$(awk '"'"'{print $1}'"'"' \
+        <(sha256sum /tmp/openshift-client-linux.tar.gz))[[:space:]]\+openshift-client-linux-." \
+        /tmp/sha256sum.txt'
+      RUN /bin/bash -c 'tar -C /usr/local/bin/ -xzvf /tmp/openshift-client-linux.tar.gz -T <(printf oc)'
+      # TODO: verify signatures as well
+      RUN mkdir -p /usr/local/bin \
+        /usr/local/share/{sdi-observer,openshift-acme}
+      RUN git clone --depth 5 --single-branch \
+        --branch ${LETSENCRYPT_REVISION} \
+        ${LETSENCRYPT_REPOSITORY} /usr/local/share/openshift-acme
+      RUN git clone --depth 5 --single-branch \
+        --branch ${SDI_OBSERVER_GIT_REVISION} \
+        ${SDI_OBSERVER_REPOSITORY} /usr/local/share/sap-data-intelligence
+      RUN for bin in observer.sh deploy-registry.sh deploy-letsencrypt.sh; do \
+        cp -lv /usr/local/share/sap-data-intelligence/$bin \
+          /usr/local/bin/$bin; \
+        chmod a+rx /usr/local/bin/$bin; \
+      done
+      RUN ln -s /usr/local/share/sap-data-intelligence/observer \
+        /usr/local/share/sdi-observer
+      WORKDIR /usr/local/share/sdi-observer
+    |||,
+  },
+
+  metadata+: {
+    annotations+: {
+      'openshift.io/display-name': |||
+        OpenShift enabler and observer for SAP Data Intelligence
+      |||,
+      description: |||
+        The template spawns the "sdi-observer" pod that observes the particular
+        namespace where SAP Data Intelligence runs and modifies its deployments
+        and configuration to enable its pods to run.
+
+        On Red Hat Enterprise Linux CoreOS, SAP Data Intelligence's vsystem-vrep
+        statefulset needs to be patched to mount an `emptyDir` volume at the
+        `/exports` directory in order to enable NFS exports in the container
+        running on top of overlayfs, which is the default filesystem in RHCOS.
+
+        The "sdi-observer" pod modifies the vsystem-vrep statefulset as soon as
+        it appears in order to enable the NFS exports.
+
+        The observer also allows patching pipeline-modeler (aka "vflow") pods to
+        mark the registry as insecure.
+
+        Additionally, it patches the diagnostics-fluentd daemonset to allow its
+        pods to access log files on the host system. It also modifies it to
+        parse plain text log files instead of the preconfigured json.
+
+        On Red Hat Enterprise Linux CoreOS, "vsystem-iptables" containers need to
+        be run as privileged in order to load iptables-related kernel modules.
+        SAP Data Hub containers named "vsystem-iptables" deployed as part of
+        every "vsystem-app" deployment attempt to modify iptables rules without
+        having the necessary permissions. The ideal solution is to pre-load these
+        modules during the node's startup. When that is not feasible, this template
+        can also fix the permissions on the fly as the deployments are created.
+
+        The template must be instantiated before the installation of SAP Data
+        Hub. The namespace where SAP Data Hub will be installed must also exist
+        before the instantiation.
+
+        TODO: document admin project role requirement.
+
+        Usage:
+          If running in the same namespace as Data Intelligence, instantiate the
+          template as is in the desired namespace:
+
+            oc project $SDI_NAMESPACE
+            oc process -n $SDI_NAMESPACE sdi-observer NAMESPACE=$SDI_NAMESPACE | \
+              oc create -f -
+
+          If running in a different/new namespace/project, instantiate the
+          template with parameters SDI_NAMESPACE and NAMESPACE, e.g.:
+
+            oc new-project $SDI_NAMESPACE
+            oc new-project sapdatahub-admin
+            oc process sdi-observer \
+              SDI_NAMESPACE=$SDI_NAMESPACE \
+              NAMESPACE=sapdatahub-admin | oc create -f -
+      |||,
+    },
+  },
+  message: |||
+    The vsystem-app observer and patcher will be started. You can watch the progress with the
+    following command: oc logs -f dc/sdi-observer
+  |||,
+
+  objects+: [
+    bc.bc,
+
+    {
+      apiVersion: 'rbac.authorization.k8s.io/v1',
+      kind: 'Role',
+      metadata: {
+        labels: {
+          deploymentconfig: obstmpl.resourceName,
+        },
+        name: obstmpl.resourceName,
+        namespace: '${SDI_NAMESPACE}',
+      },
+      rules: [
+        {
+          apiGroups: [
+            'apps',
+            'extensions',
+          ],
+          resources: [
+            'deployments',
+            'deployments/scale',
+            'statefulsets',
+            'statefulsets/scale',
+          ],
+          verbs: [
+            'get',
+            'list',
+            'patch',
+            'watch',
+          ],
+        },
+        {
+          apiGroups: [
+            'apps',
+            'extensions',
+          ],
+          resources: [
+            'daemonsets',
+          ],
+          verbs: [
+            'get',
+            'list',
+            'patch',
+            'update',
+            'watch',
+          ],
+        },
+        {
+          apiGroups: [
+            '',
+          ],
+          resources: [
+            'secrets',
+          ],
+          verbs: [
+            'get',
+          ],
+        },
+        {
+          apiGroups: [
+            '',
+          ],
+          resources: [
+            'configmaps',
+          ],
+          verbs: [
+            'get',
+            'list',
+            'watch',
+            'patch',
+          ],
+        },
+        {
+          apiGroups: [
+            '',
+          ],
+          resources: [
+            'namespaces',
+            'namespaces/status',
+          ],
+          verbs: [
+            'get',
+            'list',
+            'watch',
+          ],
+        },
+        {
+          apiGroups: [
+            '',
+            'project.openshift.io',
+          ],
+          resources: [
+            'projects',
+          ],
+          verbs: [
+            'get',
+          ],
+        },
+        {
+          apiGroups: [
+            'apps',
+            'deploymentconfigs.apps.openshift.io',
+          ],
+          resources: [
+            'deploymentconfigs',
+          ],
+          verbs: [
+            'get',
+            'list',
+            'delete',
+          ],
+        },
+        {
+          apiGroups: [
+            '',
+            'authorization.openshift.io',
+            'rbac.authorization.k8s.io',
+          ],
+          resources: [
+            'roles',
+            'rolebindings',
+            'serviceaccounts',
+          ],
+          verbs: [
+            'get',
+            'list',
+            'delete',
+          ],
+        },
+      ],
+    },
+
+    {
+      apiVersion: 'rbac.authorization.k8s.io/v1',
+      kind: 'RoleBinding',
+      metadata: {
+        labels: {
+          deploymentconfig: obstmpl.resourceName,
+        },
+        name: obstmpl.resourceName + '-${ROLE_BINDING_SUFFIX}',
+        namespace: '${SDI_NAMESPACE}',
+      },
+      roleRef: {
+        apiGroup: 'rbac.authorization.k8s.io',
+        kind: 'Role',
+        name: obstmpl.resourceName,
+        namespace: '${SDI_NAMESPACE}',
+      },
+      subjects: [
+        {
+          kind: 'ServiceAccount',
+          name: obstmpl.resourceName,
+          namespace: '${NAMESPACE}',
+        },
+      ],
+    },
+
+    {
+      apiVersion: 'rbac.authorization.k8s.io/v1',
+      kind: 'ClusterRoleBinding',
+      metadata: {
+        labels: {
+          deploymentconfig: obstmpl.resourceName,
+        },
+        name: obstmpl.resourceName + '-node-reader-${ROLE_BINDING_SUFFIX}',
+      },
+      roleRef: {
+        apiGroup: 'rbac.authorization.k8s.io',
+        kind: 'ClusterRole',
+        name: 'system:node-reader',
+      },
+      subjects: [
+        {
+          kind: 'ServiceAccount',
+          name: obstmpl.resourceName,
+          namespace: '${NAMESPACE}',
+        },
+      ],
+    },
+
+    {
+      apiVersion: 'v1',
+      kind: 'ImageStream',
+      metadata: {
+        name: obstmpl.resourceName,
+        namespace: '${NAMESPACE}',
+      },
+      spec: null,
+      status: {
+        dockerImageRepository: '',
+      },
+    },
+
+    {
+      apiVersion: 'v1',
+      kind: 'Service',
+      metadata: {
+        annotations: {
+          'template.openshift.io/expose-uri': |||
+            https://{.spec.clusterIP}:{.spec.ports[?(.name=="registry")].port}
+          |||,
+        },
+        name: obstmpl.resourceName,
+        namespace: '${NAMESPACE}',
+      },
+      spec: {
+        ports: [
+          {
+            name: 'registry',
+            port: 5000,
+          },
+        ],
+        selector: {
+          deploymentconfig: obstmpl.resourceName,
+        },
+        sessionAffinity: 'ClientIP',
+        type: 'ClusterIP',
+      },
+    },
+
+    {
+      apiVersion: 'route.openshift.io/v1',
+      kind: 'Route',
+      metadata: {
+        annotations: {
+          'template.openshift.io/expose-uri': 'https://{.spec.host}{.spec.path}',
+        },
+        name: obstmpl.resourceName,
+        namespace: '${NAMESPACE}',
+      },
+      spec: {
+        host: '${HOSTNAME}',
+        port: {
+          targetPort: 'registry',
+        },
+        subdomain: '',
+        tls: {
+          insecureEdgeTerminationPolicy: 'Redirect',
+          termination: 'edge',
+        },
+        to: {
+          kind: 'Service',
+          name: obstmpl.resourceName,
+        },
+      },
+    },
+  ],
+
+  parameters+: bc.newParameters + [
+    {
+      description: |||
+        Minor release of OpenShift Container Platform (e.g. 4.2). This value must match the OCP
+        server version. The biggest tolerated difference between the versions is 1 in the second
+        digit.
+      |||,
+      name: 'OCP_MINOR_RELEASE',
+      required: true,
+      value: '4.2',
+    },
+    {
+      description: |||
+        The name of the SAP Data Hub namespace to manage. Defaults to the current one. It must be
+        set only if the observer is running in a different namespace (see NAMESPACE).
+      |||,
+      name: 'SDI_NAMESPACE',
+    },
+    {
+      description: |||
+        TODO
+      |||,
+      name: 'SDI_OBSERVER_REPOSITORY',
+      required: true,
+      value: 'https://github.com/redhat-sap/sap-data-intelligence',
+    },
+    {
+      description: |||
+        Revision (e.g. tag, commit or branch) of the git repository where SDI Observer's source
+        resides.
+      |||,
+      name: 'SDI_OBSERVER_GIT_REVISION',
+      required: true,
+      value: 'master',
+    },
+    {
+      description: |||
+        A random suffix for the new RoleBinding's name. No need to edit.
+      |||,
+      from: '[a-z0-9]{5}',
+      generate: 'expression',
+      name: 'ROLE_BINDING_SUFFIX',
+    },
+    {
+      description: |||
+        Set to true if the given or configured VFLOW_REGISTRY shall be marked as insecure in all
+        instances of Pipeline Modeler.
+      |||,
+      name: 'MARK_REGISTRY_INSECURE',
+      required: true,
+      value: 'false',
+    },
+    {
+      description: |||
+        Patch deployments with vsystem-iptables container to make them privileged in order to load
+        kernel modules they need. Unless true, it is assumed that the modules have been pre-loaded
+        on the worker nodes. This will also make the vsystem-vrep-* pod privileged.
+      |||,
+      name: 'MAKE_VSYSTEM_IPTABLES_PODS_PRIVILEGED',
+      required: true,
+      value: 'false',
+    },
+    {
+      description: |||
+        Format of the logging files on the nodes. Allowed values are "json" and "text".
+        Initially, SDI fluentd pods are configured to parse "json" while OpenShift 4 uses
+        "text" format by default. If not given, the default is "text".
+      |||,
+      name: 'NODE_LOG_FORMAT',
+      required: false,
+    },
+    {
+      description: |||
+        The registry to mark as insecure. If not given, it will be determined from the
+        installer-config secret in the SDI_NAMESPACE. If DEPLOY_SDI_REGISTRY is set to "true",
+        this variable will be used as the container image registry's hostname when creating the
+        corresponding route.
+      |||,
+      name: 'REGISTRY',
+    },
+    {
+      description: |||
+        Whether to deploy a container image registry for the purpose of SAP Data Intelligence.
+        Requires project admin role attached to the sdi-observer service account. If enabled,
+        REDHAT_REGISTRY_SECRET_NAME must be provided.
+      |||,
+      name: 'DEPLOY_SDI_REGISTRY',
+      required: false,
+      value: 'false',
+    },
+    {
+      description: |||
+        Whether to deploy letsencrypt controller. Requires project admin role attached to the
+        sdi-observer service account.
+      |||,
+      name: 'DEPLOY_LETSENCRYPT',
+      required: false,
+      value: 'false',
+    },
+    {
+      description: |||
+        Whether to expose routes annotated for letsencrypt controller. Requires project admin role
+        attached to the sdi-observer service account. Letsencrypt controller must be deployed
+        either via this observer or cluster-wide for this to have an effect. Defaults to
+        DEPLOY_LETSENCRYPT.
+      |||,
+      name: 'EXPOSE_WITH_LETSENCRYPT',
+    },
+    {
+      description: |||
+        Git repository of the letsencrypt controller. Unless given, a local copy will be used.
+      |||,
+      name: 'LETSENCRYPT_REPOSITORY',
+      required: false,
+      value: 'https://github.com/tnozicka/openshift-acme',
+    },
+    {
+      description: |||
+        Revision of letsencrypt repository to check out.
+      |||,
+      name: 'LETSENCRYPT_REVISION',
+      required: false,
+      value: '2cfefc7388102408a334ada90933531c7e5e11c2',
+    },
+    {
+      description: |||
+        Either "live" or "staging". Use the latter when debugging SDI Observer's deployment.
+      |||,
+      name: 'LETSENCRYPT_ENVIRONMENT',
+      required: true,
+      value: 'live',
+    },
+    {
+      description: |||
+        Whether to forcefully replace existing registry and/or letsencrypt deployments and
+        configuration files.
+      |||,
+      name: 'FORCE_REDEPLOY',
+      required: false,
+      value: 'false',
+    },
+    {
+      description: |||
+        Whether to replace secrets like SDI Registry's htpasswd file if they exist already.
+      |||,
+      name: 'RECREATE_SECRETS',
+      required: false,
+      value: 'false',
+    },
+    {
+      description: |||
+        Volume space available for container images (e.g. 75Gi).
+      |||,
+      name: 'SDI_REGISTRY_VOLUME_CAPACITY',
+      required: true,
+      value: '75Gi',
+    },
+    {
+      description: |||
+        Unless given, the default storage class will be used.
+      |||,
+      name: 'SDI_REGISTRY_STORAGE_CLASS_NAME',
+      required: false,
+    },
+    {
+      description: |||
+        A secret containing an htpasswd file with authentication data for the SDI container
+        image registry. If given and the secret exists, it will be used instead of
+        SDI_REGISTRY_USERNAME and SDI_REGISTRY_PASSWORD.
+      |||,
+      name: 'SDI_REGISTRY_HTPASSWD_SECRET_NAME',
+      required: false,
+    },
+    {
+      from: 'user-[a-z0-9]{6}',
+      generate: 'expression',
+      name: 'SDI_REGISTRY_USERNAME',
+      required: false,
+    },
+    {
+      from: 'user-[a-zA-Z0-9]{32}',
+      generate: 'expression',
+      name: 'SDI_REGISTRY_PASSWORD',
+      required: false,
+    },
+    {
+      description: |||
+        Namespace where the secret with credentials for registry.redhat.io registry resides.
+      |||,
+      name: 'REDHAT_REGISTRY_SECRET_NAMESPACE',
+      required: false,
+    },
+  ],
+}
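
With DEPLOY_SDI_REGISTRY and the SDI_REGISTRY_* parameters above, the observer can also take over deploying the container image registry via the deploy-registry.sh script copied into its image. A minimal sketch of instantiating the template with the registry enabled, assuming the sdi-observer template object already exists in $SDI_NAMESPACE, the sdi-observer service account has been granted the required project admin role, and the registry.redhat.io pull secret is named rht-registry-secret (the secret name is an assumption; REDHAT_REGISTRY_SECRET_NAME itself is expected to come from bc.newParameters):

    oc process -n "$SDI_NAMESPACE" sdi-observer \
      NAMESPACE="$SDI_NAMESPACE" \
      OCP_MINOR_RELEASE=4.2 \
      DEPLOY_SDI_REGISTRY=true \
      REDHAT_REGISTRY_SECRET_NAME=rht-registry-secret \
      SDI_REGISTRY_VOLUME_CAPACITY=75Gi | oc create -f -

    # follow the observer's progress
    oc logs -f dc/sdi-observer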