diff --git a/.tekton/multiarch-push-pipeline.yaml b/.tekton/multiarch-push-pipeline.yaml new file mode 100644 index 0000000000..915c8d312b --- /dev/null +++ b/.tekton/multiarch-push-pipeline.yaml @@ -0,0 +1,652 @@ +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + labels: + appstudio.openshift.io/application: opendatahub-release + pipelines.appstudio.openshift.io/type: build + name: multiarch-push-pipeline + namespace: open-data-hub-tenant +spec: + description: | + This pipeline is ideal for building container images from a Containerfile while maintaining trust after pipeline customization. + + _Uses `buildah` to create a container image leveraging [trusted artifacts](https://konflux-ci.dev/architecture/ADR/0036-trusted-artifacts.html). It also optionally creates a source image and runs some build-time tests. Information is shared between tasks using OCI artifacts instead of PVCs. EC will pass the [`trusted_task.trusted`](https://enterprisecontract.dev/docs/ec-policies/release_policy.html#trusted_task__trusted) policy as long as all data used to build the artifact is generated from trusted tasks. 
+ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/repository/konflux-ci/tekton-catalog/pipeline-docker-build-oci-ta?tab=tags)_ + finally: + - name: show-sbom + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + taskRef: + params: + - name: name + value: show-sbom + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:beb0616db051952b4b861dd8c3e00fa1c0eccbd926feddf71194d3bb3ace9ce7 + - name: kind + value: task + resolver: bundles + - name: send-slack-notification + params: + - name: message + value: "$(tasks.rhoai-init.results.slack-message-failure-text)" + - name: secret-name + value: slack-secret + - name: key-name + value: slack-webhook + taskRef: + params: + - name: name + value: slack-webhook-notification + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-slack-webhook-notification:0.1@sha256:4e68fe2225debc256d403b828ed358345bb56d03327b46d55cb6c42911375750 + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.status) + operator: in + values: + - "Failed" + params: + - description: Source Repository URL + name: git-url + type: string + - default: "" + description: Revision of the Source Repository + name: revision + type: string + - description: Fully Qualified Output Image + name: output-image + type: string + - default: . + description: Path to the source code of an application's component from where to build image. 
+ name: path-context + type: string + - default: Dockerfile + description: Path to the Dockerfile inside the context specified by parameter path-context + name: dockerfile + type: string + - default: "false" + description: Force rebuild image + name: rebuild + type: string + - default: "false" + description: Skip checks against built image + name: skip-checks + type: string + - default: "false" + description: Execute the build with network isolation + name: hermetic + type: string + - default: "" + description: Build dependencies to be prefetched by Cachi2 + name: prefetch-input + type: string + - default: "" + description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. + name: image-expires-after + type: string + - default: "false" + description: Build a source image. + name: build-source-image + type: string + - default: "true" + description: Add built image into an OCI image index + name: build-image-index + type: string + - default: [] + description: Array of extra tags to apply to the resulting image (e.g. commit or release identifiers). + name: additional-tags + type: array + - default: [] + description: Array of --build-arg values ("arg=value" strings) for buildah + name: build-args + type: array + - default: "" + description: Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: build-args-file + type: string + - default: "false" + description: Whether to enable privileged mode, should be used only with remote VMs + name: privileged-nested + type: string + - default: + - linux/x86_64 + description: List of platforms to build the container images on. The available + set of values is determined by the configuration of the multi-platform-controller. 
+ name: build-platforms + type: array + results: + - description: "" + name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - description: "" + name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - description: "" + name: CHAINS-GIT_URL + value: $(tasks.clone-repository.results.url) + - description: "" + name: CHAINS-GIT_COMMIT + value: $(tasks.clone-repository.results.commit) + tasks: + - name: rhoai-init + params: + - name: pipelinerun-name + value: "$(context.pipelineRun.name)" + taskSpec: + results: + - description: Notification text to be posted to slack + name: slack-message-failure-text + steps: + - image: quay.io/rhoai-konflux/alpine:latest + name: rhoai-init + env: + - name: slack_message + valueFrom: + secretKeyRef: + name: slack-secret + key: slack-component-failure-notification + script: | + pipelinerun_name=$(params.pipelinerun-name) + echo "pipelinerun-name = $pipelinerun_name" + application_name=opendatahub-release + echo "application-name = $application_name" + + component_name=${pipelinerun_name/-on-*/} + echo "component-name = $component_name" + + KONFLUX_SERVER="https://konflux-ui.apps.stone-prd-rh01.pg1f.p1.openshiftapps.com" + build_url="${KONFLUX_SERVER}/ns/open-data-hub-tenant/applications/${application_name}/pipelineruns/${pipelinerun_name}/logs" + + build_time="$(date +%Y-%m-%dT%H:%M:%S)" + + slack_message=${slack_message/__BUILD__URL__/$build_url} + slack_message=${slack_message/__PIPELINERUN__NAME__/$pipelinerun_name} + slack_message=${slack_message/__BUILD__TIME__/$build_time} + + echo -en "${slack_message}" > "$(results.slack-message-failure-text.path)" + - name: init + params: + - name: image-url + value: $(params.output-image) + - name: rebuild + value: $(params.rebuild) + - name: skip-checks + value: $(params.skip-checks) + taskRef: + params: + - name: name + value: init + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ded314206f09712b2116deb050b774ae7efef9ab243794334c8e616871a3ffa5 + - name: kind + value: task + resolver: bundles + runAfter: + - rhoai-init + - name: clone-repository + params: + - name: url + value: $(params.git-url) + - name: revision + value: $(params.revision) + - name: ociStorage + value: $(params.output-image).git + - name: ociArtifactExpiresAfter + value: $(params.image-expires-after) + runAfter: + - init + taskRef: + params: + - name: name + value: git-clone-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:4a601aeec58a1dd89c271e728fd8f0d84777825b46940c3aec27f15bab3edacf + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + workspaces: + - name: basic-auth + workspace: git-auth + - name: prefetch-dependencies + params: + - name: input + value: $(params.prefetch-input) + - name: SOURCE_ARTIFACT + value: $(tasks.clone-repository.results.SOURCE_ARTIFACT) + - name: ociStorage + value: $(params.output-image).prefetch + - name: ociArtifactExpiresAfter + value: $(params.image-expires-after) + runAfter: + - clone-repository + taskRef: + params: + - name: name + value: prefetch-dependencies-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:22290579c9fe0b5c1689bb9023b3eddec73c285b680226d9f460346ae849a2cb + - name: kind + value: task + resolver: bundles + workspaces: + - name: git-basic-auth + workspace: git-auth + - name: netrc + workspace: netrc + - matrix: + params: + - name: PLATFORM + value: + - $(params.build-platforms) + name: build-images + params: + - name: IMAGE + value: $(params.output-image) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: HERMETIC + value: $(params.hermetic) + - name: PREFETCH_INPUT + value: $(params.prefetch-input) + - name: 
IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: BUILD_ARGS + value: + - $(params.build-args[*]) + - name: BUILD_ARGS_FILE + value: $(params.build-args-file) + - name: PRIVILEGED_NESTED + value: $(params.privileged-nested) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + - name: IMAGE_APPEND_PLATFORM + value: "true" + runAfter: + - prefetch-dependencies + taskRef: + params: + - name: name + value: buildah-remote-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.4@sha256:cd9ef1eb119700a6883edcf93fd7c71dc33ee43467f3c2728b2a002c77915e8d + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - name: build-image-index + params: + - name: IMAGE + value: $(params.output-image) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: ALWAYS_BUILD_INDEX + value: $(params.build-image-index) + - name: IMAGES + value: + - $(tasks.build-images.results.IMAGE_REF[*]) + runAfter: + - build-images + taskRef: + params: + - name: name + value: build-image-index + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:ba7fbed5c4862968c1a77d6b90d5bdd497925ab1de41b859c027dd5c3069cd3e + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - name: build-source-image + params: + - name: BINARY_IMAGE + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: BINARY_IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: 
CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: source-build-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:7a36cc284c5932c18e117fe5995f3246b5dcc11ec742b66a2f9ae710034b064f + - name: kind + value: task + resolver: bundles + when: + - input: $(tasks.init.results.build) + operator: in + values: + - "true" + - input: $(params.build-source-image) + operator: in + values: + - "true" + - name: deprecated-base-image-check + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: deprecated-image-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:1d07d16810c26713f3d875083924d93697900147364360587ccb5a63f2c31012 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: clair-scan + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: clair-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:893ffa3ce26b061e21bb4d8db9ef7ed4ddd4044fe7aa5451ef391034da3ff759 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: ecosystem-cert-preflight-checks + params: + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: ecosystem-cert-preflight-checks + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:e106b6182e72c8f34ceae3f56b0b1aa2b4dc60f573877d9e51c3791029a7acb6 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: sast-snyk-check + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: sast-snyk-check-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:322c86ad5ee252c04440184d9f5046d276415148cb6bfaf571be1b102101786b + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: clamav-scan + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: clamav-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:b0bd59748cda4a7abf311e4f448e6c1d00c6b6d8c0ecc1c2eb33e08dc0e0b802 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: sast-coverity-check + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE + value: $(params.output-image) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: HERMETIC + value: $(params.hermetic) + - name: 
PREFETCH_INPUT + value: $(params.prefetch-input) + - name: IMAGE_EXPIRES_AFTER + value: $(params.image-expires-after) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: BUILD_ARGS + value: + - $(params.build-args[*]) + - name: BUILD_ARGS_FILE + value: $(params.build-args-file) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - coverity-availability-check + taskRef: + params: + - name: name + value: sast-coverity-check-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:cdbe1a968676e4f5519b082bf1e27a4cdcf66dd60af66dbc26b3e604f957f7e9 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - input: $(tasks.coverity-availability-check.results.STATUS) + operator: in + values: + - success + - name: coverity-availability-check + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: coverity-availability-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:db2b267dc15e4ed17f704ee91b8e9b38068e1a35b1018a328fdca621819d74c6 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: sast-shell-check + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: sast-shell-check-oci-ta + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: sast-unicode-check + params: + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + - name: CACHI2_ARTIFACT + value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: sast-unicode-check-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + - name: apply-tags + params: + - name: IMAGE_URL + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: ADDITIONAL_TAGS + value: + - $(params.additional-tags[*]) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: apply-tags + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:e0de426d492e195f59c99d2ea1ca0df7bfb8c689f5d1468fe7f70eb8684b8d02 + - name: kind + value: task + resolver: bundles + - name: push-dockerfile + params: + - name: IMAGE + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) + - name: SOURCE_ARTIFACT + value: $(tasks.prefetch-dependencies.results.SOURCE_ARTIFACT) + runAfter: + - build-image-index + 
taskRef: + params: + - name: name + value: push-dockerfile-oci-ta + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:235ef6e835de8171c07b8a7f8947d0b40bfcff999e1ff3cb6ddd9acc65c48430 + - name: kind + value: task + resolver: bundles + - name: rpms-signature-scan + params: + - name: image-url + value: $(tasks.build-image-index.results.IMAGE_URL) + - name: image-digest + value: $(tasks.build-image-index.results.IMAGE_DIGEST) + runAfter: + - build-image-index + taskRef: + params: + - name: name + value: rpms-signature-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:1b6c20ab3dbfb0972803d3ebcb2fa72642e59400c77bd66dfd82028bdd09e120 + - name: kind + value: task + resolver: bundles + when: + - input: $(params.skip-checks) + operator: in + values: + - "false" + workspaces: + - name: git-auth + optional: true + - name: netrc + optional: true diff --git a/.tekton/odh-base-image-cuda-py311-c9s-push.yaml b/.tekton/odh-base-image-cuda-py311-c9s-push.yaml index 22d22ecd14..92d9d57e45 100644 --- a/.tekton/odh-base-image-cuda-py311-c9s-push.yaml +++ b/.tekton/odh-base-image-cuda-py311-c9s-push.yaml @@ -1,5 +1,6 @@ apiVersion: tekton.dev/v1 kind: PipelineRun +#test1 metadata: annotations: build.appstudio.openshift.io/repo: https://github.com/opendatahub-io/notebooks?rev={{revision}} @@ -36,7 +37,7 @@ spec: - '{{target_branch}}-{{revision}}' - v12.6 pipelineRef: - name: multiarch-pull-request-pipeline + name: multiarch-push-pipeline taskRunTemplate: serviceAccountName: build-pipeline-odh-base-image-cuda-py311-c9s-poc workspaces: diff --git a/.tekton/odh-base-image-cuda-py312-c9s-push.yaml b/.tekton/odh-base-image-cuda-py312-c9s-push.yaml index e9a83838d2..c2db1384ac 100644 --- a/.tekton/odh-base-image-cuda-py312-c9s-push.yaml +++ b/.tekton/odh-base-image-cuda-py312-c9s-push.yaml @@ -1,7 +1,7 @@ apiVersion: tekton.dev/v1 kind: PipelineRun +#test2 metadata: -#TEST annotations: 
build.appstudio.openshift.io/repo: https://github.com/opendatahub-io/notebooks?rev={{revision}} build.appstudio.redhat.com/commit_sha: '{{revision}}' @@ -37,7 +37,7 @@ spec: - '{{target_branch}}-{{revision}}' - v12.6 pipelineRef: - name: multiarch-pull-request-pipeline + name: multiarch-push-pipeline taskRunTemplate: serviceAccountName: build-pipeline-odh-base-image-cuda-py312-c9s-poc workspaces: diff --git a/.tekton/odh-base-image-cuda-py312-ubi9-push.yaml b/.tekton/odh-base-image-cuda-py312-ubi9-push.yaml index ada2080f24..a76c879626 100644 --- a/.tekton/odh-base-image-cuda-py312-ubi9-push.yaml +++ b/.tekton/odh-base-image-cuda-py312-ubi9-push.yaml @@ -1,6 +1,6 @@ apiVersion: tekton.dev/v1 kind: PipelineRun -# test +#test2 metadata: annotations: build.appstudio.openshift.io/repo: https://github.com/opendatahub-io/notebooks?rev={{revision}} @@ -37,7 +37,7 @@ spec: - '{{target_branch}}-{{revision}}' - v12.6 pipelineRef: - name: multiarch-pull-request-pipeline + name: multiarch-push-pipeline taskRunTemplate: serviceAccountName: build-pipeline-odh-base-image-cuda-py312-ubi9-poc workspaces: diff --git a/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml b/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml index 4bbcb5df85..e1bbf722b2 100644 --- a/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml +++ b/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml @@ -13,7 +13,7 @@ metadata: pipelinesascode.tekton.dev/max-keep-runs: '3' pipelinesascode.tekton.dev/on-comment: ^/kfbuild\s+(all|odh\-pipeline\-runtime\-pytorch\-llmcompressor\-cuda\-py312\-ubi9|runtimes/pytorch\+llmcompressor/ubi9\-python\-3\.12) pipelinesascode.tekton.dev/on-cel-expression: | - event == "pull_request" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( 
".tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml".pathChanged() || "runtimes/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "cuda/**".pathChanged() ) + event == "pull_request" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( ".tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml".pathChanged() || "runtimes/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf".pathChanged() ) && body.repository.full_name == "opendatahub-io/notebooks" labels: appstudio.openshift.io/application: opendatahub-release @@ -40,6 +40,8 @@ spec: value: runtimes/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda - name: path-context value: . + - name: build-args-file + value: runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf pipelineRef: name: multiarch-pull-request-pipeline taskRunTemplate: diff --git a/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml b/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml index 8448978fac..bf465fcc8e 100644 --- a/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml +++ b/.tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml @@ -1,6 +1,5 @@ apiVersion: tekton.dev/v1 kind: PipelineRun -#test metadata: annotations: build.appstudio.openshift.io/repo: https://github.com/opendatahub-io/notebooks?rev={{revision}} @@ -9,7 +8,7 @@ metadata: pipelinesascode.tekton.dev/cancel-in-progress: "false" pipelinesascode.tekton.dev/max-keep-runs: "3" build.appstudio.openshift.io/build-nudge-files: "manifests/base/params-latest.env" - pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( 
".tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml".pathChanged() || "runtimes/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "cuda/**".pathChanged() ) + pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( ".tekton/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml".pathChanged() || "runtimes/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf".pathChanged() ) creationTimestamp: labels: appstudio.openshift.io/application: opendatahub-release @@ -29,6 +28,8 @@ spec: value: quay.io/opendatahub/odh-pipeline-runtime-pytorch-llmcompressor-cuda-py312-ubi9:{{revision}} - name: dockerfile value: runtimes/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda + - name: build-args-file + value: runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf - name: path-context value: . 
- name: additional-tags diff --git a/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml b/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml index 55be3bcbc0..ae2af40f36 100644 --- a/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml +++ b/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml @@ -13,7 +13,7 @@ metadata: pipelinesascode.tekton.dev/max-keep-runs: '3' pipelinesascode.tekton.dev/on-comment: ^/kfbuild\s+(all|odh\-workbench\-jupyter\-pytorch\-llmcompressor\-cuda\-py312\-ubi9|jupyter/pytorch\+llmcompressor/ubi9\-python\-3\.12) pipelinesascode.tekton.dev/on-cel-expression: | - event == "pull_request" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( ".tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml".pathChanged() || "jupyter/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "cuda/**".pathChanged() || "jupyter/utils/**".pathChanged() || "jupyter/minimal/ubi9-python-3.12/start-notebook.sh".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mongodb-org-6.0.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mssql-2022.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/setup-elyra.sh".pathChanged() ) + event == "pull_request" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( ".tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-pull-request.yaml".pathChanged() || "jupyter/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf".pathChanged() || "jupyter/utils/**".pathChanged() || "jupyter/minimal/ubi9-python-3.12/start-notebook.sh".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mongodb-org-6.0.repo-x86_64/**".pathChanged() || 
"jupyter/datascience/ubi9-python-3.12/mssql-2022.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/setup-elyra.sh".pathChanged() ) && body.repository.full_name == "opendatahub-io/notebooks" labels: appstudio.openshift.io/application: opendatahub-release @@ -40,6 +40,8 @@ spec: value: jupyter/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda - name: path-context value: . + - name: build-args-file + value: jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf pipelineRef: name: multiarch-pull-request-pipeline taskRunTemplate: diff --git a/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml b/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml index 56ce28ab06..5577205350 100644 --- a/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml +++ b/.tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml @@ -8,7 +8,7 @@ metadata: pipelinesascode.tekton.dev/cancel-in-progress: "false" pipelinesascode.tekton.dev/max-keep-runs: "3" build.appstudio.openshift.io/build-nudge-files: "manifests/base/params-latest.env" - pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( ".tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml".pathChanged() || "jupyter/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "cuda/**".pathChanged() || "jupyter/utils/**".pathChanged() || "jupyter/minimal/ubi9-python-3.12/start-notebook.sh".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mongodb-org-6.0.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mssql-2022.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/setup-elyra.sh".pathChanged() ) + pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch == "main" && !("manifests/base/params-latest.env".pathChanged()) && ( 
".tekton/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9-push.yaml".pathChanged() || "jupyter/pytorch+llmcompressor/ubi9-python-3.12/**".pathChanged() || "jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf".pathChanged() || "jupyter/utils/**".pathChanged() || "jupyter/minimal/ubi9-python-3.12/start-notebook.sh".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mongodb-org-6.0.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/mssql-2022.repo-x86_64/**".pathChanged() || "jupyter/datascience/ubi9-python-3.12/setup-elyra.sh".pathChanged() ) creationTimestamp: labels: appstudio.openshift.io/application: opendatahub-release @@ -26,6 +26,8 @@ spec: value: quay.io/opendatahub/odh-workbench-jupyter-pytorch-llmcompressor-cuda-py312-ubi9:{{revision}} - name: dockerfile value: jupyter/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda + - name: build-args-file + value: jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf - name: path-context value: . - name: additional-tags diff --git a/Makefile b/Makefile index ad3d428d59..07538b1112 100644 --- a/Makefile +++ b/Makefile @@ -67,9 +67,20 @@ endif # ARG 2: Path of Dockerfile we want to build. define build_image $(eval IMAGE_NAME := $(IMAGE_REGISTRY):$(1)-$(IMAGE_TAG)) - $(eval BUILD_ARGS :=) - $(info # Building $(IMAGE_NAME) image...) + # Checks if there’s a build-args/*.conf matching the Dockerfile + $(eval BUILD_DIR := $(dir $(2))) + $(eval DOCKERFILE_NAME := $(notdir $(2))) + $(eval CONF_FILE := $(BUILD_DIR)build-args/$(shell echo $(DOCKERFILE_NAME) | cut -d. -f2).conf) + + # if the conf file exists, transform it into --build-arg KEY=VALUE flags + $(eval BUILD_ARGS := $(shell if [ -f $(CONF_FILE) ]; then \ + while IFS='=' read -r k v; do \ + [ -n "$$k" ] && printf -- "--build-arg %s=%s " "$$k" "$$v"; \ + done < $(CONF_FILE); \ + fi)) + + $(info # Building $(IMAGE_NAME) using $(DOCKERFILE_NAME) with $(CONF_FILE) and $(BUILD_ARGS)...) 
$(ROOT_DIR)/scripts/sandbox.py --dockerfile '$(2)' --platform '$(BUILD_ARCH)' -- \ $(CONTAINER_ENGINE) build $(CONTAINER_BUILD_CACHE_ARGS) --platform=$(BUILD_ARCH) --label release=$(RELEASE) --tag $(IMAGE_NAME) --file '$(2)' $(BUILD_ARGS) {}\; diff --git a/base-images/cuda/12.6/c9s-python-3.11/Dockerfile.cuda b/base-images/cuda/12.6/c9s-python-3.11/Dockerfile.cuda index 9b656f9127..647b6a10f5 100644 --- a/base-images/cuda/12.6/c9s-python-3.11/Dockerfile.cuda +++ b/base-images/cuda/12.6/c9s-python-3.11/Dockerfile.cuda @@ -37,10 +37,10 @@ RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3 ENV CUDA_VERSION=12.6.3 # For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN yum upgrade -y && yum install -y \ +RUN dnf upgrade -y && dnf install -y \ cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ cuda-compat-12-6 \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # nvidia-docker 1.0 @@ -67,20 +67,20 @@ ENV NV_LIBNCCL_VERSION=2.23.4 ENV NCCL_VERSION=2.23.4 ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 -RUN yum install -y \ +RUN dnf install -y \ cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ cuda-nvtx-12-6-${NV_NVTX_VERSION} \ ${NV_LIBNPP_PACKAGE} \ libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ ${NV_LIBNCCL_PACKAGE} \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Install devel tools -RUN yum install -y \ +RUN dnf install -y \ make \ findutils \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Install CUDA cudnn9 from: @@ -90,17 +90,17 @@ ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" -RUN yum install -y \ +RUN dnf install -y \ ${NV_CUDNN_PACKAGE} \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Set this flag so that libraries can find the location of CUDA ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda # 
Install CUDA toolkit 12.6 -RUN yum -y install cuda-toolkit-12-6 && \ - yum -y clean all --enablerepo="*" +RUN dnf -y install cuda-toolkit-12-6 && \ + dnf -y clean all --enablerepo="*" # Restore user workspace USER 1001 diff --git a/base-images/cuda/12.6/c9s-python-3.12/Dockerfile.cuda b/base-images/cuda/12.6/c9s-python-3.12/Dockerfile.cuda index 3a7bcd2ac4..7f58cb17d3 100644 --- a/base-images/cuda/12.6/c9s-python-3.12/Dockerfile.cuda +++ b/base-images/cuda/12.6/c9s-python-3.12/Dockerfile.cuda @@ -37,10 +37,10 @@ RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3 ENV CUDA_VERSION=12.6.3 # For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN yum upgrade -y && yum install -y \ +RUN dnf upgrade -y && dnf install -y \ cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ cuda-compat-12-6 \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # nvidia-docker 1.0 @@ -67,20 +67,20 @@ ENV NV_LIBNCCL_VERSION=2.23.4 ENV NCCL_VERSION=2.23.4 ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 -RUN yum install -y \ +RUN dnf install -y \ cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ cuda-nvtx-12-6-${NV_NVTX_VERSION} \ ${NV_LIBNPP_PACKAGE} \ libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ ${NV_LIBNCCL_PACKAGE} \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Install devel tools -RUN yum install -y \ +RUN dnf install -y \ make \ findutils \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Install CUDA cudnn9 from: @@ -90,17 +90,17 @@ ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" -RUN yum install -y \ +RUN dnf install -y \ ${NV_CUDNN_PACKAGE} \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Set this flag so that libraries can find the location of CUDA ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda # Install CUDA toolkit 12.6 
-RUN yum -y install cuda-toolkit-12-6 && \ - yum -y clean all --enablerepo="*" +RUN dnf -y install cuda-toolkit-12-6 && \ + dnf -y clean all --enablerepo="*" # Restore user workspace USER 1001 diff --git a/base-images/cuda/12.6/ubi9-python-3.12/Dockerfile.cuda b/base-images/cuda/12.6/ubi9-python-3.12/Dockerfile.cuda index bd8d949ebc..0c80bc14b0 100644 --- a/base-images/cuda/12.6/ubi9-python-3.12/Dockerfile.cuda +++ b/base-images/cuda/12.6/ubi9-python-3.12/Dockerfile.cuda @@ -37,10 +37,10 @@ RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3 ENV CUDA_VERSION=12.6.3 # For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN yum upgrade -y && yum install -y \ +RUN dnf upgrade -y && dnf install -y \ cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ cuda-compat-12-6 \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # nvidia-docker 1.0 @@ -67,20 +67,20 @@ ENV NV_LIBNCCL_VERSION=2.23.4 ENV NCCL_VERSION=2.23.4 ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 -RUN yum install -y \ +RUN dnf install -y \ cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ cuda-nvtx-12-6-${NV_NVTX_VERSION} \ ${NV_LIBNPP_PACKAGE} \ libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ ${NV_LIBNCCL_PACKAGE} \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Install devel tools -RUN yum install -y \ +RUN dnf install -y \ make \ findutils \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Install CUDA cudnn9 from: @@ -90,9 +90,9 @@ ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" -RUN yum install -y \ +RUN dnf install -y \ ${NV_CUDNN_PACKAGE} \ - && yum clean all \ + && dnf clean all \ && rm -rf /var/cache/yum/* # Set this flag so that libraries can find the location of CUDA diff --git a/base-images/rocm/6.2/c9s-python-3.12/Dockerfile.rocm 
b/base-images/rocm/6.2/c9s-python-3.12/Dockerfile.rocm index 9ff4b8e0a9..e0976b537a 100644 --- a/base-images/rocm/6.2/c9s-python-3.12/Dockerfile.rocm +++ b/base-images/rocm/6.2/c9s-python-3.12/Dockerfile.rocm @@ -32,8 +32,8 @@ RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ echo "baseurl=https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - yum install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ - yum clean all && rm -rf /var/cache/yum + dnf install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ + dnf clean all && rm -rf /var/cache/yum # Restore user workspace USER 1001 diff --git a/base-images/rocm/6.2/ubi9-python-3.12/Dockerfile.rocm b/base-images/rocm/6.2/ubi9-python-3.12/Dockerfile.rocm index 8aa7a5b853..6ba367d844 100644 --- a/base-images/rocm/6.2/ubi9-python-3.12/Dockerfile.rocm +++ b/base-images/rocm/6.2/ubi9-python-3.12/Dockerfile.rocm @@ -32,8 +32,8 @@ RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ echo "baseurl=https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - yum install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ - yum clean all && rm -rf /var/cache/yum + dnf install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ + dnf clean all && rm -rf /var/cache/yum # Restore user workspace USER 1001 diff --git a/codeserver/ubi9-python-3.12/Dockerfile.cpu b/codeserver/ubi9-python-3.12/Dockerfile.cpu index 9141f8064e..bff6619588 100644 --- a/codeserver/ubi9-python-3.12/Dockerfile.cpu +++ b/codeserver/ubi9-python-3.12/Dockerfile.cpu @@ -1,7 +1,12 @@ +######################### +# 
configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cpu-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -30,11 +35,10 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end - #################### # codeserver # #################### -FROM base AS codeserver +FROM cpu-base AS codeserver ARG TARGETOS TARGETARCH diff --git a/codeserver/ubi9-python-3.12/build-args/cpu.conf b/codeserver/ubi9-python-3.12/build-args/cpu.conf new file mode 100644 index 0000000000..cc7c73581a --- /dev/null +++ b/codeserver/ubi9-python-3.12/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=registry.access.redhat.com/ubi9/python-312:latest diff --git a/cuda/NGC-DL-CONTAINER-LICENSE b/cuda/NGC-DL-CONTAINER-LICENSE deleted file mode 100644 index be9eb7558b..0000000000 --- a/cuda/NGC-DL-CONTAINER-LICENSE +++ /dev/null @@ -1,230 +0,0 @@ -NVIDIA DEEP LEARNING CONTAINER LICENSE - -This license is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs the use -of the NVIDIA container and all its contents (“CONTAINER”). - -This license can be accepted only by an adult of legal age of majority in the country in which the -CONTAINER is used. If you are under the legal age of majority, you must ask your parent or legal -guardian to consent to this license. If you are entering this license on behalf of a company or -other legal entity, you represent that you have legal authority and “you” will mean the entity you -represent. - -By using the CONTAINER, you affirm that you have reached the legal age of majority, you accept the -terms of this license, and you take legal and financial responsibility for the actions of your -permitted users. 
- -You agree to use the CONTAINER only for purposes that are permitted by (a) this license, and (b) any -applicable law, regulation or generally accepted practices or guidelines in the relevant -jurisdictions. - -1. LICENSE. Subject to the terms of this license, NVIDIA hereby grants you a non-exclusive, -non-transferable license, without the right to sublicense (except as expressly provided in this -license) to: - -a. Install and use copies of the CONTAINER, and modify and create derivative works of samples or -example source code delivered in the CONTAINER (if applicable), to develop and test services and -applications, - -b. Deploy the CONTAINER on infrastructure you own or lease to offer a service to third parties, -without distributing the CONTAINER or exposing the NVIDIA APIs in the CONTAINER directly to such -service users, and - -c. Develop and extend the CONTAINER to create a Compatible (as defined below) derived CONTAINER that -includes the entire CONTAINER plus other software with primary functionality, to develop and compile -applications, and distribute such derived CONTAINER to run applications, subject to the distribution -requirements indicated in this license. As used in this section, “Compatible” means that extensions -to the CONTAINER must not adversely affect the functionality of the other components in the -CONTAINER. - -2. DISTRIBUTION REQUIREMENTS. For purposes of this Section 2, the term “distribution” also means the -deployment of CONTAINERS in a service or an application for third parties to access over the -internet. These are the distribution requirements for you to exercise the grants above: - -a. A service or an application must have material additional functionality, beyond the included -portions of the CONTAINER. - -b. The following notice shall be included in modifications and derivative works of source code -distributed: “This software contains source code provided by NVIDIA Corporation.” - -c. 
You agree to distribute the CONTAINER subject to the terms at least as protective as the terms of -this license, including (without limitation) terms relating to the license grant, license -restrictions and protection of NVIDIA’s intellectual property rights. Additionally, you agree that -you will protect the privacy, security and legal rights of your application users. - -d. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the -CONTAINER not in compliance with the requirements of this license, and to enforce the terms of your -agreements with respect to the distributed CONTAINER. - -3. AUTHORIZED USERS. You may allow employees and contractors of your entity or of your -subsidiary(ies) to access and use the CONTAINER from your secure network to perform work on your -behalf. If you are an academic institution you may allow users enrolled or employed by the academic -institution to access and use the CONTAINER from your secure network. You are responsible for the -compliance with the terms of this license by your authorized users. - -4. LIMITATIONS. Your license to use the CONTAINER is restricted as follows: - -a. The CONTAINER is licensed for you to develop services and applications only for their use in -systems with NVIDIA GPUs. - -b. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary -notices from any portion of the CONTAINER or copies of the CONTAINER. - -c. Except as expressly provided in this license, you may not copy, sell, rent, sublicense, transfer, -distribute, modify, or create derivative works of any portion of the CONTAINER. For clarity, you may -not distribute or sublicense the CONTAINER as a stand-alone product. - -d. Unless you have an agreement with NVIDIA for this purpose, you may not indicate that a service or -an application created with the CONTAINER is sponsored or endorsed by NVIDIA. - -e. 
You may not bypass, disable, or circumvent any technical limitation, encryption, security, -digital rights management or authentication mechanism in the CONTAINER. - -f. You may not replace any NVIDIA software components in the CONTAINER that are governed by this -license with other software that implements NVIDIA APIs. - -g. You may not use the CONTAINER in any manner that would cause it to become subject to an open -source software license. As examples, licenses that require as a condition of use, modification, -and/or distribution that the CONTAINER be: (i) disclosed or distributed in source code form; (ii) -licensed for the purpose of making derivative works; or (iii) redistributable at no charge. - -h. Unless you have an agreement with NVIDIA for this purpose, you may not use the CONTAINER with any -system or application where the use or failure of the system or application can reasonably be -expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use -in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA -does not design, test or manufacture the CONTAINER for these critical uses and NVIDIA shall not be -liable to you or any third party, in whole or in part, for any claims or damages arising from such -uses. - -i. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective -employees, contractors, agents, officers and directors, from and against any and all claims, -damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses -(including but not limited to attorney’s fees and costs incident to establishing the right of -indemnification) arising out of or related to your use of the CONTAINER outside of the scope of this -license, or not in compliance with its terms. - -5. UPDATES. NVIDIA may, at its option, make available patches, workarounds or other updates to this -CONTAINER. 
Unless the updates are provided with their separate governing terms, they are deemed part -of the CONTAINER licensed to you as provided in this license. You agree that the form and content of -the CONTAINER that NVIDIA provides may change without prior notice to you. While NVIDIA generally -maintains compatibility between versions, NVIDIA may in some cases make changes that introduce -incompatibilities in future versions of the CONTAINER. - -6. PRE-RELEASE VERSIONS. CONTAINER versions identified as alpha, beta, preview, early access or -otherwise as pre-release may not be fully functional, may contain errors or design flaws, and may -have reduced or different security, privacy, availability, and reliability standards relative to -commercial versions of NVIDIA software and materials. You may use a pre-release CONTAINER version at -your own risk, understanding that these versions are not intended for use in production or -business-critical systems. NVIDIA may choose not to make available a commercial version of any -pre-release CONTAINER. NVIDIA may also choose to abandon development and terminate the availability -of a pre-release CONTAINER at any time without liability. - -7. THIRD-PARTY COMPONENTS. The CONTAINER may include third-party components with separate legal -notices or terms as may be described in proprietary notices accompanying the CONTAINER. If and to -the extent there is a conflict between the terms in this license and the third-party license terms, -the third-party terms control only to the extent necessary to resolve the conflict. 
- -You acknowledge and agree that it is your sole responsibility to obtain any additional third-party -licenses required to make, have made, use, have used, sell, import, and offer for sale your products -or services that include or incorporate any third-party software and content relating to audio -and/or video encoders and decoders from, including but not limited to, Microsoft, Thomson, -Fraunhofer IIS, Sisvel S.p.A., MPEG-LA, and Coding Technologies. NVIDIA does not grant to you under -this license any necessary patent or other rights with respect to any audio and/or video encoders -and decoders. - -Subject to the other terms of this license, you may use the CONTAINER to develop and test -applications released under Open Source Initiative (OSI) approved open source software licenses. - -8. OWNERSHIP. - -8.1 NVIDIA reserves all rights, title and interest in and to the CONTAINER not expressly granted to -you under this license. NVIDIA and its suppliers hold all rights, title and interest in and to the -CONTAINER, including their respective intellectual property rights. The CONTAINER is copyrighted and -protected by the laws of the United States and other countries, and international treaty provisions. - -8.2 Subject to the rights of NVIDIA and its suppliers in the CONTAINER, you hold all rights, title -and interest in and to your services, applications and your derivative works of the sample source -code delivered in the CONTAINER including their respective intellectual property rights. - -9. FEEDBACK. You may, but are not obligated to, provide to NVIDIA suggestions, fixes, modifications, -feature requests or other feedback regarding the CONTAINER (“Feedback”). Feedback, even if -designated as confidential by you, shall not create any confidentiality obligation for NVIDIA. 
-NVIDIA and its designees have a perpetual, non-exclusive, worldwide, irrevocable license to use, -reproduce, publicly display, modify, create derivative works of, license, sublicense, and otherwise -distribute and exploit Feedback as NVIDIA sees fit without payment and without obligation or -restriction of any kind on account of intellectual property rights or otherwise. - -10. NO WARRANTIES. THE CONTAINER IS PROVIDED AS-IS. TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE -LAW NVIDIA AND ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND OR NATURE, WHETHER -EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, -NON-INFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. NVIDIA DOES NOT WARRANT THAT THE CONTAINER -WILL MEET YOUR REQUIREMENTS OR THAT THE OPERATION THEREOF WILL BE UNINTERRUPTED OR ERROR-FREE, OR -THAT ALL ERRORS WILL BE CORRECTED. - -11. LIMITATIONS OF LIABILITY. TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW NVIDIA AND ITS -AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR -FOR ANY LOST PROFITS, PROJECT DELAYS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF -PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS LICENSE OR THE USE OR -PERFORMANCE OF THE CONTAINER, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF -CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF -ACTION OR THEORY OF LIABILITY, EVEN IF NVIDIA HAS PREVIOUSLY BEEN ADVISED OF, OR COULD REASONABLY -HAVE FORESEEN, THE POSSIBILITY OF SUCH DAMAGES. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL -CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS LICENSE EXCEED US$10.00. THE NATURE OF THE -LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. - -12. TERMINATION. 
Your rights under this license will terminate automatically without notice from -NVIDIA if you fail to comply with any term and condition of this license or if you commence or -participate in any legal proceeding against NVIDIA with respect to the CONTAINER. NVIDIA may -terminate this license with advance written notice to you, if NVIDIA decides to no longer provide -the CONTAINER in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer -commercially viable. Upon any termination of this license, you agree to promptly discontinue use of -the CONTAINER and destroy all copies in your possession or control. Your prior distributions in -accordance with this license are not affected by the termination of this license. All provisions of -this license will survive termination, except for the license granted to you. - -13. APPLICABLE LAW. This license will be governed in all respects by the laws of the United States -and of the State of Delaware, without regard to the conflicts of laws principles. The United Nations -Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to -all terms of this license in the English language. The state or federal courts residing in Santa -Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of -this license. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for -injunctive remedies or urgent legal relief in any jurisdiction. - -14. NO ASSIGNMENT. This license and your rights and obligations thereunder may not be assigned by -you by any means or operation of law without NVIDIA’s permission. Any attempted assignment not -approved by NVIDIA in writing shall be void and of no effect. NVIDIA may assign, delegate or -transfer this license and its rights and obligations, and if to a non-affiliate you will be -notified. - -15. EXPORT. The CONTAINER is subject to United States export laws and regulations. 
You agree to -comply with all applicable - -U.S. and international export laws, including the Export Administration Regulations (EAR) -administered by the U.S. Department of Commerce and economic sanctions administered by the U.S. -Department of Treasury’s Office of Foreign Assets Control (OFAC). These laws include restrictions on -destinations, end-users and end-use. By accepting this license, you confirm that you are not -currently residing in a country or region currently embargoed by the U.S. and that you are not -otherwise prohibited from receiving the CONTAINER. - -16. GOVERNMENT USE. The CONTAINER is, and shall be treated as being, “Commercial Items” as that term -is defined at 48 CFR § 2.101, consisting of “commercial computer software” and “commercial computer -software documentation”, respectively, as such terms are used in, respectively, 48 CFR § 12.212 and -48 CFR §§ 227.7202 & 252.227-7014(a)(1). Use, duplication or disclosure by the U.S. Government or a -U.S. Government subcontractor is subject to the restrictions in this license pursuant to 48 CFR § -12.212 or 48 CFR § 227.7202. In no event shall the US Government user acquire rights in the -CONTAINER beyond those specified in 48 C.F.R. 52.227-19(b)(1)-(2). - -17. NOTICES. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2788 -San Tomas Expressway, Santa Clara, California 95051, United States of America, Attention: Legal -Department. - -18. ENTIRE AGREEMENT. This license is the final, complete and exclusive agreement between the -parties relating to the subject matter of this license and supersedes all prior or contemporaneous -understandings and agreements relating to this subject matter, whether oral or written. If any court -of competent jurisdiction determines that any provision of this license is illegal, invalid or -unenforceable, the remaining provisions will remain in full force and effect. 
Any amendment or -waiver under this license shall be in writing and signed by representatives of both parties. - -19. LICENSING. If the distribution terms in this license are not suitable for your organization, or -for any questions regarding this license, please contact NVIDIA at nvidia-compute-license-questions@nvidia.com. - -(v. December 4, 2020) diff --git a/cuda/cuda.repo-amd64 b/cuda/cuda.repo-amd64 deleted file mode 100644 index f7fb7a4f8b..0000000000 --- a/cuda/cuda.repo-amd64 +++ /dev/null @@ -1,6 +0,0 @@ -[cuda] -name=cuda -baseurl=https://developer.download.nvidia.com/compute/cuda/repos/rhel9/x86_64 -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA diff --git a/cuda/cuda.repo-arm64 b/cuda/cuda.repo-arm64 deleted file mode 100644 index b17064ff09..0000000000 --- a/cuda/cuda.repo-arm64 +++ /dev/null @@ -1,6 +0,0 @@ -[cuda] -name=cuda-sbsa -baseurl=https://developer.download.nvidia.com/compute/cuda/repos/rhel9/sbsa -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA diff --git a/examples/jupyterlab-with-elyra/Dockerfile b/examples/jupyterlab-with-elyra/Dockerfile index 7ba23f9d75..100c757636 100644 --- a/examples/jupyterlab-with-elyra/Dockerfile +++ b/examples/jupyterlab-with-elyra/Dockerfile @@ -58,7 +58,7 @@ nbdime~=4.0.2 nbgitpuller~=1.2.2 # Elyra -odh-elyra==4.2.1 +odh-elyra==4.2.3 kfp~=2.12.1 # Miscellaneous datascience packages diff --git a/jupyter/datascience/ubi9-python-3.12/Dockerfile.cpu b/jupyter/datascience/ubi9-python-3.12/Dockerfile.cpu index a6f7341aae..0427003779 100644 --- a/jupyter/datascience/ubi9-python-3.12/Dockerfile.cpu +++ b/jupyter/datascience/ubi9-python-3.12/Dockerfile.cpu @@ -1,3 +1,8 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + ###################################################### # mongocli-builder (build stage only, not published) # ###################################################### @@ -11,10 +16,10 @@ RUN unzip 
./mongodb-cli-mongocli-v${MONGOCLI_VERSION}.zip RUN cd ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}/ && \ CGO_ENABLED=1 GOOS=linux go build -a -tags strictfipsruntime -o /tmp/mongocli ./cmd/mongocli/ -######################## -# base # -######################## -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +#################### +# cpu-base # +#################### +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -46,7 +51,7 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc #################### # jupyter-minimal # #################### -FROM base AS jupyter-minimal +FROM cpu-base AS jupyter-minimal ARG JUPYTER_REUSABLE_UTILS=jupyter/utils ARG MINIMAL_SOURCE_CODE=jupyter/minimal/ubi9-python-3.12 @@ -69,7 +74,6 @@ WORKDIR /opt/app-root/src ENTRYPOINT ["start-notebook.sh"] - ######################## # jupytyer-datascience # ######################## diff --git a/jupyter/datascience/ubi9-python-3.12/build-args/cpu.conf b/jupyter/datascience/ubi9-python-3.12/build-args/cpu.conf new file mode 100644 index 0000000000..cc7c73581a --- /dev/null +++ b/jupyter/datascience/ubi9-python-3.12/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=registry.access.redhat.com/ubi9/python-312:latest diff --git a/jupyter/minimal/ubi9-python-3.12/Dockerfile.cpu b/jupyter/minimal/ubi9-python-3.12/Dockerfile.cpu index c8166a216c..ef1ebf8057 100644 --- a/jupyter/minimal/ubi9-python-3.12/Dockerfile.cpu +++ b/jupyter/minimal/ubi9-python-3.12/Dockerfile.cpu @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cpu-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -33,7 +38,7 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc #################### # jupyter-minimal # #################### -FROM base AS jupyter-minimal 
+FROM cpu-base AS jupyter-minimal ARG JUPYTER_REUSABLE_UTILS=jupyter/utils ARG MINIMAL_SOURCE_CODE=jupyter/minimal/ubi9-python-3.12 diff --git a/jupyter/minimal/ubi9-python-3.12/Dockerfile.cuda b/jupyter/minimal/ubi9-python-3.12/Dockerfile.cuda index 419c030bf0..4c0c480d1f 100644 --- a/jupyter/minimal/ubi9-python-3.12/Dockerfile.cuda +++ b/jupyter/minimal/ubi9-python-3.12/Dockerfile.cuda @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cuda-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cuda-base WORKDIR /opt/app-root/bin @@ -30,105 +35,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -#################### -# cuda-base # -#################### -FROM base AS cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - -FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 
brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV 
NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################### # cuda-jupyter-minimal # ######################### diff --git a/jupyter/minimal/ubi9-python-3.12/Dockerfile.rocm b/jupyter/minimal/ubi9-python-3.12/Dockerfile.rocm index f7e13511f1..4a6f2f32bf 100644 --- a/jupyter/minimal/ubi9-python-3.12/Dockerfile.rocm +++ b/jupyter/minimal/ubi9-python-3.12/Dockerfile.rocm @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# rocm-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS rocm-base WORKDIR 
/opt/app-root/bin @@ -30,40 +35,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -######################## -# rocm-base # -######################## -FROM base AS rocm-base - -USER 0 -WORKDIR /opt/app-root/bin - -# Please keep in sync with ROCm/python3.12 dependent images -ARG ROCM_VERSION=6.2.4 -ARG AMDGPU_VERSION=6.2.4 - -# Install the ROCm rpms -# ref: https://github.com/ROCm/ROCm-docker/blob/master/dev/Dockerfile-centos-7-complete -# Note: Based on 6.2 above new package mivisionx is a pre-requistes, which bring in more dependent packages -# so we are only installing meta packages of rocm -# ref: https://rocm.docs.amd.com/projects/install-on-linux/en/develop/reference/package-manager-integration.html#packages-in-rocm-programming-models -RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ - echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo && \ - echo "baseurl=https://repo.radeon.com/rocm/rhel9/$ROCM_VERSION/main" >> /etc/yum.repos.d/rocm.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/rocm.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo && \ - echo "[amdgpu]" > /etc/yum.repos.d/amdgpu.repo && \ - echo "name=amdgpu" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "baseurl=https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - dnf install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ - dnf clean all && rm -rf /var/cache/yum - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################## # rocm-jupyter-minimal # ######################## diff --git a/jupyter/minimal/ubi9-python-3.12/build-args/cpu.conf b/jupyter/minimal/ubi9-python-3.12/build-args/cpu.conf new file mode 100644 index 0000000000..cc7c73581a --- /dev/null 
+++ b/jupyter/minimal/ubi9-python-3.12/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=registry.access.redhat.com/ubi9/python-312:latest diff --git a/jupyter/minimal/ubi9-python-3.12/build-args/cuda.conf b/jupyter/minimal/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/jupyter/minimal/ubi9-python-3.12/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/jupyter/minimal/ubi9-python-3.12/build-args/rocm.conf b/jupyter/minimal/ubi9-python-3.12/build-args/rocm.conf new file mode 100644 index 0000000000..6682af4d77 --- /dev/null +++ b/jupyter/minimal/ubi9-python-3.12/build-args/rocm.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-rocm-py312-ubi9:v6.2 diff --git a/jupyter/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda b/jupyter/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda index 87003950f3..9e92f08889 100644 --- a/jupyter/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda +++ b/jupyter/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda @@ -1,3 +1,8 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + ###################################################### # mongocli-builder (build stage only, not published) # ###################################################### @@ -12,9 +17,9 @@ RUN cd ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}/ && \ CGO_ENABLED=1 GOOS=linux go build -a -tags strictfipsruntime -o /tmp/mongocli ./cmd/mongocli/ #################### -# base # +# cuda-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cuda-base WORKDIR /opt/app-root/bin @@ -43,105 +48,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -#################### -# cuda-base # -#################### -FROM base AS 
cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - -FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL 
https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV 
NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################### # cuda-jupyter-minimal # ######################### diff --git a/jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf b/jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/jupyter/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/jupyter/pytorch/ubi9-python-3.12/Dockerfile.cuda b/jupyter/pytorch/ubi9-python-3.12/Dockerfile.cuda index cc8eabc295..ce9c1df811 100644 --- a/jupyter/pytorch/ubi9-python-3.12/Dockerfile.cuda +++ b/jupyter/pytorch/ubi9-python-3.12/Dockerfile.cuda @@ -1,3 +1,8 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + ###################################################### # mongocli-builder (build stage only, not published) # ###################################################### @@ -12,9 +17,9 @@ RUN cd ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}/ && \ CGO_ENABLED=1 GOOS=linux go build -a -tags strictfipsruntime -o /tmp/mongocli ./cmd/mongocli/ #################### -# base # +# cuda-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cuda-base WORKDIR /opt/app-root/bin @@ -43,105 +48,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -#################### -# cuda-base # 
-#################### -FROM base AS cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - -FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL 
https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV 
NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################### # cuda-jupyter-minimal # ######################### @@ -203,7 +109,7 @@ COPY ${DATASCIENCE_SOURCE_CODE}/setup-elyra.sh ${DATASCIENCE_SOURCE_CODE}/utils WORKDIR /opt/app-root/src ############################# -# cuda-jupyter-pytorch # +# cuda-jupyter-pytorch # ############################# FROM cuda-jupyter-datascience AS cuda-jupyter-pytorch diff --git a/jupyter/pytorch/ubi9-python-3.12/build-args/cuda.conf b/jupyter/pytorch/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/jupyter/pytorch/ubi9-python-3.12/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/jupyter/rocm/pytorch/ubi9-python-3.12/Dockerfile.rocm b/jupyter/rocm/pytorch/ubi9-python-3.12/Dockerfile.rocm index d3671c29af..0c9541c7d5 100644 --- a/jupyter/rocm/pytorch/ubi9-python-3.12/Dockerfile.rocm +++ b/jupyter/rocm/pytorch/ubi9-python-3.12/Dockerfile.rocm @@ -1,3 +1,8 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + ###################################################### # mongocli-builder (build stage only, not published) # ###################################################### @@ -12,9 +17,9 @@ RUN cd ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}/ && \ CGO_ENABLED=1 GOOS=linux go build -a -tags strictfipsruntime -o /tmp/mongocli ./cmd/mongocli/ #################### -# base # +# rocm-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base 
+FROM ${BASE_IMAGE} AS rocm-base WORKDIR /opt/app-root/bin @@ -43,40 +48,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -######################## -# rocm-base # -######################## -FROM base AS rocm-base - -USER 0 -WORKDIR /opt/app-root/bin - -# Please keep in sync with ROCm/python3.12 dependent images -ARG ROCM_VERSION=6.2.4 -ARG AMDGPU_VERSION=6.2.4 - -# Install the ROCm rpms -# ref: https://github.com/ROCm/ROCm-docker/blob/master/dev/Dockerfile-centos-7-complete -# Note: Based on 6.2 above new package mivisionx is a pre-requistes, which bring in more dependent packages -# so we are only installing meta packages of rocm -# ref: https://rocm.docs.amd.com/projects/install-on-linux/en/develop/reference/package-manager-integration.html#packages-in-rocm-programming-models -RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ - echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo && \ - echo "baseurl=https://repo.radeon.com/rocm/rhel9/$ROCM_VERSION/main" >> /etc/yum.repos.d/rocm.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/rocm.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo && \ - echo "[amdgpu]" > /etc/yum.repos.d/amdgpu.repo && \ - echo "name=amdgpu" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "baseurl=https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - dnf install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ - dnf clean all && rm -rf /var/cache/yum - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################## # rocm-jupyter-minimal # ######################## @@ -137,7 +108,6 @@ COPY ${DATASCIENCE_SOURCE_CODE}/setup-elyra.sh ${DATASCIENCE_SOURCE_CODE}/utils WORKDIR /opt/app-root/src - 
######################## # rocm-jupyter-pytorch # ######################## diff --git a/jupyter/rocm/pytorch/ubi9-python-3.12/build-args/rocm.conf b/jupyter/rocm/pytorch/ubi9-python-3.12/build-args/rocm.conf new file mode 100644 index 0000000000..6682af4d77 --- /dev/null +++ b/jupyter/rocm/pytorch/ubi9-python-3.12/build-args/rocm.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-rocm-py312-ubi9:v6.2 diff --git a/jupyter/rocm/tensorflow/ubi9-python-3.12/Dockerfile.rocm b/jupyter/rocm/tensorflow/ubi9-python-3.12/Dockerfile.rocm index 8dfee5343c..83e7ab09b9 100644 --- a/jupyter/rocm/tensorflow/ubi9-python-3.12/Dockerfile.rocm +++ b/jupyter/rocm/tensorflow/ubi9-python-3.12/Dockerfile.rocm @@ -1,3 +1,8 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + ###################################################### # mongocli-builder (build stage only, not published) # ###################################################### @@ -12,9 +17,9 @@ RUN cd ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}/ && \ CGO_ENABLED=1 GOOS=linux go build -a -tags strictfipsruntime -o /tmp/mongocli ./cmd/mongocli/ #################### -# base # +# rocm-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS rocm-base WORKDIR /opt/app-root/bin @@ -43,51 +48,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -############# -# rocm-base # -############# -FROM base AS rocm-base - -USER 0 -WORKDIR /opt/app-root/bin - -# Please keep in sync with ROCm/python3.12 dependent images -ARG ROCM_VERSION=6.4.3 -ARG AMDGPU_VERSION=6.4.3 - -# Install the ROCm rpms -# ref: https://github.com/ROCm/ROCm-docker/blob/master/dev/Dockerfile-centos-7-complete -# docs: 
https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/install-methods/package-manager/package-manager-rhel.html#registering-rocm-repositories -# Note: Based on 6.4 above new package mivisionx is a pre-requistes, which bring in more dependent packages -# so we are only installing meta packages of rocm -# ref: https://rocm.docs.amd.com/projects/install-on-linux/en/develop/reference/package-manager-integration.html#packages-in-rocm-programming-models -RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ - echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo && \ - echo "baseurl=https://repo.radeon.com/rocm/el9/${ROCM_VERSION}/main" >> /etc/yum.repos.d/rocm.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/rocm.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo && \ - echo "[amdgpu]" > /etc/yum.repos.d/amdgpu.repo && \ - echo "name=amdgpu" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "baseurl=https://repo.radeon.com/amdgpu/${AMDGPU_VERSION}/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - dnf install -y 'https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm' && \ - dnf clean all && dnf makecache && \ - dnf install -y rocm-developer-tools rocm-ml-sdk rocm-openmp-sdk rocm-utils && \ - dnf clean all && rm -rf /var/cache/dnf - -# https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/post-install.html#configure-rocm-shared-objects -RUN tee --append /etc/ld.so.conf.d/rocm.conf < /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# 
nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################### # cuda-jupyter-minimal # ######################### diff --git 
a/jupyter/tensorflow/ubi9-python-3.12/build-args/cuda.conf b/jupyter/tensorflow/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/jupyter/tensorflow/ubi9-python-3.12/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/jupyter/trustyai/ubi9-python-3.12/Dockerfile.cpu b/jupyter/trustyai/ubi9-python-3.12/Dockerfile.cpu index 246f6e2172..f39e2cd07d 100644 --- a/jupyter/trustyai/ubi9-python-3.12/Dockerfile.cpu +++ b/jupyter/trustyai/ubi9-python-3.12/Dockerfile.cpu @@ -1,3 +1,8 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + ###################################################### # mongocli-builder (build stage only, not published) # ###################################################### @@ -11,10 +16,10 @@ RUN unzip ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}.zip RUN cd ./mongodb-cli-mongocli-v${MONGOCLI_VERSION}/ && \ CGO_ENABLED=1 GOOS=linux go build -a -tags strictfipsruntime -o /tmp/mongocli ./cmd/mongocli/ -######################## -# base # -######################## -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +#################### +# cpu-base # +#################### +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -46,7 +51,7 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc #################### # jupyter-minimal # #################### -FROM base AS jupyter-minimal +FROM cpu-base AS jupyter-minimal ARG JUPYTER_REUSABLE_UTILS=jupyter/utils ARG MINIMAL_SOURCE_CODE=jupyter/minimal/ubi9-python-3.12 diff --git a/jupyter/trustyai/ubi9-python-3.12/build-args/cpu.conf b/jupyter/trustyai/ubi9-python-3.12/build-args/cpu.conf new file mode 100644 index 0000000000..cc7c73581a --- /dev/null +++ b/jupyter/trustyai/ubi9-python-3.12/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=registry.access.redhat.com/ubi9/python-312:latest diff --git 
a/rstudio/c9s-python-3.11/Dockerfile.cpu b/rstudio/c9s-python-3.11/Dockerfile.cpu index cf0a8a395e..f291c74720 100644 --- a/rstudio/c9s-python-3.11/Dockerfile.cpu +++ b/rstudio/c9s-python-3.11/Dockerfile.cpu @@ -1,7 +1,12 @@ -##################### -# base # -##################### -FROM quay.io/sclorg/python-311-c9s:c9s AS base +######################### +# configuration args # +######################### +ARG BASE_IMAGE + +#################### +# cpu-base # +#################### +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -35,7 +40,7 @@ WORKDIR /opt/app-root/src ##################### # rstudio # ##################### -FROM base AS rstudio +FROM cpu-base AS rstudio ARG RSTUDIO_SOURCE_CODE=rstudio/c9s-python-3.11 diff --git a/rstudio/c9s-python-3.11/Dockerfile.cuda b/rstudio/c9s-python-3.11/Dockerfile.cuda index 5d7cab0306..46541b5f9f 100644 --- a/rstudio/c9s-python-3.11/Dockerfile.cuda +++ b/rstudio/c9s-python-3.11/Dockerfile.cuda @@ -1,7 +1,12 @@ -##################### -# base # -##################### -FROM quay.io/sclorg/python-311-c9s:c9s AS base +######################### +# configuration args # +######################### +ARG BASE_IMAGE + +#################### +# cuda-base # +#################### +FROM ${BASE_IMAGE} AS cuda-base WORKDIR /opt/app-root/bin @@ -32,109 +37,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc WORKDIR /opt/app-root/src -#################### -# cuda-base # -#################### -FROM base AS cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - -FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 
brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - 
-# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Install CUDA toolkit 12.6 -RUN dnf -y install cuda-toolkit-12-6 && \ - dnf -y clean all --enablerepo="*" - -# Restore notebook user workspace -USER 1001 -WORKDIR 
/opt/app-root/src - ##################### # rstudio # ##################### diff --git a/rstudio/c9s-python-3.11/build-args/cpu.conf b/rstudio/c9s-python-3.11/build-args/cpu.conf new file mode 100644 index 0000000000..12f863debb --- /dev/null +++ b/rstudio/c9s-python-3.11/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/sclorg/python-311-c9s:c9s diff --git a/rstudio/c9s-python-3.11/build-args/cuda.conf b/rstudio/c9s-python-3.11/build-args/cuda.conf new file mode 100644 index 0000000000..2a5529fdcc --- /dev/null +++ b/rstudio/c9s-python-3.11/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py311-c9s:v12.6 diff --git a/runtimes/datascience/ubi9-python-3.12/Dockerfile.cpu b/runtimes/datascience/ubi9-python-3.12/Dockerfile.cpu index ef9f3fa05f..7365485d4a 100644 --- a/runtimes/datascience/ubi9-python-3.12/Dockerfile.cpu +++ b/runtimes/datascience/ubi9-python-3.12/Dockerfile.cpu @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cpu-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -33,7 +38,7 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc ####################### # runtime-datascience # ####################### -FROM base AS runtime-datascience +FROM cpu-base AS runtime-datascience ARG DATASCIENCE_SOURCE_CODE=runtimes/datascience/ubi9-python-3.12 diff --git a/runtimes/datascience/ubi9-python-3.12/build-args/cpu.conf b/runtimes/datascience/ubi9-python-3.12/build-args/cpu.conf new file mode 100644 index 0000000000..cc7c73581a --- /dev/null +++ b/runtimes/datascience/ubi9-python-3.12/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=registry.access.redhat.com/ubi9/python-312:latest diff --git a/runtimes/minimal/ubi9-python-3.12/Dockerfile.cpu b/runtimes/minimal/ubi9-python-3.12/Dockerfile.cpu index 
4931a57b7f..19ce17d9e2 100644 --- a/runtimes/minimal/ubi9-python-3.12/Dockerfile.cpu +++ b/runtimes/minimal/ubi9-python-3.12/Dockerfile.cpu @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cpu-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cpu-base WORKDIR /opt/app-root/bin @@ -40,7 +45,7 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc #################### # runtime-minimal # #################### -FROM base AS runtime-minimal +FROM cpu-base AS runtime-minimal ARG MINIMAL_SOURCE_CODE=runtimes/minimal/ubi9-python-3.12 diff --git a/runtimes/minimal/ubi9-python-3.12/build-args/cpu.conf b/runtimes/minimal/ubi9-python-3.12/build-args/cpu.conf new file mode 100644 index 0000000000..cc7c73581a --- /dev/null +++ b/runtimes/minimal/ubi9-python-3.12/build-args/cpu.conf @@ -0,0 +1 @@ +BASE_IMAGE=registry.access.redhat.com/ubi9/python-312:latest diff --git a/runtimes/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda b/runtimes/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda index 57bfec60e8..b611f1c646 100644 --- a/runtimes/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda +++ b/runtimes/pytorch+llmcompressor/ubi9-python-3.12/Dockerfile.cuda @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cuda-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cuda-base WORKDIR /opt/app-root/bin @@ -30,105 +35,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -#################### -# cuda-base # -#################### -FROM base AS cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - 
-FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > 
/etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL 
com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################### # cuda-runtime-pytorch # ######################### diff --git a/runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf b/runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/runtimes/pytorch+llmcompressor/ubi9-python-3.12/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/runtimes/pytorch/ubi9-python-3.12/Dockerfile.cuda b/runtimes/pytorch/ubi9-python-3.12/Dockerfile.cuda index bd77972cf6..24bd09e79e 100644 --- a/runtimes/pytorch/ubi9-python-3.12/Dockerfile.cuda +++ b/runtimes/pytorch/ubi9-python-3.12/Dockerfile.cuda @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cuda-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cuda-base WORKDIR /opt/app-root/bin @@ -30,105 +35,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -#################### -# cuda-base # -#################### -FROM base AS cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - -FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 
brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - 
cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# Restore notebook user workspace -USER 1001 
-WORKDIR /opt/app-root/src - ######################### # cuda-runtime-pytorch # ######################### diff --git a/runtimes/pytorch/ubi9-python-3.12/build-args/cuda.conf b/runtimes/pytorch/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/runtimes/pytorch/ubi9-python-3.12/build-args/cuda.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/runtimes/rocm-pytorch/ubi9-python-3.12/Dockerfile.rocm b/runtimes/rocm-pytorch/ubi9-python-3.12/Dockerfile.rocm index 1c998aa6ac..254bf795ef 100644 --- a/runtimes/rocm-pytorch/ubi9-python-3.12/Dockerfile.rocm +++ b/runtimes/rocm-pytorch/ubi9-python-3.12/Dockerfile.rocm @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# rocm-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS rocm-base WORKDIR /opt/app-root/bin @@ -30,40 +35,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -############# -# rocm-base # -############# -FROM base AS rocm-base - -USER 0 -WORKDIR /opt/app-root/bin - -# Please keep in sync with ROCm/python3.12 dependent images -ARG ROCM_VERSION=6.2.4 -ARG AMDGPU_VERSION=6.2.4 - -# Install the ROCm rpms -# ref: https://github.com/ROCm/ROCm-docker/blob/master/dev/Dockerfile-centos-7-complete -# Note: Based on 6.2 above new package mivisionx is a pre-requistes, which bring in more dependent packages -# so we are only installing meta packages of rocm -# ref: https://rocm.docs.amd.com/projects/install-on-linux/en/develop/reference/package-manager-integration.html#packages-in-rocm-programming-models -RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ - echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo && \ - echo 
"baseurl=https://repo.radeon.com/rocm/rhel9/$ROCM_VERSION/main" >> /etc/yum.repos.d/rocm.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/rocm.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo && \ - echo "[amdgpu]" > /etc/yum.repos.d/amdgpu.repo && \ - echo "name=amdgpu" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "baseurl=https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - dnf install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ - dnf clean all && rm -rf /var/cache/yum - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ######################## # rocm-runtime-pytorch # ######################## diff --git a/runtimes/rocm-pytorch/ubi9-python-3.12/build-args/rocm.conf b/runtimes/rocm-pytorch/ubi9-python-3.12/build-args/rocm.conf new file mode 100644 index 0000000000..6682af4d77 --- /dev/null +++ b/runtimes/rocm-pytorch/ubi9-python-3.12/build-args/rocm.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-rocm-py312-ubi9:v6.2 diff --git a/runtimes/rocm-tensorflow/ubi9-python-3.12/Dockerfile.rocm b/runtimes/rocm-tensorflow/ubi9-python-3.12/Dockerfile.rocm index 9a3e0d7166..c40eff0845 100644 --- a/runtimes/rocm-tensorflow/ubi9-python-3.12/Dockerfile.rocm +++ b/runtimes/rocm-tensorflow/ubi9-python-3.12/Dockerfile.rocm @@ -1,7 +1,12 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# rocm-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS rocm-base WORKDIR /opt/app-root/bin @@ -30,40 +35,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -############# -# rocm-base # 
-############# -FROM base AS rocm-base - -USER 0 -WORKDIR /opt/app-root/bin - -# Please keep in sync with ROCm/python3.12 dependent images -ARG ROCM_VERSION=6.2.4 -ARG AMDGPU_VERSION=6.2.4 - -# Install the ROCm rpms -# ref: https://github.com/ROCm/ROCm-docker/blob/master/dev/Dockerfile-centos-7-complete -# Note: Based on 6.2 above new package mivisionx is a pre-requistes, which bring in more dependent packages -# Only the ROCm meta-packages are installed -# ref: https://rocm.docs.amd.com/projects/install-on-linux/en/develop/reference/package-manager-integration.html#packages-in-rocm-programming-models -RUN echo "[ROCm]" > /etc/yum.repos.d/rocm.repo && \ - echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo && \ - echo "baseurl=https://repo.radeon.com/rocm/rhel9/$ROCM_VERSION/main" >> /etc/yum.repos.d/rocm.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/rocm.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo && \ - echo "[amdgpu]" > /etc/yum.repos.d/amdgpu.repo && \ - echo "name=amdgpu" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "baseurl=https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/rhel/9.4/main/x86_64" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo && \ - echo "gpgcheck=0" >> /etc/yum.repos.d/amdgpu.repo && \ - dnf install -y rocm-developer-tools rocm-ml-sdk rocm-opencl-sdk rocm-openmp-sdk rocm-utils && \ - dnf clean all && rm -rf /var/cache/yum - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ########################### # rocm-runtime-tensorflow # ########################### diff --git a/runtimes/rocm-tensorflow/ubi9-python-3.12/build-args/rocm.conf b/runtimes/rocm-tensorflow/ubi9-python-3.12/build-args/rocm.conf new file mode 100644 index 0000000000..6682af4d77 --- /dev/null +++ b/runtimes/rocm-tensorflow/ubi9-python-3.12/build-args/rocm.conf @@ -0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-rocm-py312-ubi9:v6.2 diff --git 
a/runtimes/rocm-tensorflow/ubi9-python-3.12/pylock.toml b/runtimes/rocm-tensorflow/ubi9-python-3.12/pylock.toml index 241328616b..64dea07dd5 100644 --- a/runtimes/rocm-tensorflow/ubi9-python-3.12/pylock.toml +++ b/runtimes/rocm-tensorflow/ubi9-python-3.12/pylock.toml @@ -3099,9 +3099,9 @@ wheels = [{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc [[packages]] name = "skl2onnx" -version = "1.17.0" -sdist = { url = "https://files.pythonhosted.org/packages/f2/91/53c1d0085fb11c6ae2b2092160f55380fa361b8ced1144eada49add70adb/skl2onnx-1.17.0.tar.gz", upload-time = 2024-05-30T12:28:02Z, size = 931965, hashes = { sha256 = "7127dc84e470f489f68094ccfff9a5a815b609f700d43e708e6f658a33b06403" } } -wheels = [{ url = "https://files.pythonhosted.org/packages/61/db/343223f105c34cbf8dd1237693e8628deb5009f2a9c4f2e3d2c6546f2766/skl2onnx-1.17.0-py2.py3-none-any.whl", upload-time = 2024-05-30T12:27:59Z, size = 298421, hashes = { sha256 = "27942fc2743efe9dff56380001da4685812d0f5b1b0b9c1a032e80d059d6779a" } }] +version = "1.18.0" +sdist = { url = "https://files.pythonhosted.org/packages/a9/fb/f91b284365bab4ddcb0f77f573e60dc96bc232648de997814e3ddd832e97/skl2onnx-1.18.0.tar.gz", upload-time = 2024-12-18T07:25:09Z, size = 935240, hashes = { sha256 = "39ea4ae30c5c182355a1824467013158214444e0ce0b18f33338bd827d4fb00f" } } +wheels = [{ url = "https://files.pythonhosted.org/packages/08/de/e8825727acd80484aa28080de62e4dc21f076d6887c10db49e2c8a66578f/skl2onnx-1.18.0-py2.py3-none-any.whl", upload-time = 2024-12-18T07:25:06Z, size = 300310, hashes = { sha256 = "1345d8a1d3aa4a11abfbed4bc984b777023dad85e1c9fe4eb727cba5ee0fcaa8" } }] [[packages]] name = "smart-open" diff --git a/runtimes/rocm-tensorflow/ubi9-python-3.12/pyproject.toml b/runtimes/rocm-tensorflow/ubi9-python-3.12/pyproject.toml index 4c03cadf24..ff6c2690a4 100644 --- a/runtimes/rocm-tensorflow/ubi9-python-3.12/pyproject.toml +++ b/runtimes/rocm-tensorflow/ubi9-python-3.12/pyproject.toml @@ -20,7 +20,7 @@ dependencies 
= [ "plotly~=6.0.0", "scikit-learn~=1.6.1", "scipy~=1.15.2", - "skl2onnx~=1.17.0", + "skl2onnx~=1.18.0", # Required for skl2onnx, as upgraded version is not compatible with protobuf "onnxconverter-common~=1.13.0", "codeflare-sdk~=0.29.0", diff --git a/runtimes/tensorflow/ubi9-python-3.12/Dockerfile.cuda b/runtimes/tensorflow/ubi9-python-3.12/Dockerfile.cuda index 288bfbca76..4cfcc11987 100644 --- a/runtimes/tensorflow/ubi9-python-3.12/Dockerfile.cuda +++ b/runtimes/tensorflow/ubi9-python-3.12/Dockerfile.cuda @@ -1,7 +1,14 @@ +######################### +# configuration args # +######################### +ARG BASE_IMAGE + #################### -# base # +# cuda-base # #################### -FROM registry.access.redhat.com/ubi9/python-312:latest AS base +FROM ${BASE_IMAGE} AS cuda-base + +ARG TARGETARCH WORKDIR /opt/app-root/bin @@ -30,110 +37,6 @@ RUN curl -L https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/oc rm -f /tmp/openshift-client-linux.tar.gz # Install the oc client end -#################### -# cuda-base # -#################### -FROM base AS cuda-base-amd64 -ENV NVARCH=x86_64 - -FROM base AS cuda-base-arm64 -ENV NVARCH=sbsa - -FROM cuda-base-${TARGETARCH} AS cuda-base - -ARG TARGETARCH - -ARG CUDA_SOURCE_CODE=cuda - -# Install CUDA base from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/base/Dockerfile -USER 0 -WORKDIR /opt/app-root/bin - -ENV NVIDIA_REQUIRE_CUDA="cuda>=12.6 brand=unknown,driver>=470,driver<471 brand=grid,driver>=470,driver<471 brand=tesla,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=quadro,driver>=470,driver<471 brand=quadrortx,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471 brand=vapps,driver>=470,driver<471 brand=vpc,driver>=470,driver<471 brand=vcs,driver>=470,driver<471 brand=vws,driver>=470,driver<471 brand=cloudgaming,driver>=470,driver<471 brand=unknown,driver>=535,driver<536 brand=grid,driver>=535,driver<536 brand=tesla,driver>=535,driver<536 
brand=nvidia,driver>=535,driver<536 brand=quadro,driver>=535,driver<536 brand=quadrortx,driver>=535,driver<536 brand=nvidiartx,driver>=535,driver<536 brand=vapps,driver>=535,driver<536 brand=vpc,driver>=535,driver<536 brand=vcs,driver>=535,driver<536 brand=vws,driver>=535,driver<536 brand=cloudgaming,driver>=535,driver<536 brand=unknown,driver>=550,driver<551 brand=grid,driver>=550,driver<551 brand=tesla,driver>=550,driver<551 brand=nvidia,driver>=550,driver<551 brand=quadro,driver>=550,driver<551 brand=quadrortx,driver>=550,driver<551 brand=nvidiartx,driver>=550,driver<551 brand=vapps,driver>=550,driver<551 brand=vpc,driver>=550,driver<551 brand=vcs,driver>=550,driver<551 brand=vws,driver>=550,driver<551 brand=cloudgaming,driver>=550,driver<551" -ENV NV_CUDA_CUDART_VERSION=12.6.77-1 - -COPY ${CUDA_SOURCE_CODE}/cuda.repo-${TARGETARCH} /etc/yum.repos.d/cuda.repo -COPY ${CUDA_SOURCE_CODE}/NGC-DL-CONTAINER-LICENSE / - -RUN NVIDIA_GPGKEY_SUM=d0664fbbdb8c32356d45de36c5984617217b2d0bef41b93ccecd326ba3b80c87 && \ - curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel9/${NVARCH}/D42D0685.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -ENV CUDA_VERSION=12.6.3 - -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN dnf upgrade -y && dnf install -y \ - cuda-cudart-12-6-${NV_CUDA_CUDART_VERSION} \ - cuda-compat-12-6 \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES=all -ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility - -# Install CUDA runtime from: 
-# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/Dockerfile -ENV NV_CUDA_LIB_VERSION=12.6.3-1 -ENV NV_NVTX_VERSION=12.6.77-1 -ENV NV_LIBNPP_VERSION=12.3.1.54-1 -ENV NV_LIBNPP_PACKAGE=libnpp-12-6-${NV_LIBNPP_VERSION} -ENV NV_LIBCUBLAS_VERSION=12.6.4.1-1 -ENV NV_LIBNCCL_PACKAGE_NAME=libnccl -ENV NV_LIBNCCL_PACKAGE_VERSION=2.23.4-1 -ENV NV_LIBNCCL_VERSION=2.23.4 -ENV NCCL_VERSION=2.23.4 -ENV NV_LIBNCCL_PACKAGE=${NV_LIBNCCL_PACKAGE_NAME}-${NV_LIBNCCL_PACKAGE_VERSION}+cuda12.6 - -RUN dnf install -y \ - cuda-libraries-12-6-${NV_CUDA_LIB_VERSION} \ - cuda-nvtx-12-6-${NV_NVTX_VERSION} \ - ${NV_LIBNPP_PACKAGE} \ - libcublas-12-6-${NV_LIBCUBLAS_VERSION} \ - ${NV_LIBNCCL_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install devel tools - -RUN dnf install -y \ - make \ - findutils \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Install CUDA cudnn9 from: -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.6.3/ubi9/runtime/cudnn/Dockerfile -ENV NV_CUDNN_VERSION=9.5.1.17-1 -ENV NV_CUDNN_PACKAGE=libcudnn9-cuda-12-${NV_CUDNN_VERSION} - -LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" - -RUN dnf install -y \ - ${NV_CUDNN_PACKAGE} \ - && dnf clean all \ - && rm -rf /var/cache/yum/* - -# Set this flag so that libraries can find the location of CUDA -ENV XLA_FLAGS=--xla_gpu_cuda_data_dir=/usr/local/cuda - -# hdf5 is needed for h5py -RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \ - dnf install -y hdf5-devel && \ - dnf clean all - -# Restore notebook user workspace -USER 1001 -WORKDIR /opt/app-root/src - ############################ # cuda-runtime-tensorflow # ############################ diff --git a/runtimes/tensorflow/ubi9-python-3.12/build-args/cuda.conf b/runtimes/tensorflow/ubi9-python-3.12/build-args/cuda.conf new file mode 100644 index 0000000000..7525e99151 --- /dev/null +++ b/runtimes/tensorflow/ubi9-python-3.12/build-args/cuda.conf @@ 
-0,0 +1 @@ +BASE_IMAGE=quay.io/opendatahub/odh-base-image-cuda-py312-ubi9:v12.6 diff --git a/scripts/generate_pull_request_pipelineruns.py b/scripts/generate_pull_request_pipelineruns.py index f45b20bf9e..ce43b12054 100644 --- a/scripts/generate_pull_request_pipelineruns.py +++ b/scripts/generate_pull_request_pipelineruns.py @@ -49,6 +49,7 @@ def pull_request_pipelinerun_template( component: str, dockerfile: pathlib.Path, build_platforms: list[Literal["linux/x86_64", "linux/arm64", "linux/ppc64le", "linux/s390x"]], + params: list[dict[str, Any]], ) -> dict[str, Any]: """https://docs.redhat.com/en/documentation/red_hat_openshift_pipelines/1.19/html/pipelines_as_code/creating-pipeline-runs-pac#creating-pipeline-runs-pac""" @@ -81,21 +82,7 @@ def pull_request_pipelinerun_template( "timeouts": { "pipeline": "3h", }, - "params": [ - {"name": "git-url", "value": "{{source_url}}"}, - {"name": "revision", "value": "{{revision}}"}, - {"name": "output-image", "value": f"quay.io/opendatahub/{component}:on-pr-{{{{revision}}}}"}, - {"name": "image-expires-after", "value": "5d"}, - { - "name": "build-platforms", - "value": build_platforms, - }, - {"name": "dockerfile", "value": str(dockerfile)}, - { - "name": "path-context", - "value": ".", - }, - ], + "params": params, "pipelineRef": { "name": "multiarch-pull-request-pipeline", }, @@ -147,13 +134,36 @@ def transform_build_pipeline_to_pr_pipeline(push_pipeline_path: pathlib.Path): build_platforms = ["linux/x86_64"] if component in ["odh-pipeline-runtime-minimal-cpu-py311-ubi9", "odh-pipeline-runtime-minimal-cpu-py312-ubi9"]: build_platforms.extend(["linux/arm64", "linux/s390x"]) + + # Collect params + dockerfile = pathlib.Path( + next(param for param in push_pipeline["spec"]["params"] if param["name"] == "dockerfile")["value"] + ) + + pr_params = [ + {"name": "git-url", "value": "{{source_url}}"}, + {"name": "revision", "value": "{{revision}}"}, + {"name": "output-image", "value": 
f"quay.io/opendatahub/{component}:on-pr-{{{{revision}}}}"}, + {"name": "image-expires-after", "value": "5d"}, + {"name": "build-platforms", "value": build_platforms}, + {"name": "dockerfile", "value": str(dockerfile)}, + {"name": "path-context", "value": "."}, + ] + + existing_param_names = {p["name"] for p in pr_params} + # skip copying these no need of additional-tags on pull-request + skip_params = {"additional-tags"} + + for p in push_pipeline["spec"]["params"]: + if p["name"] not in existing_param_names and p["name"] not in skip_params: + pr_params.append(p) + pr_pipeline = pull_request_pipelinerun_template( on_cel_expression=LiteralScalarString(pr_on_cel_expression + "\n"), component=component, - dockerfile=pathlib.Path( - next(param for param in push_pipeline["spec"]["params"] if param["name"] == "dockerfile")["value"] - ), + dockerfile=dockerfile, build_platforms=build_platforms, + params=pr_params, ) # Generate the new filename and write the file diff --git a/tests/manifests.py b/tests/manifests.py index 5d7f107f7a..8698f5be9b 100644 --- a/tests/manifests.py +++ b/tests/manifests.py @@ -185,7 +185,10 @@ def get_source_of_truth_filepath( filename = f"jupyter-{notebook_id}-{file_suffix}" elif RSTUDIO_NOTEBOOK_ID in notebook_id: - filename = f"rstudio-gpu-{file_suffix}" + imagestream_filename = f"rstudio-gpu-{file_suffix}" + buildconfig_filename = "cuda-rstudio-buildconfig.yaml" + _ = imagestream_filename + filename = buildconfig_filename if not filename: raise ValueError( diff --git a/tests/test_main.py b/tests/test_main.py index 5a399ee6e6..dea3bd17cc 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -1,16 +1,20 @@ from __future__ import annotations +import dataclasses import json import logging import os import pathlib +import pprint import re import shutil import subprocess import tomllib +from collections import defaultdict from typing import TYPE_CHECKING import packaging.requirements +import packaging.specifiers import packaging.utils import 
packaging.version import pytest @@ -64,42 +68,14 @@ def test_image_pyprojects(subtests: pytest_subtests.plugin.SubTests): ) with subtests.test(msg="checking imagestream manifest consistency with pylock.toml", pyproject=file): - # TODO(jdanek): missing manifests - if is_suffix(directory.parts, pathlib.Path("runtimes/rocm-tensorflow/ubi9-python-3.12").parts): - pytest.skip(f"Manifest not implemented {directory.parts}") - if is_suffix(directory.parts, pathlib.Path("jupyter/rocm/tensorflow/ubi9-python-3.12").parts): - pytest.skip(f"Manifest not implemented {directory.parts}") - - metadata = manifests.extract_metadata_from_path(directory) - manifest_file = manifests.get_source_of_truth_filepath( - root_repo_directory=PROJECT_ROOT, - metadata=metadata, - ) - if not manifest_file.is_file(): - raise FileNotFoundError( - f"Unable to determine imagestream manifest for '{directory}'. " - f"Computed filepath '{manifest_file}' does not exist." - ) - - imagestream = yaml.safe_load(manifest_file.read_text()) - recommended_tags = [ - tag - for tag in imagestream["spec"]["tags"] - if tag["annotations"].get("opendatahub.io/workbench-image-recommended", None) == "true" - ] - assert len(recommended_tags) <= 1, "at most one tag may be recommended at a time" - assert recommended_tags or len(imagestream["spec"]["tags"]) == 1, ( - "Either there has to be recommended image, or there can be only one tag" - ) - current_tag = recommended_tags[0] if recommended_tags else imagestream["spec"]["tags"][0] + _skip_unimplemented_manifests(directory) - sw = json.loads(current_tag["annotations"]["opendatahub.io/notebook-software"]) - dep = json.loads(current_tag["annotations"]["opendatahub.io/notebook-python-dependencies"]) + manifest = load_manifests_file_for(directory) with subtests.test(msg="checking the `notebook-software` array", pyproject=file): # TODO(jdanek) pytest.skip("checking the `notebook-software` array not yet implemented") - for s in sw: + for s in manifest.sw: if s.get("name") == 
def test_image_manifests_version_alignment(subtests: pytest_subtests.plugin.SubTests):
    """Check that each package pinned in the imagestream manifests carries the same
    version across the current (recommended) tag of every imagestream.

    Deliberate divergences are whitelisted in ``ignored_exceptions`` below; any other
    package appearing with more than one version fails the corresponding subtest.
    """
    collected_manifests = []
    for file in PROJECT_ROOT.glob("**/pyproject.toml"):
        logging.info(file)
        directory = file.parent  # "ubi9-python-3.11"
        try:
            _ubi, _lang, _python = directory.name.split("-")
        except ValueError:
            # directories that don't follow the <os>-<lang>-<version> naming are not image dirs
            logging.debug(f"skipping {directory.name}/pyproject.toml as it is not an image directory")
            continue

        # return-instead-of-skip, so the whole test is not skipped on the first hit
        if _skip_unimplemented_manifests(directory, call_skip=False):
            continue

        manifest = load_manifests_file_for(directory)
        collected_manifests.append(manifest)

    @dataclasses.dataclass
    class VersionData:
        # which manifest declared the version, and the declared version string
        manifest: Manifest
        version: str

    # package name -> every (manifest, version) pair that declares it
    packages: dict[str, list[VersionData]] = defaultdict(list)
    for manifest in collected_manifests:
        for dep in manifest.dep:
            name = dep["name"]
            version = dep["version"]
            packages[name].append(VersionData(manifest=manifest, version=version))

    # TODO(jdanek): review these, if any are unwarranted
    ignored_exceptions: tuple[tuple[str, tuple[str, ...]], ...] = (
        # ("package name", ("allowed version 1", "allowed version 2", ...))
        ("Codeflare-SDK", ("0.30", "0.29")),
        ("Scikit-learn", ("1.7", "1.6")),
        ("Pandas", ("2.2", "1.5")),
        ("Numpy", ("2.2", "1.26")),
        ("Tensorboard", ("2.19", "2.18")),
    )

    for name, data in packages.items():
        versions = [d.version for d in data]

        # if there is only a single version, all is good
        if len(set(versions)) == 1:
            continue

        # manifest path (relative, for readable failure output) -> declared version
        mapping = {str(d.manifest.filename.relative_to(PROJECT_ROOT)): d.version for d in data}
        with subtests.test(msg=f"checking versions for {name} across the latest tags in all imagestreams"):
            exception = next((it for it in ignored_exceptions if it[0] == name), None)
            if exception:
                # exception may save us from failing
                if set(versions) == set(exception[1]):
                    continue
                else:
                    # fix: report only the allowed versions (exception[1]) rather than the whole
                    # (name, versions) tuple, matching test_image_pyprojects_version_alignment
                    pytest.fail(
                        f"{name} is allowed to have {exception[1]} but actually has more versions: {pprint.pformat(mapping)}"
                    )
            # all hope is lost, the check has failed
            pytest.fail(f"{name} has multiple versions: {pprint.pformat(mapping)}")


def test_image_pyprojects_version_alignment(subtests: pytest_subtests.plugin.SubTests):
    """Check that dependency version specifiers agree across all image pyproject.toml files.

    Deliberate divergences are whitelisted in ``ignored_exceptions`` below; any other
    package appearing with more than one specifier fails the corresponding subtest.
    """
    # package name -> every specifier set seen for it across pyproject.toml files
    requirements = defaultdict(list)
    for file in PROJECT_ROOT.glob("**/pyproject.toml"):
        logging.info(file)
        directory = file.parent  # "ubi9-python-3.11"
        try:
            _ubi, _lang, _python = directory.name.split("-")
        except ValueError:
            logging.debug(f"skipping {directory.name}/pyproject.toml as it is not an image directory")
            continue

        pyproject = tomllib.loads(file.read_text())
        for d in pyproject["project"]["dependencies"]:
            requirement = packaging.requirements.Requirement(d)
            requirements[requirement.name].append(requirement.specifier)

    # TODO(jdanek): review these, if any are unwarranted
    ignored_exceptions: tuple[tuple[str, tuple[str, ...]], ...] = (
        # ("package name", ("allowed specifier 1", "allowed specifier 2", ...))
        ("setuptools", ("~=78.1.1", "==78.1.1")),
        ("wheel", ("==0.45.1", "~=0.45.1")),
        ("tensorboard", ("~=2.18.0", "~=2.19.0")),
        ("torch", ("==2.6.0", "==2.6.0+cu126", "==2.6.0+rocm6.2.4")),
        ("torchvision", ("==0.21.0", "==0.21.0+cu126", "==0.21.0+rocm6.2.4")),
        ("matplotlib", ("~=3.10.1", "~=3.10.3")),
        ("numpy", ("~=2.2.3", "<2.0.0", "~=1.26.4")),
        ("pandas", ("~=2.2.3", "~=1.5.3")),
        ("scikit-learn", ("~=1.6.1", "~=1.7.0")),
        ("codeflare-sdk", ("~=0.29.0", "~=0.30.0")),
        ("ipython-genutils", (">=0.2.0", "~=0.2.0")),
        ("jinja2", (">=3.1.6", "~=3.1.6")),
        ("jupyter-client", ("~=8.6.3", ">=8.6.3")),
        ("requests", ("~=2.32.3", ">=2.0.0")),
        ("urllib3", ("~=2.5.0", "~=2.3.0")),
        ("transformers", ("<5.0,>4.0", "~=4.55.0")),
        ("datasets", ("", "~=3.4.1")),
        ("accelerate", ("!=1.1.0,>=0.20.3", "~=1.5.2")),
        ("kubeflow-training", ("==1.9.0", "==1.9.2", "==1.9.3")),
        ("jupyter-bokeh", ("~=3.0.5", "~=4.0.5")),
        ("jupyterlab-lsp", ("~=5.1.0", "~=5.1.1")),
        ("jupyterlab-widgets", ("~=3.0.13", "~=3.0.15")),
    )

    for name, data in requirements.items():
        # a single distinct specifier means all pyprojects agree
        if len(set(data)) == 1:
            continue

        with subtests.test(msg=f"checking versions of {name} across all pyproject.tomls"):
            exception = next((it for it in ignored_exceptions if it[0] == name), None)
            if exception:
                # exception may save us from failing
                if set(data) == {packaging.specifiers.SpecifierSet(e) for e in exception[1]}:
                    continue
                else:
                    pytest.fail(
                        f"{name} is allowed to have {exception[1]} but actually has more specifiers: {pprint.pformat(set(data))}"
                    )
            # all hope is lost, the check has failed
            pytest.fail(f"{name} has multiple specifiers: {pprint.pformat(data)}")
def _skip_unimplemented_manifests(directory: pathlib.Path, call_skip=True) -> bool:
    """Detect image directories whose imagestream manifest does not exist yet.

    When *directory* matches a known-unimplemented path: either skip the current
    test (``call_skip=True``, the default) or return ``True`` so the caller can
    handle it. Returns ``False`` for directories that do have a manifest.
    """
    # TODO(jdanek): missing manifests
    unimplemented = (
        "runtimes/rocm-tensorflow/ubi9-python-3.12",
        "jupyter/rocm/tensorflow/ubi9-python-3.12",
    )
    if any(is_suffix(directory.parts, pathlib.Path(entry).parts) for entry in unimplemented):
        if call_skip:
            pytest.skip(f"Manifest not implemented {directory.parts}")
        return True
    return False


@dataclasses.dataclass
class Manifest:
    """Parsed imagestream manifest data for a single image directory."""

    # path of the manifest file the data was loaded from
    filename: pathlib.Path
    # the parsed imagestream YAML document
    imagestream: dict[str, Any]
    # metadata derived from the image directory path
    metadata: manifests.NotebookMetadata
    # parsed `opendatahub.io/notebook-software` annotation of the current tag
    sw: list[dict[str, Any]]
    # parsed `opendatahub.io/notebook-python-dependencies` annotation of the current tag
    dep: list[dict[str, Any]]


def load_manifests_file_for(directory: pathlib.Path) -> Manifest:
    """Locate, parse, and validate the imagestream manifest for *directory*.

    Raises FileNotFoundError when no manifest file can be found. Asserts the
    imagestream has exactly one "current" tag (the recommended one, or the sole
    tag) and returns its software/dependency annotations as a Manifest.
    """
    metadata = manifests.extract_metadata_from_path(directory)
    manifest_file = manifests.get_source_of_truth_filepath(
        root_repo_directory=PROJECT_ROOT,
        metadata=metadata,
    )
    if not manifest_file.is_file():
        raise FileNotFoundError(
            f"Unable to determine imagestream manifest for '{directory}'. "
            f"Computed filepath '{manifest_file}' does not exist."
        )

    text = manifest_file.read_text()
    # BEWARE: rhds rstudio has imagestream bundled in the buildconfig yaml
    if "buildconfig" in manifest_file.name:
        # imagestream is the first document in the file
        imagestream = next(yaml.safe_load_all(text))
    else:
        imagestream = yaml.safe_load(text)

    tags = imagestream["spec"]["tags"]
    recommended = [
        tag
        for tag in tags
        if tag["annotations"].get("opendatahub.io/workbench-image-recommended", None) == "true"
    ]
    assert len(recommended) <= 1, "at most one tag may be recommended at a time"
    assert recommended or len(tags) == 1, (
        "Either there has to be recommended image, or there can be only one tag"
    )
    current = recommended[0] if recommended else tags[0]

    annotations = current["annotations"]
    return Manifest(
        filename=manifest_file,
        imagestream=imagestream,
        metadata=metadata,
        sw=json.loads(annotations["opendatahub.io/notebook-software"]),
        dep=json.loads(annotations["opendatahub.io/notebook-python-dependencies"]),
    )