This repository has been archived by the owner on Jan 22, 2024. It is now read-only.

Merge branch 'CNT-4255/convert-to-meta-package' into 'main'
Convert nvidia-docker2 to a meta package

See merge request nvidia/container-toolkit/nvidia-docker!46
ArangoGutierrez committed Apr 26, 2023
2 parents 80902fe + fa083de commit a9f6b1b
Showing 14 changed files with 113 additions and 451 deletions.
131 changes: 51 additions & 80 deletions .gitlab-ci.yml
@@ -18,11 +18,6 @@ default:
- name: docker:dind
command: ["--experimental"]

# Build packages for all supported OS / ARCH combinations
stages:
- build-one
- build-all

variables:
# We specify the LIB_VERSION, TOOLKIT_VERSION, and TOOLKIT_TAG variables to allow packages
# to be built.
@@ -31,45 +26,51 @@ variables:
TOOLKIT_VERSION: 999.999.999
TOOLKIT_TAG: dummy+toolkit

.build-setup: &build-setup
before_script:
- apk update
- apk upgrade
- apk add coreutils build-base sed git bash make
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes
# Build packages for all supported OS / ARCH combinations
stages:
- trigger
- build

# build-one jobs build packages for a single OS / ARCH combination.
#
# They are run during the first stage of the pipeline as a smoke test to ensure
# that we can successfully build packages on all of our architectures for a
# single OS. They are triggered on any change to an MR. No artifacts are
# produced as part of build-one jobs.
.build-one-setup: &build-one-setup
<<: *build-setup
stage: build-one
only:
- merge_requests
.pipeline-trigger-rules:
rules:
# We trigger the pipeline if started manually
- if: $CI_PIPELINE_SOURCE == "web"
# We trigger the pipeline on the main branch
- if: $CI_COMMIT_BRANCH == "main"
# We trigger the pipeline on the release- branches
- if: $CI_COMMIT_BRANCH =~ /^release-.*$/
# We trigger the pipeline on tags
- if: $CI_COMMIT_TAG && $CI_COMMIT_TAG != ""

workflow:
rules:
# We trigger the pipeline on a merge request
- if: $CI_PIPELINE_SOURCE == 'merge_request_event'
# We then add all the regular triggers
- !reference [.pipeline-trigger-rules, rules]

# The main or manual job is used to filter out distributions or architectures that are not required on
# every build.
.main-or-manual:
rules:
- !reference [.pipeline-trigger-rules, rules]
- if: $CI_PIPELINE_SOURCE == "schedule"
when: manual

# build-all jobs build packages for every OS / ARCH combination we support.
#
# They are run under two conditions:
# 1) Automatically whenever a new tag is pushed to the repo (e.g. v1.1.0)
# 2) Manually by a reviewer just before merging a MR.
#
# Unlike build-one jobs, it takes a long time to build the full suite of
# OS / ARCH combinations, so this is optimized to only run once per MR
# (assuming it all passes). A full set of artifacts including the packages
# built for each OS / ARCH are produced as a result of these jobs.
.build-all-setup: &build-all-setup
<<: *build-setup
stage: build-all
timeout: 2h 30m
# The trigger-pipeline job adds a manually triggered job to the pipeline on merge requests.
trigger-pipeline:
stage: trigger
script:
- echo "starting pipeline"
rules:
- if: $CI_COMMIT_TAG
when: always
- if: $CI_MERGE_REQUEST_ID
- !reference [.main-or-manual, rules]
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: manual
allow_failure: false
- when: always

.build-setup:
stage: build
variables:
ARTIFACTS_NAME: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}-${CI_JOB_NAME}-artifacts-${CI_PIPELINE_ID}"
ARTIFACTS_DIR: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}-artifacts-${CI_PIPELINE_ID}"
@@ -80,46 +81,16 @@ variables:
paths:
- ${ARTIFACTS_DIR}

# The full set of build-one jobs is organized to build
# ubuntu18.04 in parallel on each of our supported ARCHs.
build-one-amd64:
<<: *build-one-setup
script:
- make ubuntu18.04-amd64

build-one-ppc64le:
<<: *build-one-setup
script:
- make ubuntu18.04-ppc64le

build-one-arm64:
<<: *build-one-setup
script:
- make ubuntu18.04-arm64

# The full set of build-all jobs is organized to
# have builds for each ARCH run in parallel.
build-all-amd64:
<<: *build-all-setup
script:
- make docker-amd64

build-all-x86_64:
<<: *build-all-setup
script:
- make docker-x86_64

build-all-ppc64le:
<<: *build-all-setup
script:
- make docker-ppc64le

build-all-arm64:
<<: *build-all-setup
script:
- make docker-arm64
before_script:
- apk update
- apk upgrade
- apk add coreutils build-base sed git bash make

build-all-aarch64:
<<: *build-all-setup
build:
extends:
- .build-setup
parallel:
matrix:
- PACKAGING: [deb, rpm]
script:
- make docker-aarch64
- make ${PACKAGING}
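For context, `parallel: matrix` expands the single `build` job above into one generated job per matrix entry (GitLab names them `build: [deb]` and `build: [rpm]`), each with the `PACKAGING` variable set accordingly. Written out by hand, the equivalent jobs would look roughly like this (a sketch for illustration only; these explicit job names do not exist in the pipeline):

```
# Hand-written equivalent of the matrix expansion (illustrative)
build-deb:
  extends: .build-setup
  variables:
    PACKAGING: deb
  script:
    - make ${PACKAGING}   # i.e. make deb

build-rpm:
  extends: .build-setup
  variables:
    PACKAGING: rpm
  script:
    - make ${PACKAGING}   # i.e. make rpm
```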
40 changes: 20 additions & 20 deletions README.md
@@ -1,33 +1,33 @@
# NVIDIA Container Toolkit

[![GitHub license](https://img.shields.io/github/license/NVIDIA/nvidia-docker?style=flat-square)](https://raw.githubusercontent.com/NVIDIA/nvidia-docker/main/LICENSE)
[![Documentation](https://img.shields.io/badge/documentation-wiki-blue.svg?style=flat-square)](https://github.com/NVIDIA/nvidia-docker/wiki)
[![Package repository](https://img.shields.io/badge/packages-repository-b956e8.svg?style=flat-square)](https://nvidia.github.io/nvidia-docker)

![nvidia-gpu-docker](https://cloud.githubusercontent.com/assets/3028125/12213714/5b208976-b632-11e5-8406-38d379ec46aa.png)
**NOTE:** The `nvidia-docker2` package generated by this repository is a meta
package that only serves to introduce a dependency on the `nvidia-container-toolkit`
package, which includes all the components of the [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-container-toolkit).

## Introduction
The NVIDIA Container Toolkit allows users to build and run GPU accelerated Docker containers. The toolkit includes a container runtime [library](https://github.com/NVIDIA/libnvidia-container) and utilities to automatically configure containers to leverage NVIDIA GPUs.
The `nvidia-docker` wrapper script that was included in this repository is no
longer shipped in the package; configuring the target container engine (e.g.
Docker, containerd, CRI-O, or Podman) directly is recommended instead.

Product documentation including an architecture overview, platform support, installation and usage guides can be found in the [documentation repository](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html).
For Docker users specifically, the NVIDIA Container Toolkit CLI (`nvidia-ctk`)
includes functionality to ensure that the `nvidia` runtime has been registered
with the Docker daemon. After installing the NVIDIA Container Toolkit, running:
```
sudo nvidia-ctk runtime configure
```
will load (or create) an `/etc/docker/daemon.json` file and ensure that the
NVIDIA Container Runtime is configured as a runtime named `nvidia`.

Frequently asked questions are available on the [wiki](https://github.com/NVIDIA/nvidia-docker/wiki).
Restarting the Docker daemon is required for this change to take effect.
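A typical end-to-end sequence therefore looks like the following sketch (the explicit `--runtime=docker` flag and the verification step are illustrative additions, not part of this README):

```
sudo nvidia-ctk runtime configure --runtime=docker   # explicit form of the command above
sudo systemctl restart docker                        # pick up the updated daemon.json
docker info | grep -i runtimes                       # the list should now include "nvidia"
```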

## Getting Started

**Make sure you have installed the [NVIDIA driver](https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes/index.html) and Docker engine for your Linux distribution**.

**Note that you do not need to install the CUDA Toolkit on the host system, but the NVIDIA driver needs to be installed**.

For instructions on getting started with the NVIDIA Container Toolkit, refer to the [installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker).

## Usage

The [user guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html) provides information on the configuration and command line options available when running GPU containers with Docker.
For further instructions, see the NVIDIA Container Toolkit [documentation](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit)
and specifically the [user guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html).

## Issues and Contributing

[Check out the Contributing document!](CONTRIBUTING.md)

* Please let us know by [filing a new issue](https://github.com/NVIDIA/nvidia-docker/issues/new)
* You can contribute by opening a [merge request](https://gitlab.com/nvidia/container-toolkit/nvidia-docker/-/merge_requests)
* Please let us know by [filing a new issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/new) against the `nvidia-container-toolkit` repository.
* You can contribute by opening a [merge request](https://gitlab.com/nvidia/container-toolkit/container-toolkit/-/merge_requests)
8 changes: 0 additions & 8 deletions daemon.json

This file was deleted.
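For reference, the deleted `daemon.json` was the static snippet that earlier packaging installed to register the `nvidia` runtime with Docker; the `nvidia-ctk runtime configure` flow described in the README replaces it. Its contents were along these lines (a reconstruction of the standard packaged file, not shown in this diff):

```
{
    "runtimes": {
        "nvidia": {
            "path": "nvidia-container-runtime",
            "runtimeArgs": []
        }
    }
}
```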

12 changes: 9 additions & 3 deletions debian/control
@@ -12,6 +12,12 @@ Package: nvidia-docker2
Architecture: all
Breaks: nvidia-docker (<< 2.0.0)
Replaces: nvidia-docker (<< 2.0.0)
Depends: ${misc:Depends}, nvidia-container-toolkit (>= @TOOLKIT_VERSION@), @DOCKER_VERSION@
Description: nvidia-docker CLI wrapper
Replaces nvidia-docker with a new implementation based on the NVIDIA Container Toolkit
Depends: ${misc:Depends}, nvidia-container-toolkit (>= @TOOLKIT_VERSION@)
Description: NVIDIA Container Toolkit meta-package
A meta-package that allows installation flows that expect the nvidia-docker2
package to be migrated to installing the NVIDIA Container Toolkit packages directly.
The wrapper script provided in earlier versions of this package should be
considered deprecated.
The nvidia-container-toolkit-base package provides an nvidia-ctk CLI that can be
used to update the docker config in-place to allow for the NVIDIA Container
Runtime to be used.
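In practice the meta-package behaves exactly as the description says: installing it pulls in the toolkit and nothing else. A quick way to confirm this on a Debian-based system (output abridged and illustrative):

```
$ apt-cache depends nvidia-docker2
nvidia-docker2
  Depends: nvidia-container-toolkit
$ sudo apt-get install nvidia-docker2   # installs nvidia-container-toolkit as a dependency
```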
2 changes: 0 additions & 2 deletions debian/nvidia-docker2.install

This file was deleted.

2 changes: 0 additions & 2 deletions debian/prepare
@@ -3,5 +3,3 @@
set -e

sed -i "s;@SECTION@;${SECTION:+$SECTION/};g" debian/control
sed -i "s;@TOOLKIT_VERSION@;${TOOLKIT_VERSION};g" debian/control
sed -i "s;@DOCKER_VERSION@;${DOCKER_VERSION};g" debian/control
42 changes: 0 additions & 42 deletions docker/Dockerfile.amazonlinux

This file was deleted.

5 changes: 0 additions & 5 deletions docker/Dockerfile.ubuntu → docker/Dockerfile.deb
@@ -30,14 +30,9 @@ ENV SECTION ""
ENV DIST_DIR=/tmp/${PKG_NAME}-$PKG_VERS
RUN mkdir -p $DIST_DIR /dist

# nvidia-docker 2.0
COPY nvidia-docker $DIST_DIR/nvidia-docker
COPY daemon.json $DIST_DIR/daemon.json

WORKDIR $DIST_DIR
COPY debian ./debian

RUN sed -i "s;@VERSION@;${PKG_VERS};" $DIST_DIR/nvidia-docker
RUN sed -i "s;@TOOLKIT_VERSION@;${TOOLKIT_VERSION};" debian/control && \
dch --create --package="${PKG_NAME}" \
--newversion "${REVISION}" \
49 changes: 0 additions & 49 deletions docker/Dockerfile.debian

This file was deleted.

42 changes: 0 additions & 42 deletions docker/Dockerfile.opensuse-leap

This file was deleted.

