linux-gpu-ci-pipeline.yml

##### start trigger. Don't edit this block manually; edit set-trigger-rules.py instead. ####
trigger:
  branches:
    include:
    - main
    - rel-*
  paths:
    exclude:
    - docs/**
    - README.md
    - CONTRIBUTING.md
    - BUILD.md
    - 'js/web'
    - 'onnxruntime/core/providers/js'
pr:
  branches:
    include:
    - main
    - rel-*
  paths:
    exclude:
    - docs/**
    - README.md
    - CONTRIBUTING.md
    - BUILD.md
    - 'js/web'
    - 'onnxruntime/core/providers/js'
#### end trigger ####
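
# Queue-time parameters. CudaVersion switches every CUDA-dependent value below;
# SpecificArtifact/BuildId let the test stage download the build output of an
# earlier pipeline run instead of the current one.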
parameters:
- name: CudaVersion
  displayName: CUDA version
  type: string
  default: '11.8'
  values:
    - 11.8
    - 12.2
- name: SpecificArtifact
  displayName: Use Specific Artifact
  type: boolean
  default: false
- name: BuildId
  displayName: Specific Artifact's BuildId
  type: string
  default: '0'
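
# Pin pypa/manylinux to a fixed commit so the docker image build is reproducible.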
resources:
  repositories:
  - repository: manylinux
    type: Github
    endpoint: Microsoft
    name: pypa/manylinux
    ref: 5eda9aded5462201e6310105728d33016e637ea7
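
# Every CUDA-version-dependent value is derived from the CudaVersion parameter:
# the base image, the TensorRT package version, and the name of the docker
# repository used to cache the built image.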
variables:
- name: docker_base_image
  ${{ if eq(parameters.CudaVersion, '11.8') }}:
    value: nvidia/cuda:11.8.0-cudnn8-devel-ubi8
  ${{ if eq(parameters.CudaVersion, '12.2') }}:
    value: nvidia/cuda:12.2.2-cudnn8-devel-ubi8
- name: linux_trt_version
  ${{ if eq(parameters.CudaVersion, '11.8') }}:
    value: 10.0.1.6-1.cuda11.8
  ${{ if eq(parameters.CudaVersion, '12.2') }}:
    value: 10.0.1.6-1.cuda12.4
- name: Repository
  ${{ if eq(parameters.CudaVersion, '11.8') }}:
    value: 'onnxruntimecuda11manylinuxbuild'
  ${{ if eq(parameters.CudaVersion, '12.2') }}:
    value: 'onnxruntimecuda12manylinuxbuild'
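
# Two stages: Linux_Build compiles on a CPU-only pool, Linux_Test runs the
# resulting binaries on a GPU (T4) pool, so GPU agents are only held for testing.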
stages:
- stage: Linux_Build
  jobs:
  - job: Linux_Build
    timeoutInMinutes: 120
    variables:
      skipComponentGovernanceDetection: true
      CCACHE_DIR: $(Pipeline.Workspace)/ccache
    workspace:
      clean: all
    pool: onnxruntime-Ubuntu2204-AMD-CPU
    steps:
    - task: mspremier.PostBuildCleanup.PostBuildCleanup-task.PostBuildCleanup@3
      displayName: 'Clean Agent Directories'
      condition: always()

    - checkout: self
      clean: true
      submodules: none

    - template: templates/get-docker-image-steps.yml
      parameters:
        Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.manylinux2_28_cuda
        Context: tools/ci_build/github/linux/docker
        DockerBuildArgs: "
          --network=host
          --build-arg BASEIMAGE=$(docker_base_image)
          --build-arg TRT_VERSION=$(linux_trt_version)
          --build-arg BUILD_UID=$( id -u )
          "
        Repository: $(Repository)
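
    # Restore the ccache cache. The full key includes the exact commit, so an
    # exact hit is rare; restoreKeys fall back to the newest cache for the same
    # CUDA version and branch, then to any ccache entry. Example resolved key
    # (assuming CUDA 11.8 on main):
    #   "ccache" | "11.8" | "refs/heads/main" | "<commit sha>"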
    - task: Cache@2
      inputs:
        key: '"ccache" | "${{parameters.CudaVersion}}" | "$(Build.SourceBranch)" | "$(Build.SourceVersion)"'
        path: $(CCACHE_DIR)
        restoreKeys: |
          "ccache" | "${{parameters.CudaVersion}}" | "$(Build.SourceBranch)"
          "ccache"
        cacheHitVar: CACHE_RESTORED
      displayName: Cache Task

    - script: |
        sudo mkdir -p $(Pipeline.Workspace)/ccache
      condition: ne(variables.CACHE_RESTORED, 'true')
      displayName: Create Cache Dir
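
    # Build inside the manylinux CUDA container: binskim-compliant hardening
    # flags, the ccache directory mounted at /cache, and CMake/Ninja driven by
    # build.py. CMAKE_CUDA_ARCHITECTURES=75 targets the T4 GPUs used in the
    # test stage.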
    - script: |
        set -e -x
        mkdir -p $HOME/.onnx
        docker run -e CFLAGS="-Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -fstack-clash-protection -fcf-protection -O3 -Wl,--strip-all" -e CXXFLAGS="-Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -fstack-clash-protection -fcf-protection -O3 -Wl,--strip-all" --rm \
          --volume /data/onnx:/data/onnx:ro \
          --volume $(Build.SourcesDirectory):/onnxruntime_src \
          --volume $(Build.BinariesDirectory):/build \
          --volume /data/models:/build/models:ro \
          --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \
          --volume $(Pipeline.Workspace)/ccache:/cache \
          -e ALLOW_RELEASED_ONNX_OPSET_ONLY=0 \
          -e NIGHTLY_BUILD \
          -e BUILD_BUILDNUMBER \
          -e CCACHE_DIR=/cache \
          $(Repository) \
          /bin/bash -c "
            set -ex; \
            env; \
            ccache -s; \
            /opt/python/cp38-cp38/bin/python3 /onnxruntime_src/tools/ci_build/build.py \
              --build_dir /build --cmake_generator Ninja \
              --config Release --update --build \
              --skip_submodule_sync \
              --build_shared_lib \
              --parallel --use_binskim_compliant_compile_flags \
              --build_wheel \
              --enable_onnx_tests --use_cuda --cuda_version=${{parameters.CudaVersion}} --cuda_home=/usr/local/cuda-${{parameters.CudaVersion}} --cudnn_home=/usr/local/cuda-${{parameters.CudaVersion}} \
              --enable_cuda_profiling --enable_cuda_nhwc_ops \
              --enable_pybind --build_java \
              --use_cache \
              --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=75 \
              --cmake_extra_defines onnxruntime_BUILD_UNIT_TESTS=ON \
              --cmake_extra_defines onnxruntime_ENABLE_CUDA_EP_INTERNAL_TESTS=ON; \
            ccache -sv; \
            ccache -z"
      workingDirectory: $(Build.SourcesDirectory)
      displayName: Build Onnxruntime
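
    # Prune the build tree before publishing: drop sources copied into the
    # build directory and every _deps entry except onnx-src (needed for test
    # data), then record which files are executable in perms.txt, since
    # pipeline artifacts do not preserve the executable bit.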
    - task: CmdLine@2
      inputs:
        script: |
          rm -rf $(Build.BinariesDirectory)/Release/onnxruntime $(Build.BinariesDirectory)/Release/pybind11
          rm -f $(Build.BinariesDirectory)/Release/models
          find $(Build.BinariesDirectory)/Release/_deps -mindepth 1 ! -regex '^$(Build.BinariesDirectory)/Release/_deps/onnx-src\(/.*\)?' -delete
          cd $(Build.BinariesDirectory)/Release
          find -executable -type f > $(Build.BinariesDirectory)/Release/perms.txt

    - task: PublishPipelineArtifact@0
      displayName: 'Publish Pipeline Artifact'
      inputs:
        artifactName: 'drop-linux'
        targetPath: '$(Build.BinariesDirectory)/Release'

    - template: templates/explicitly-defined-final-tasks.yml
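
# Test stage: runs on T4 GPU agents. It builds (or pulls from cache) the same
# docker image as the build stage and reuses the published 'drop-linux'
# artifact, so nothing is recompiled here.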
- stage: Linux_Test
  dependsOn:
  - Linux_Build
  jobs:
  - job: Linux_Test
    timeoutInMinutes: 180
    variables:
      skipComponentGovernanceDetection: true
    workspace:
      clean: all
    pool: onnxruntime-Linux-GPU-T4
    steps:
    - checkout: self
      clean: true
      submodules: none

    - template: templates/flex-downloadPipelineArtifact.yml
      parameters:
        ArtifactName: 'drop-linux'
        StepName: 'Download Pipeline Artifact - Linux Build'
        TargetPath: '$(Build.BinariesDirectory)/Release'
        SpecificArtifact: ${{ parameters.SpecificArtifact }}
        BuildId: ${{ parameters.BuildId }}

    - template: templates/get-docker-image-steps.yml
      parameters:
        Dockerfile: tools/ci_build/github/linux/docker/Dockerfile.manylinux2_28_cuda
        Context: tools/ci_build/github/linux/docker
        DockerBuildArgs: "
          --network=host
          --build-arg BASEIMAGE=$(docker_base_image)
          --build-arg TRT_VERSION=$(linux_trt_version)
          --build-arg BUILD_UID=$( id -u )
          "
        Repository: $(Repository)
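
    # Sanity check before the long test run: confirm the GPU is visible, print
    # the CUDA/cuDNN/TensorRT library versions found on the library path,
    # install the freshly built wheel, and verify that onnxruntime can
    # construct a CUDA OrtDevice.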
    - task: CmdLine@2
      inputs:
        script: |
          set -e -x
          mkdir -p $HOME/.onnx
          docker run --gpus all --rm \
            --volume $(Build.SourcesDirectory):/onnxruntime_src \
            --volume $(Build.BinariesDirectory)/Release:/build/Release \
            --volume /data/models:/build/models:ro \
            --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \
            --volume /data/onnx:/data/onnx \
            -e NVIDIA_TF32_OVERRIDE=0 \
            $(Repository) \
            /bin/bash -c '
              nvidia-smi; \
              /sbin/ldconfig -N -v $(sed "s/:/ /" <<< $LD_LIBRARY_PATH) 2>/dev/null | grep -E "libcudart.so|libcudnn.so|libnvinfer.so"; \
              cat /usr/local/cuda/include/cuda.h | grep -m1 CUDA_VERSION; \
              cat /usr/include/cudnn_version.h | grep CUDNN_MAJOR -m1 -A 2; \
              ln -s /opt/python/cp38-cp38/bin/python3 /tmp/python3; \
              /tmp/python3 -m pip install /build/Release/dist/*.whl; \
              /tmp/python3 -u -c "from onnxruntime.capi._pybind_state import (OrtDevice as C_OrtDevice) ; \
                ort_device = C_OrtDevice(C_OrtDevice.cuda(), C_OrtDevice.default_memory(), 0); \
                print(ort_device); print(ort_device.device_type(), C_OrtDevice.cuda()); \
                assert(ort_device.device_type()==1); assert(C_OrtDevice.cuda()==1);" \
              '
      displayName: 'Check GPU'
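
    # Main test pass: install the Python requirements and the built wheel,
    # restore the executable bits recorded in perms.txt, run the Java
    # cmakeCheck against the build directory, then invoke build.py with --test
    # only (no compilation). NVIDIA_TF32_OVERRIDE=0 disables TF32 so numerical
    # results stay comparable across GPU generations.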
    - task: CmdLine@2
      inputs:
        script: |
          set -e -x
          mkdir -p $HOME/.onnx
          docker run --gpus all --shm-size=1g --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 --rm \
            --volume $(Build.SourcesDirectory):/onnxruntime_src \
            --volume $(Build.BinariesDirectory)/Release:/build/Release \
            --volume /data/models:/build/models:ro \
            --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \
            --volume /data/onnx:/data/onnx \
            -e NVIDIA_TF32_OVERRIDE=0 \
            $(Repository) \
            /bin/bash -c '
              set -ex; \
              cp /onnxruntime_src/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt /tmp/requirements.txt; \
              ln -s /opt/python/cp38-cp38/bin/python3 /tmp/python3; \
              /tmp/python3 -m pip install -r /tmp/requirements.txt; \
              /tmp/python3 -m pip install /build/Release/dist/*.whl; \
              cd /build/Release && xargs -a /build/Release/perms.txt chmod a+x; \
              cd /onnxruntime_src/java && /onnxruntime_src/java/gradlew cmakeCheck -DcmakeBuildDir=/build/Release -DUSE_CUDA=1; \
              cd /tmp; \
              /tmp/python3 /onnxruntime_src/tools/ci_build/build.py \
                --build_dir /build --config Release --test --skip_submodule_sync --build_shared_lib --parallel --use_binskim_compliant_compile_flags --build_wheel --enable_onnx_tests \
                --use_cuda --cuda_version=${{parameters.CudaVersion}} --cuda_home=/usr/local/cuda --cudnn_home=/usr/local/cuda \
                --enable_pybind --build_java --ctest_path "" ; \
              '
      displayName: 'Run Tests'
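
    # As the template name suggests, check_test_result.yml inspects the unit
    # test results file and fails the job on test failures; the final template
    # cleans the agent's build directory.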
    - template: templates/check_test_result.yml
      parameters:
        FileName: '$(Build.BinariesDirectory)/Release/onnxruntime_test_all.Release.results.xml'

    - template: templates/clean-agent-build-directory-step.yml