Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 15 additions & 16 deletions .github/workflows/build-docker.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,34 +25,33 @@ env:
# used to sync image to mirror registry
DOCKER_MIRROR_REGISTRY_USERNAME: ${{ vars.DOCKER_MIRROR_REGISTRY_USERNAME }}
DOCKER_MIRROR_REGISTRY_PASSWORD: ${{ secrets.DOCKER_MIRROR_REGISTRY_PASSWORD }}

CI_PROJECT_NAME: ${{ vars.CI_PROJECT_NAME || 'LabNow/lab-media' }}

jobs:
qpod_PaddleOCR_cuda112:
name: 'paddleocr-cuda112'
job-PaddleOCR:
name: 'paddleocr-cuda'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: |
source ./tool.sh && free_diskspace
build_image paddleocr-cuda112 latest docker_PaddleOCR/paddle-ocr.Dockerfile --build-arg "BASE_IMG=cuda_11.2"
echo 'paddleocr-models: temp disable the build caused by the paddle run on CPU server'
echo 'build_image paddleocr-models latest docker_PaddleOCR/paddle-ocr-models.Dockerfile --build-arg "BASE_IMG=paddleocr-cuda112" --build-arg "BASE_NAMESPACE_SRC=docker.io/library"'
build_image paddleocr-cuda latest docker_PaddleOCR/paddle-ocr.Dockerfile --build-arg "BASE_IMG=cuda_12.6"
echo 'Skipping paddleocr-models build for now'
echo 'build_image paddleocr-models latest docker_PaddleOCR/paddle-ocr-models.Dockerfile --build-arg "BASE_IMG=paddleocr-cuda" --build-arg "BASE_NAMESPACE_SRC=docker.io/library"'
push_image

qpod_PaddleOCR_cuda120:
name: 'paddleocr-cuda120,doc-ai-cuda120'
job-doc-ai:
name: 'doc-ai-cuda126'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: |
source ./tool.sh && free_diskspace
build_image paddleocr-cuda120 latest docker_PaddleOCR/paddle-ocr.Dockerfile --build-arg "BASE_IMG=cuda_12.0"
build_image doc-ai-cuda120 latest docker_PaddleOCR/paddle-ocr.Dockerfile --build-arg "BASE_IMG=py-nlp-cuda120"
build_image doc-ai-cuda126 latest docker_PaddleOCR/paddle-ocr.Dockerfile --build-arg "BASE_IMG=py-nlp-cuda126"
push_image


qpod_vllm-cuda:
job-vllm-cuda:
name: 'vllm-cuda'
runs-on: ubuntu-latest
steps:
Expand All @@ -62,7 +61,7 @@ jobs:
build_image vllm-cuda latest docker_vllm/vllm-cuda.Dockerfile
push_image

qpod_aidoc-miner:
job-aidoc-miner:
name: 'aidoc-miner'
runs-on: ubuntu-latest
steps:
Expand All @@ -73,7 +72,7 @@ jobs:
push_image


qpod_OpenFace:
job-OpenFace:
name: 'opencv,openface-src,openface'
runs-on: ubuntu-latest
steps:
Expand All @@ -87,7 +86,7 @@ jobs:
build_image openface latest docker_OpenFace/OpenFace.Dockerfile && push_image openface

# To build HF model image for a single model, simply run: `build_image_hf_model bert-base-cased`
qpod_HuggingFaceModels:
job-HuggingFaceModels:
name: 'huggingface-model'
runs-on: ubuntu-latest
steps:
Expand All @@ -105,7 +104,7 @@ jobs:

## Sync all images in this build (listed by "names") to mirror registry.
sync_images:
needs: ["qpod_OpenFace", "qpod_HuggingFaceModels", "qpod_aidoc-miner", "qpod_PaddleOCR_cuda112", "qpod_PaddleOCR_cuda120", "qpod_vllm-cuda"]
needs: ["job-OpenFace", "job-HuggingFaceModels", "job-aidoc-miner", "job-PaddleOCR", "job-doc-ai", "job-vllm-cuda"]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
Expand All @@ -116,5 +115,5 @@ jobs:
source ./tool.sh
printf '%s' "$AUTH_FILE_CONTENT" > .github/workflows/auth.json && ls -alh ./.github/workflows
printenv | grep -v 'PATH' > /tmp/docker.env && echo "REGISTRY_URL=${REGISTRY_DST}" >> /tmp/docker.env
docker run --rm --env-file /tmp/docker.env -v $(pwd):/tmp -w /tmp ${IMG_PREFIX_DST:-qpod}/docker-kit \
docker run --rm --env-file /tmp/docker.env -v $(pwd):/tmp -w /tmp ${IMG_PREFIX_DST:-labnow}/docker-kit \
python /opt/utils/image-syncer/run_jobs.py --auth-file=/tmp/.github/workflows/auth.json
1 change: 1 addition & 0 deletions LICENSE
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
BSD 3-Clause License

Copyright (c) 2020, QPod
Copyright (c) 2024, LabNow
All rights reserved.

Redistribution and use in source and binary forms, with or without
Expand Down
33 changes: 23 additions & 10 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,20 +1,33 @@
# QPod Media Lab - Docker Image Stack
# LabNow Container Image Stack - Lab Media

[![License](https://img.shields.io/badge/License-BSD%203--Clause-green.svg)](https://opensource.org/licenses/BSD-3-Clause)
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/QPod/media-lab/build-docker.yml?branch=main)](https://github.com/QPod/media-lab/actions/workflows/build-docker.yml)
[![Join the Gitter Chat](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/QPod/)
[![Recent Code Update](https://img.shields.io/github/last-commit/QPod/media-lab.svg)](https://github.com/QPod/media-lab/stargazers)
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/LabNow-ai/lab-media/build-docker.yml?branch=main)](https://github.com/LabNow-ai/lab-media/actions/workflows/build-docker.yml)
[![Recent Code Update](https://img.shields.io/github/last-commit/LabNow-ai/lab-media.svg)](https://github.com/LabNow-ai/lab-media/stargazers)
[![Visit Images on DockerHub](https://img.shields.io/badge/DockerHub-Images-green)](https://hub.docker.com/u/labnow)

Please generously STAR★ our project or donate to us! [![GitHub Starts](https://img.shields.io/github/stars/QPod/media-lab.svg?label=Stars&style=social)](https://github.com/QPod/media-lab/stargazers)
Please generously STAR★ our project or donate to us! [![GitHub Stars](https://img.shields.io/github/stars/LabNow-ai/lab-media.svg?label=Stars&style=social)](https://github.com/LabNow-ai/lab-media/stargazers)
[![Donate-PayPal](https://img.shields.io/badge/Donate-PayPal-blue.svg)](https://paypal.me/haobibo)
[![Donate-AliPay](https://img.shields.io/badge/Donate-Alipay-blue.svg)](https://raw.githubusercontent.com/wiki/haobibo/resources/img/Donate-AliPay.png)
[![Donate-WeChat](https://img.shields.io/badge/Donate-WeChat-green.svg)](https://raw.githubusercontent.com/wiki/haobibo/resources/img/Donate-WeChat.png)

## Building blocks for multimedia media projects
Discussion and contributions are welcome:
[![Join the Discord Chat](https://img.shields.io/badge/Discuss_on-Discord-green)](https://discord.gg/kHUzgQxgbJ)
[![Open an Issue on GitHub](https://img.shields.io/github/issues/LabNow-ai/lab-media)](https://github.com/LabNow-ai/lab-media/issues)

Building blocks for the following multimedia project use cases are supported in this project:

## Lab Media - Building blocks for AI models and multimedia projects

`LabNow lab-media` (
[DockerHub](https://hub.docker.com/u/labnow)
| [quay.io](https://quay.io/organization/labnow)
| [GitHub](https://github.com/LabNow-ai/lab-media)
) provides building blocks for the following AI and multi-modal / multi-media project use cases:

- [Transformer models based on HuggingFace transformers](https://hub.docker.com/r/labnow/huggingface-model/tags)
- [OCR (based on paddlepaddle)](https://hub.docker.com/search?q=labnow%2Fpaddleocr) and [Document Intelligence](https://hub.docker.com/search?q=labnow%2Fdoc-ai)
- Image/Video and Audio feature extraction
- [OCR](https://hub.docker.com/search?q=qpod%2Fpaddleocr) and [Document Intelligence](https://hub.docker.com/search?q=qpod%2Fdoc-ai)
- [Transformer models based on HuggingFace transformers](https://hub.docker.com/r/qpod/huggingface-model/tags)
- [Face Landmark Detection](https://hub.docker.com/search?q=qpod%2Fopenface)

## Documentation & Tutorial
[Wiki & Document](https://labnow.ai)
| [中文使用指引(含中国网络镜像)](https://labnow-ai.feishu.cn/wiki/wikcn0sBhMtb1KNRSUTettxWstc)
| [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/LabNow-ai/lab-media)
2 changes: 1 addition & 1 deletion docker_HuggingFace-model/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}
ARG HF_MODEL_NAME="answerdotai/ModernBERT-base"

LABEL HF_MODEL_NAME="${HF_MODEL_NAME}"
LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"
LABEL usage="docker run --rm -it -v $(pwd):/tmp `docker-image-name`"
CMD ["sh", "-c", "ls -alh /home && cp -rf /home/* /tmp/"]

Expand Down
14 changes: 7 additions & 7 deletions docker_HuggingFace-model/README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Store and Download Huggingface Models via docker images

[![Docker Pulls](https://img.shields.io/docker/pulls/qpod/huggingface-model.svg)](https://hub.docker.com/r/qpod/huggingface-model)
[![Docker Starts](https://img.shields.io/docker/stars/qpod/huggingface-model.svg)](https://hub.docker.com/r/qpod/huggingface-model)
[![Docker Pulls](https://img.shields.io/docker/pulls/labnow/huggingface-model.svg)](https://hub.docker.com/r/labnow/huggingface-model)
[![Docker Stars](https://img.shields.io/docker/stars/labnow/huggingface-model.svg)](https://hub.docker.com/r/labnow/huggingface-model)

These docker images help you to store and download Huggingface Models via docker images.

Expand All @@ -12,7 +12,7 @@ This is especially useful when you are:

## Download HuggingFace Models as docker images

You can download the model files simply using `docker pull qpod/huggingface-model:bert-base-cased`, in which the tag name is the HuggingFace model repo name.
You can download the model files simply using `docker pull labnow/huggingface-model:bert-base-cased`, in which the tag name is the HuggingFace model repo name.

The models files are stored at the `/home` directory in the docker images by default.

Expand All @@ -25,26 +25,26 @@ For example, given a HuggingFace model `HF_MODEL_NAME='microsoft/DialoGPT-small'

The shell code to do this conversion is: `HF_MODEL_TAG=$(echo ${HF_MODEL_NAME} | sed 's/\//./g' | tr '[:upper:]' '[:lower:]')`.

We have alrady pre-built several popular models, you can find a list of models here: https://hub.docker.com/r/qpod/huggingface-model/tags
We have already pre-built several popular models, you can find a list of models here: https://hub.docker.com/r/labnow/huggingface-model/tags

## Export the model files to local file system

You can use the following command to export the model files stored in the docker images to your local file system.

```bash
# for model names, refer to dockerhub: https://hub.docker.com/r/qpod/huggingface-model/tags
# for model names, refer to dockerhub: https://hub.docker.com/r/labnow/huggingface-model/tags
MODEL_NAME="bert-base-chinese"

# choose a folder to store model files
LOCAL_REPO="/tmp/models"

mkdir -pv ${LOCAL_REPO} && cd ${LOCAL_REPO}
docker run --rm -it -v $(pwd):/tmp "qpod/huggingface-model:${MODEL_NAME}"
docker run --rm -it -v $(pwd):/tmp "labnow/huggingface-model:${MODEL_NAME}"
```

## Build your own docker image which stores a customized HF model

refer to: https://github.com/QPod/media-lab/tree/main/docker_HuggingFace-model
refer to: https://github.com/LabNow-ai/lab-media/tree/main/docker_HuggingFace-model

```bash
source tool.sh && source docker_HuggingFace-model/script-setup-huggingface.sh
Expand Down
2 changes: 1 addition & 1 deletion docker_OpenFace/OpenCV.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ ARG BASE_NAMESPACE
ARG BASE_IMG="base"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"

COPY work /opt/utils/

Expand Down
2 changes: 1 addition & 1 deletion docker_OpenFace/OpenFace-src.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ ARG BASE_NAMESPACE
ARG BASE_IMG="busybox"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"
LABEL usage="docker run --rm -it -v $(pwd):/tmp `docker-image-name`"
CMD ["sh", "-c", "ls -alh /home && cp -r /home/* /tmp/"]

Expand Down
2 changes: 1 addition & 1 deletion docker_OpenFace/OpenFace.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} AS runtime

FROM runtime

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"

COPY --from=source /home /tmp
COPY work /opt/utils/
Expand Down
4 changes: 2 additions & 2 deletions docker_OpenFace/OpenSMILE.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# Distributed under the terms of the Modified BSD License.

ARG BASE_NAMESPACE
ARG BASE_IMG="qpod/opencv"
ARG BASE_IMG="labnow/opencv"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"

RUN set -eux && source /opt/utils/script-utils.sh \
## Download and build OpenSMILE
Expand Down
28 changes: 14 additions & 14 deletions docker_PaddleOCR/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,25 +2,25 @@

## PaddleOCR

### qpod/paddleocr-cuda102
[![Docker Pulls](https://img.shields.io/docker/pulls/qpod/paddleocr-cuda102.svg)](https://hub.docker.com/r/qpod/paddleocr-cuda102)
[![Docker Starts](https://img.shields.io/docker/stars/qpod/paddleocr-cuda102.svg)](https://hub.docker.com/r/qpod/paddleocr-cuda102)
### labnow/paddleocr-cuda102
[![Docker Pulls](https://img.shields.io/docker/pulls/labnow/paddleocr-cuda102.svg)](https://hub.docker.com/r/labnow/paddleocr-cuda102)
[![Docker Stars](https://img.shields.io/docker/stars/labnow/paddleocr-cuda102.svg)](https://hub.docker.com/r/labnow/paddleocr-cuda102)

### qpod/paddleocr-cuda112
[![Docker Pulls](https://img.shields.io/docker/pulls/qpod/paddleocr-cuda112.svg)](https://hub.docker.com/r/qpod/paddleocr-cuda112)
[![Docker Starts](https://img.shields.io/docker/stars/qpod/paddleocr-cuda112.svg)](https://hub.docker.com/r/qpod/paddleocr-cuda112)
### labnow/paddleocr-cuda112
[![Docker Pulls](https://img.shields.io/docker/pulls/labnow/paddleocr-cuda112.svg)](https://hub.docker.com/r/labnow/paddleocr-cuda112)
[![Docker Stars](https://img.shields.io/docker/stars/labnow/paddleocr-cuda112.svg)](https://hub.docker.com/r/labnow/paddleocr-cuda112)

### qpod/paddleocr-cuda116
[![Docker Pulls](https://img.shields.io/docker/pulls/qpod/paddleocr-cuda116.svg)](https://hub.docker.com/r/qpod/paddleocr-cuda116)
[![Docker Starts](https://img.shields.io/docker/stars/qpod/paddleocr-cuda116.svg)](https://hub.docker.com/r/qpod/paddleocr-cuda116)
### labnow/paddleocr-cuda116
[![Docker Pulls](https://img.shields.io/docker/pulls/labnow/paddleocr-cuda116.svg)](https://hub.docker.com/r/labnow/paddleocr-cuda116)
[![Docker Stars](https://img.shields.io/docker/stars/labnow/paddleocr-cuda116.svg)](https://hub.docker.com/r/labnow/paddleocr-cuda116)

## PaddleOCR Models

[![Docker Pulls](https://img.shields.io/docker/pulls/qpod/paddleocr-models.svg)](https://hub.docker.com/r/qpod/paddleocr-models)
[![Docker Starts](https://img.shields.io/docker/stars/qpod/paddleocr-models.svg)](https://hub.docker.com/r/qpod/paddleocr-models)
[![Docker Pulls](https://img.shields.io/docker/pulls/labnow/paddleocr-models.svg)](https://hub.docker.com/r/labnow/paddleocr-models)
[![Docker Stars](https://img.shields.io/docker/stars/labnow/paddleocr-models.svg)](https://hub.docker.com/r/labnow/paddleocr-models)

## Document Intelligence

### qpod/doc-ai-cuda112
[![Docker Pulls](https://img.shields.io/docker/pulls/qpod/doc-ai-cuda112.svg)](https://hub.docker.com/r/qpod/doc-ai-cuda112)
[![Docker Starts](https://img.shields.io/docker/stars/qpod/doc-ai-cuda112.svg)](https://hub.docker.com/r/qpod/doc-ai-cuda112)
### labnow/doc-ai-cuda112
[![Docker Pulls](https://img.shields.io/docker/pulls/labnow/doc-ai-cuda112.svg)](https://hub.docker.com/r/labnow/doc-ai-cuda112)
[![Docker Stars](https://img.shields.io/docker/stars/labnow/doc-ai-cuda112.svg)](https://hub.docker.com/r/labnow/doc-ai-cuda112)
4 changes: 2 additions & 2 deletions docker_PaddleOCR/paddle-ocr-models.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Distributed under the terms of the Modified BSD License.

ARG BASE_NAMESPACE
ARG BASE_IMG="paddle-cuda116"
ARG BASE_IMG="paddle-3.0"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG} AS builder
RUN set -eux \
&& python /opt/utils/download_paddleocr_models.py \
Expand All @@ -11,6 +11,6 @@ ARG BASE_NAMESPACE_SRC
FROM ${BASE_NAMESPACE_SRC:+$BASE_NAMESPACE_SRC/}busybox
COPY --from=builder /opt/.paddleocr /home/
LABEL MODEL_NAME="paddleocr"
LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"
LABEL usage="docker run --rm -it -v $(pwd):/tmp `docker-image-name`"
CMD ["sh", "-c", "ls -alh /home && cp -r /home/* /tmp/"]
15 changes: 10 additions & 5 deletions docker_PaddleOCR/paddle-ocr.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,21 +1,26 @@
# Distributed under the terms of the Modified BSD License.

ARG BASE_NAMESPACE
ARG BASE_IMG="paddle-cuda"
ARG BASE_IMG="paddle-3.0"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"

COPY work /opt/utils/

RUN set -eux && source /opt/utils/script-setup.sh \
# -----------------------------
&& export CUDA_VER=$(echo ${CUDA_VERSION:-"999"} | cut -c1-4 | sed 's/\.//' ) \
&& export IDX=$( [ -x "$(command -v nvcc)" ] && echo "cu${CUDA_VER:-117}" || echo "cpu" ) \
&& echo "Detected CUDA version=${CUDA_VER} and IDX=${IDX}" \
# -----------------------------
# Step 1. install/update paddlepaddle
&& URL_PYPI_PADDLE="https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html" \
&& URL_PYPI_PADDLE="https://www.paddlepaddle.org.cn/packages/stable/${IDX}/" \
&& CUDA_VER=$(echo "${CUDA_VERSION:0:4}" | sed 's/\.//' ) \
&& PADDLE=$( [ -x "$(command -v nvcc)" ] && echo "paddlepaddle-gpu" || echo "paddlepaddle") \
&& PADDLE_VER=$(pip index versions ${PADDLE} -f ${URL_PYPI_PADDLE} | grep 'Available' | cut -d ":" -f 2 | tr ', ' '\n' | grep ${CUDA_VER:-'.'} | head -n 1) \
&& PADDLE_VER=$(pip index versions ${PADDLE} -i ${URL_PYPI_PADDLE} | grep 'Available' | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1) \
&& V=$(echo ${PADDLE}==${PADDLE_VER}) && echo "to install paddle: ${V}" \
&& pip install ${V} -f ${URL_PYPI_PADDLE} \
&& pip install ${V} -i ${URL_PYPI_PADDLE} \
# Step 2. install required OS libs for PaddleOCR, mainly for images processing
&& apt-get -qq update -yq --fix-missing && apt-get -qq install -yq --no-install-recommends libgl1 libglib2.0-0 \
# Step 3. install PaddleOCR
Expand Down
2 changes: 1 addition & 1 deletion docker_aidoc/miner.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ ARG BASE_NAMESPACE
ARG BASE_IMG="py-nlp-cuda128"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"

RUN set -eux \
# ----------
Expand Down
13 changes: 9 additions & 4 deletions docker_vllm/vllm-cuda.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,14 +1,19 @@
# Distributed under the terms of the Modified BSD License.

ARG BASE_NAMESPACE
ARG BASE_IMG="torch-cuda126"
ARG BASE_IMG="torch-cuda128"
FROM ${BASE_NAMESPACE:+$BASE_NAMESPACE/}${BASE_IMG}

LABEL maintainer="haobibo@gmail.com"
LABEL maintainer="postmaster@labnow.ai"

# https://docs.vllm.ai/en/latest/getting_started/installation/gpu.html
RUN set -eux && source /opt/utils/script-setup.sh \
&& pip install vllm \
# https://docs.vllm.ai/en/latest/getting_started/installation/gpu.html
# -----------------------------
&& export CUDA_VER=$(echo ${CUDA_VERSION:-"999"} | cut -c1-4 | sed 's/\.//' ) \
&& export IDX=$( [ -x "$(command -v nvcc)" ] && echo "cu${CUDA_VER:-117}" || echo "cpu" ) \
&& echo "Detected CUDA version=${CUDA_VER} and IDX=${IDX}" \
# -----------------------------
&& pip install vllm --index-url "https://download.pytorch.org/whl/${IDX}" --extra-index-url https://pypi.org/simple \
# && cd /tmp/ \
# && git clone https://github.com/vllm-project/vllm.git \
# && cd /tmp/vllm \
Expand Down
4 changes: 2 additions & 2 deletions tool.sh
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
#!/bin/bash
set -xu

CI_PROJECT_NAME=${GITHUB_REPOSITORY:-"QPod/lab-media"}
CI_PROJECT_NAME=${CI_PROJECT_NAME:-$GITHUB_REPOSITORY}
CI_PROJECT_BRANCH=${GITHUB_HEAD_REF:-"main"}
CI_PROJECT_SPACE=$(echo "${CI_PROJECT_BRANCH}" | cut -f1 -d'/')

if [ "${CI_PROJECT_BRANCH}" = "main" ] ; then
# If on the main branch, docker images namespace will be same as CI_PROJECT_NAME's name space
export CI_PROJECT_NAMESPACE="$(dirname ${CI_PROJECT_NAME})" ;
else
# not main branch, docker namespace = {CI_PROJECT_NAME's name space} + "0" + {1st substr before / in CI_PROJECT_SPACE}
  # not main branch, docker namespace = {CI_PROJECT_NAME's name space} + "0" + {1st substr before / in CI_PROJECT_SPACE} — NOTE(review): the code below still uses "0" as the separator, not "-"; confirm which is intended
export CI_PROJECT_NAMESPACE="$(dirname ${CI_PROJECT_NAME})0${CI_PROJECT_SPACE}" ;
fi

Expand Down