From 38485177140f69444d8be7320fe0f0c2e07145b6 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 19 Sep 2025 19:46:00 +0300
Subject: [PATCH 1/9] ci : migrate ggml ci to self-hosted runners

---
 .github/workflows/build.yml | 120 ++++++++++++++++++++++++++++++++++++
 ci/README.md | 12 +---
 2 files changed, 121 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ff42b19f1d51c..1d3bc3fd0c402 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1247,3 +1247,123 @@ jobs:
             -DGGML_CANN=on \
             -DSOC_TYPE=${{ matrix.device }}
           cmake --build build -j $(nproc)
+
+  ggml-ci-x64-cpu-low-perf:
+    runs-on: [self-hosted, Linux, X64, CPU, low-perf]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-arm64-cpu-low-perf:
+    runs-on: [self-hosted, Linux, ARM64, CPU, low-perf]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-cpu-high-perf:
+    runs-on: [self-hosted, Linux, X64, CPU, high-perf]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-arm64-cpu-high-perf:
+    runs-on: [self-hosted, Linux, ARM64, CPU, high-perf]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-nvidia-cuda:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          nvidia-smi
+          GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-nvidia-vulkan:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-cpu-amx:
+    runs-on: [self-hosted, Linux, X64, CPU, AMX]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-mac-metal:
+    runs-on: [self-hosted, macOS, ARM64]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          GG_BUILD_METAL=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
+
+# TODO: install vulkan drivers
+# ggml-ci-mac-vulkan:
+#   runs-on: [self-hosted, macOS, ARM64]
+#
+#   steps:
+#     - name: Clone
+#       id: checkout
+#       uses: actions/checkout@v4
+#
+#     - name: Test
+#       id: ggml-ci
+#       run: |
+#         GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
diff --git a/ci/README.md b/ci/README.md
index 8eebe988d5874..87b811acbcd65 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -1,16 +1,6 @@
 # CI
 
-In addition to [Github Actions](https://github.com/ggml-org/llama.cpp/actions) `llama.cpp` uses a custom CI framework:
-
-https://github.com/ggml-org/ci
-
-It monitors the `master` branch for new commits and runs the
-[ci/run.sh](https://github.com/ggml-org/llama.cpp/blob/master/ci/run.sh) script on dedicated cloud instances. This allows us
-to execute heavier workloads compared to just using Github Actions. 
Also with time, the cloud instances will be scaled -to cover various hardware architectures, including GPU and Apple Silicon instances. - -Collaborators can optionally trigger the CI run by adding the `ggml-ci` keyword to their commit message. -Only the branches of this repo are monitored for this keyword. +This CI implements heavy workflows that are running on self-hosted runners with various hardware configurations. It is a good practice, before publishing changes to execute the full CI locally on your machine: From dd3d5c6a4e7459eccfd73bd260713a8957574776 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 20 Sep 2025 10:17:50 +0300 Subject: [PATCH 2/9] ci : add T4 runner --- .github/workflows/build.yml | 50 ++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1d3bc3fd0c402..46c829077bd8e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1300,8 +1300,8 @@ jobs: run: | bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp - ggml-ci-x64-nvidia-cuda: - runs-on: [self-hosted, Linux, X64, NVIDIA] + ggml-ci-x64-nvidia-v100-cuda: + runs-on: [self-hosted, Linux, X64, NVIDIA, V100] steps: - name: Clone @@ -1314,8 +1314,8 @@ jobs: nvidia-smi GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp - ggml-ci-x64-nvidia-vulkan: - runs-on: [self-hosted, Linux, X64, NVIDIA] + ggml-ci-x64-nvidia-v100-vulkan: + runs-on: [self-hosted, Linux, X64, NVIDIA, V100] steps: - name: Clone @@ -1328,6 +1328,48 @@ jobs: vulkaninfo GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp + ggml-ci-x64-nvidia-t4-cuda: + runs-on: [self-hosted, Linux, X64, NVIDIA, T4] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + nvidia-smi + GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp + + ggml-ci-x64-nvidia-t4-vulkan: + runs-on: [self-hosted, Linux, X64, NVIDIA, T4] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + vulkaninfo + GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp + + ggml-ci-x64-nvidia-t4-vulkan-coopmat1: + runs-on: [self-hosted, Linux, X64, NVIDIA, T4] + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Test + id: ggml-ci + run: | + vulkaninfo + GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp + ggml-ci-x64-cpu-amx: runs-on: [self-hosted, Linux, X64, CPU, AMX] From 16801fb8d5ecf515b543b53cc5edeaa22fde2007 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 20 Sep 2025 10:40:45 +0300 Subject: [PATCH 3/9] ci : add instructions for adding self-hosted runners --- ci/README-MUSA.md | 35 +++++++++++++++++++++++++++++++++++ ci/README.md | 45 ++++++++++----------------------------------- 2 files changed, 45 insertions(+), 35 deletions(-) create mode 100644 ci/README-MUSA.md diff --git a/ci/README-MUSA.md b/ci/README-MUSA.md new file mode 100644 index 0000000000000..bb6ca2a3dc807 --- /dev/null +++ b/ci/README-MUSA.md @@ -0,0 +1,35 @@ +## Running MUSA CI in a Docker Container + +Assuming `$PWD` is the root of the `llama.cpp` repository, follow these steps to set up and run MUSA CI in a Docker container: + +### 1. Create a local directory to store cached models, configuration files and venv: + +```bash +mkdir -p $HOME/llama.cpp/ci-cache +``` + +### 2. 
Create a local directory to store CI run results:
+
+```bash
+mkdir -p $HOME/llama.cpp/ci-results
+```
+
+### 3. Start a Docker container and run the CI:
+
+```bash
+docker run --privileged -it \
+    -v $HOME/llama.cpp/ci-cache:/ci-cache \
+    -v $HOME/llama.cpp/ci-results:/ci-results \
+    -v $PWD:/ws -w /ws \
+    mthreads/musa:rc4.2.0-devel-ubuntu22.04-amd64
+```
+
+Inside the container, execute the following commands:
+
+```bash
+apt update -y && apt install -y bc cmake ccache git python3.10-venv time unzip wget
+git config --global --add safe.directory /ws
+GG_BUILD_MUSA=1 bash ./ci/run.sh /ci-results /ci-cache
+```
+
+This setup ensures that the CI runs within an isolated Docker environment while maintaining cached files and results across runs.
diff --git a/ci/README.md b/ci/README.md
index 87b811acbcd65..d25bdd26fe1c9 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -1,8 +1,10 @@
 # CI
 
-This CI implements heavy workflows that are running on self-hosted runners with various hardware configurations.
+This CI implements heavy-duty workflows that run on self-hosted runners. Typically, the purpose of these workflows is to
+cover hardware configurations that are not available from GitHub-hosted runners and/or require more computational
+resources than normally available.
 
-It is a good practice, before publishing changes to execute the full CI locally on your machine:
+It is a good practice, before publishing changes, to execute the full CI locally on your machine. For example:
 
 ```bash
 mkdir tmp
@@ -19,40 +21,13 @@ GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 
 # with MUSA support
 GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
-```
-
-## Running MUSA CI in a Docker Container
-
-Assuming `$PWD` is the root of the `llama.cpp` repository, follow these steps to set up and run MUSA CI in a Docker container:
-
-### 1. Create a local directory to store cached models, configuration files and venv:
-
-```bash
-mkdir -p $HOME/llama.cpp/ci-cache
-```
-
-### 2. Create a local directory to store CI run results:
-```bash
-mkdir -p $HOME/llama.cpp/ci-results
-```
-
-### 3. Start a Docker container and run the CI:
-
-```bash
-docker run --privileged -it \
-    -v $HOME/llama.cpp/ci-cache:/ci-cache \
-    -v $HOME/llama.cpp/ci-results:/ci-results \
-    -v $PWD:/ws -w /ws \
-    mthreads/musa:rc4.2.0-devel-ubuntu22.04-amd64
+# etc.
 ```
 
-Inside the container, execute the following commands:
-
-```bash
-apt update -y && apt install -y bc cmake ccache git python3.10-venv time unzip wget
-git config --global --add safe.directory /ws
-GG_BUILD_MUSA=1 bash ./ci/run.sh /ci-results /ci-cache
-```
+# Adding self-hosted runners
 
-This setup ensures that the CI runs within an isolated Docker environment while maintaining cached files and results across runs. 
+- Add a self-hosted `ggml-ci` workflow to `.github/workflows/build.yml` with an appropriate label
+- Request a runner token from `ggml-org` (for example, via a comment in the PR or email)
+- Set up a machine using the received token ([docs](https://docs.github.com/en/actions/how-tos/manage-runners/self-hosted-runners/add-runners))
+- Optionally update [ci/run.sh](https://github.com/ggml-org/llama.cpp/blob/master/ci/run.sh) to build and run on the target platform by gating the implementation with a `GG_BUILD_...` environment variable

From 0235ddc8107c5388bec40e51f7f17b7a394cdfab Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sat, 20 Sep 2025 11:24:40 +0300
Subject: [PATCH 4/9] ci : disable test-backend-ops from debug builds due to slowness

---
 ci/run.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/run.sh b/ci/run.sh
index 8e20b4e24dbc7..ad8ed26bb10a4 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -150,7 +150,7 @@ function gg_run_ctest_debug {
     (time cmake -DCMAKE_BUILD_TYPE=Debug ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
     (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log
 
-    (time ctest --output-on-failure -L main -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log
+    (time ctest --output-on-failure -L main -E "test-opt|test-backend-ops" ) 2>&1 | tee -a $OUT/${ci}-ctest.log
 
     set +e
 }

From 91f690a14dc059feab8b03be404a7b826ffd15b7 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 21 Sep 2025 13:21:27 +0300
Subject: [PATCH 5/9] ci : add AMD V710 runner (vulkan)

---
 .github/workflows/build.yml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 46c829077bd8e..7dca99789616c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1383,6 +1383,20 @@ jobs:
         run: |
           bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
+  ggml-ci-x64-amd-v710-vulkan:
+    runs-on: [self-hosted, Linux, X64, AMD, V710]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]
 

From 58b1b12d29f39cda1bd16939a7881a040bd89134 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 21 Sep 2025 14:00:20 +0300
Subject: [PATCH 6/9] cont : add ROCM workflow

---
 .github/workflows/build.yml | 14 ++++++++++++++
 ci/run.sh | 12 +++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 7dca99789616c..5e6705103b11e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1397,6 +1397,20 @@ jobs:
           vulkaninfo
           GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
+  ggml-ci-x64-amd-v710-rocm:
+    runs-on: [self-hosted, Linux, X64, AMD, V710]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo
+          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]
 
diff --git a/ci/run.sh b/ci/run.sh
index ad8ed26bb10a4..95717cbd94910 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -65,6 +65,16 @@ if [ ! -z ${GG_BUILD_CUDA} ]; then
     fi
 fi
 
+if [ ! -z ${GG_BUILD_ROCM} ]; then
+    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_HIP=ON"
+    if [ -z ${GG_BUILD_AMDGPU_TARGETS} ]; then
+        echo "Missing GG_BUILD_AMDGPU_TARGETS, please set it to your GPU architecture (e.g. 
gfx90a, gfx1100, etc.)" + exit 1 + fi + + CMAKE_EXTRA="${CMAKE_EXTRA} -DAMDGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}" +fi + if [ ! -z ${GG_BUILD_SYCL} ]; then if [ -z ${ONEAPI_ROOT} ]; then echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:" @@ -883,7 +893,7 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then fi if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then - if [ -z ${GG_BUILD_CUDA} ] && [ -z ${GG_BUILD_VULKAN} ]; then + if [ -z ${GG_BUILD_CUDA} ] && [ -z ${GG_BUILD_VULKAN} ] && [ -z ${GG_BUILD_ROCM} ] && [ -z ${GG_BUILD_METAL} ]; then test $ret -eq 0 && gg_run pythia_1_4b else test $ret -eq 0 && gg_run pythia_2_8b From 9b2864149b572549beb8734c9c8942cc6722acad Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 21 Sep 2025 15:29:55 +0300 Subject: [PATCH 7/9] ci : switch to qwen3 0.6b model --- ci/run.sh | 343 +++++------------------------------------------------- 1 file changed, 32 insertions(+), 311 deletions(-) diff --git a/ci/run.sh b/ci/run.sh index 95717cbd94910..ff6913c4e8243 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -259,15 +259,9 @@ function gg_sum_test_scripts_release { } function gg_get_model { - local gguf_0="$MNT/models/pythia/1.4B/ggml-model-f16.gguf" - local gguf_1="$MNT/models/pythia/2.8B/ggml-model-f16.gguf" - local gguf_2="$MNT/models/open-llama/7B-v2/ggml-model-f16.gguf" + local gguf_0="$MNT/models/qwen3/0.6B/ggml-model-bf16.gguf" if [[ -s $gguf_0 ]]; then echo -n "$gguf_0" - elif [[ -s $gguf_1 ]]; then - echo -n "$gguf_1" - elif [[ -s $gguf_2 ]]; then - echo -n "$gguf_2" else echo >&2 "No model found. Can't run gg_run_ctest_with_model." exit 1 @@ -326,158 +320,22 @@ function gg_sum_ctest_with_model_release { gg_printf '```\n' } -# open_llama_7b_v2 +# qwen3_0_6b -function gg_run_open_llama_7b_v2 { +function gg_run_qwen3_0_6b { cd ${SRC} - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/config.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/resolve/main/tokenizer.model - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/tokenizer_config.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/special_tokens_map.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/pytorch_model.bin.index.json - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/resolve/main/pytorch_model-00001-of-00002.bin - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/resolve/main/pytorch_model-00002-of-00002.bin - gg_wget models-mnt/open-llama/7B-v2/ https://huggingface.co/openlm-research/open_llama_7b_v2/raw/main/generation_config.json + gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/config.json + gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/tokenizer.json + gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/tokenizer_config.json + #gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/special_tokens_map.json + gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/resolve/main/model.safetensors - gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip - unzip -o 
models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/ - - path_models="../models-mnt/open-llama/7B-v2" - path_wiki="../models-mnt/wikitext/wikitext-2-raw" - - rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release - - set -e - - (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log - (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log - - python3 ../examples/convert_legacy_llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf - - model_f16="${path_models}/ggml-model-f16.gguf" - model_q8_0="${path_models}/ggml-model-q8_0.gguf" - model_q4_0="${path_models}/ggml-model-q4_0.gguf" - model_q4_1="${path_models}/ggml-model-q4_1.gguf" - model_q5_0="${path_models}/ggml-model-q5_0.gguf" - model_q5_1="${path_models}/ggml-model-q5_1.gguf" - model_q2_k="${path_models}/ggml-model-q2_k.gguf" - model_q3_k="${path_models}/ggml-model-q3_k.gguf" - model_q4_k="${path_models}/ggml-model-q4_k.gguf" - model_q5_k="${path_models}/ggml-model-q5_k.gguf" - model_q6_k="${path_models}/ggml-model-q6_k.gguf" - - wiki_test="${path_wiki}/wiki.test.raw" - - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 - ./bin/llama-quantize ${model_f16} ${model_q4_0} q4_0 - ./bin/llama-quantize ${model_f16} ${model_q4_1} q4_1 - ./bin/llama-quantize ${model_f16} ${model_q5_0} q5_0 - ./bin/llama-quantize ${model_f16} ${model_q5_1} q5_1 - ./bin/llama-quantize ${model_f16} ${model_q2_k} q2_k - ./bin/llama-quantize ${model_f16} ${model_q3_k} q3_k - ./bin/llama-quantize ${model_f16} ${model_q4_k} q4_k - ./bin/llama-quantize ${model_f16} ${model_q5_k} q5_k - ./bin/llama-quantize ${model_f16} ${model_q6_k} q6_k - - (time ./bin/llama-cli -no-cnv --model ${model_f16} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-cli -no-cnv --model ${model_q8_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_1} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_1} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-cli -no-cnv --model ${model_q2_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q3_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q6_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I 
believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-perplexity --model ${model_q4_1} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-perplexity --model ${model_q5_0} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-perplexity --model ${model_q5_1} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-perplexity --model ${model_q2_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-perplexity --model ${model_q3_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-perplexity --model ${model_q4_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - - function check_ppl { - qnt="$1" - ppl=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1) - - if [ $(echo "$ppl > 20.0" | bc) -eq 1 ]; then - printf ' - %s @ %s (FAIL: ppl > 20.0)\n' "$qnt" "$ppl" - return 20 - fi - - printf ' - %s @ %s OK\n' "$qnt" "$ppl" - return 0 - } - - check_ppl "f16" "$(cat $OUT/${ci}-tg-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q5_0" "$(cat $OUT/${ci}-tg-q5_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q5_1" "$(cat $OUT/${ci}-tg-q5_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q2_k" "$(cat $OUT/${ci}-tg-q2_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q3_k" "$(cat $OUT/${ci}-tg-q3_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q4_k" "$(cat $OUT/${ci}-tg-q4_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a 
$OUT/${ci}-ppl.log - check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - - cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log - - set +e -} - -function gg_sum_open_llama_7b_v2 { - gg_printf '### %s\n\n' "${ci}" - - gg_printf 'OpenLLaMA 7B-v2:\n' - gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" - gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)" - gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)" - gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)" - gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)" - gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)" - gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_1.log)" - gg_printf '- q5_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_0.log)" - gg_printf '- q5_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_1.log)" - gg_printf '- q2_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q2_k.log)" - gg_printf '- q3_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q3_k.log)" - gg_printf '- q4_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_k.log)" - gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)" - gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)" - gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)" -} - -# pythia_1.4b - -function gg_run_pythia_1_4b { - cd ${SRC} - - gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/config.json - gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/tokenizer.json - gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/tokenizer_config.json - gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/raw/main/special_tokens_map.json - gg_wget models-mnt/pythia/1.4B/ https://huggingface.co/EleutherAI/pythia-1.4b/resolve/main/pytorch_model.bin gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/ - head -n 60 models-mnt/wikitext/wikitext-2-raw/wiki.test.raw > models-mnt/wikitext/wikitext-2-raw/wiki.test-60.raw - path_models="../models-mnt/pythia/1.4B" + path_models="../models-mnt/qwen3/0.6B" path_wiki="../models-mnt/wikitext/wikitext-2-raw" rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release @@ -487,140 +345,9 @@ function gg_run_pythia_1_4b { (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. 
) 2>&1 | tee -a $OUT/${ci}-cmake.log (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log - python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf + python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-bf16.gguf --outtype bf16 - model_f16="${path_models}/ggml-model-f16.gguf" - model_q8_0="${path_models}/ggml-model-q8_0.gguf" - model_q4_0="${path_models}/ggml-model-q4_0.gguf" - model_q4_1="${path_models}/ggml-model-q4_1.gguf" - model_q5_0="${path_models}/ggml-model-q5_0.gguf" - model_q5_1="${path_models}/ggml-model-q5_1.gguf" - model_q2_k="${path_models}/ggml-model-q2_k.gguf" - model_q3_k="${path_models}/ggml-model-q3_k.gguf" - model_q4_k="${path_models}/ggml-model-q4_k.gguf" - model_q5_k="${path_models}/ggml-model-q5_k.gguf" - model_q6_k="${path_models}/ggml-model-q6_k.gguf" - - wiki_test_60="${path_wiki}/wiki.test-60.raw" - - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 - ./bin/llama-quantize ${model_f16} ${model_q4_0} q4_0 - ./bin/llama-quantize ${model_f16} ${model_q4_1} q4_1 - ./bin/llama-quantize ${model_f16} ${model_q5_0} q5_0 - ./bin/llama-quantize ${model_f16} ${model_q5_1} q5_1 - ./bin/llama-quantize ${model_f16} ${model_q2_k} q2_k - ./bin/llama-quantize ${model_f16} ${model_q3_k} q3_k - ./bin/llama-quantize ${model_f16} ${model_q4_k} q4_k - ./bin/llama-quantize ${model_f16} ${model_q5_k} q5_k - ./bin/llama-quantize ${model_f16} ${model_q6_k} q6_k - - (time ./bin/llama-cli -no-cnv --model ${model_f16} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-cli -no-cnv --model ${model_q8_0} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_0} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_1} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_0} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_1} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-cli -no-cnv --model ${model_q2_k} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q3_k} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_k} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_k} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q6_k} -ngl 99 -c 0 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-perplexity --model ${model_f16} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log - (time ./bin/llama-perplexity --model ${model_q8_0} -f 
${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-perplexity --model ${model_q4_1} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-perplexity --model ${model_q5_0} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-perplexity --model ${model_q5_1} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-perplexity --model ${model_q2_k} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-perplexity --model ${model_q3_k} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-perplexity --model ${model_q4_k} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - - (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - - function check_ppl { - qnt="$1" - ppl=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1) - - if [ $(echo "$ppl > 20.0" | bc) -eq 1 ]; then - printf ' - %s @ %s (FAIL: ppl > 20.0)\n' "$qnt" "$ppl" - return 20 - fi - - printf ' - %s @ %s OK\n' "$qnt" "$ppl" - return 0 - } - - check_ppl "f16" "$(cat $OUT/${ci}-tg-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q5_0" "$(cat $OUT/${ci}-tg-q5_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q5_1" "$(cat $OUT/${ci}-tg-q5_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - #check_ppl "q2_k" "$(cat $OUT/${ci}-tg-q2_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log # note: ppl > 20.0 for this quant and model - check_ppl "q3_k" "$(cat $OUT/${ci}-tg-q3_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q4_k" "$(cat $OUT/${ci}-tg-q4_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log - - cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log - - set +e -} - -function gg_sum_pythia_1_4b { - gg_printf '### %s\n\n' "${ci}" - - gg_printf 'Pythia 1.4B:\n' - gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" - gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)" - gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)" - gg_printf '- f16: 
\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)" - gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)" - gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)" - gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_1.log)" - gg_printf '- q5_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_0.log)" - gg_printf '- q5_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_1.log)" - gg_printf '- q2_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q2_k.log)" - gg_printf '- q3_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q3_k.log)" - gg_printf '- q4_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_k.log)" - gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)" - gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)" - gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)" -} - -# pythia_2_8b - -function gg_run_pythia_2_8b { - cd ${SRC} - - gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/config.json - gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/tokenizer.json - gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/tokenizer_config.json - gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/raw/main/special_tokens_map.json - gg_wget models-mnt/pythia/2.8B/ https://huggingface.co/EleutherAI/pythia-2.8b/resolve/main/pytorch_model.bin - - gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip - unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/ - - path_models="../models-mnt/pythia/2.8B" - path_wiki="../models-mnt/wikitext/wikitext-2-raw" - - rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release - - set -e - - (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. 
) 2>&1 | tee -a $OUT/${ci}-cmake.log - (time make -j$(nproc) ) 2>&1 | tee -a $OUT/${ci}-make.log - - python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf - - model_f16="${path_models}/ggml-model-f16.gguf" + model_bf16="${path_models}/ggml-model-bf16.gguf" model_q8_0="${path_models}/ggml-model-q8_0.gguf" model_q4_0="${path_models}/ggml-model-q4_0.gguf" model_q4_1="${path_models}/ggml-model-q4_1.gguf" @@ -634,18 +361,18 @@ function gg_run_pythia_2_8b { wiki_test="${path_wiki}/wiki.test.raw" - ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0 - ./bin/llama-quantize ${model_f16} ${model_q4_0} q4_0 - ./bin/llama-quantize ${model_f16} ${model_q4_1} q4_1 - ./bin/llama-quantize ${model_f16} ${model_q5_0} q5_0 - ./bin/llama-quantize ${model_f16} ${model_q5_1} q5_1 - ./bin/llama-quantize ${model_f16} ${model_q2_k} q2_k - ./bin/llama-quantize ${model_f16} ${model_q3_k} q3_k - ./bin/llama-quantize ${model_f16} ${model_q4_k} q4_k - ./bin/llama-quantize ${model_f16} ${model_q5_k} q5_k - ./bin/llama-quantize ${model_f16} ${model_q6_k} q6_k - - (time ./bin/llama-cli -no-cnv --model ${model_f16} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + ./bin/llama-quantize ${model_bf16} ${model_q8_0} q8_0 + ./bin/llama-quantize ${model_bf16} ${model_q4_0} q4_0 + ./bin/llama-quantize ${model_bf16} ${model_q4_1} q4_1 + ./bin/llama-quantize ${model_bf16} ${model_q5_0} q5_0 + ./bin/llama-quantize ${model_bf16} ${model_q5_1} q5_1 + ./bin/llama-quantize ${model_bf16} ${model_q2_k} q2_k + ./bin/llama-quantize ${model_bf16} ${model_q3_k} q3_k + ./bin/llama-quantize ${model_bf16} ${model_q4_k} q4_k + ./bin/llama-quantize ${model_bf16} ${model_q5_k} q5_k + ./bin/llama-quantize ${model_bf16} ${model_q6_k} q6_k + + (time ./bin/llama-cli -no-cnv --model ${model_bf16} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log (time ./bin/llama-cli -no-cnv --model ${model_q8_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log (time ./bin/llama-cli -no-cnv --model ${model_q4_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log (time ./bin/llama-cli -no-cnv --model ${model_q4_1} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log @@ -657,7 +384,7 @@ function gg_run_pythia_2_8b { (time ./bin/llama-cli -no-cnv --model ${model_q5_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log (time ./bin/llama-cli -no-cnv --model ${model_q6_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - (time ./bin/llama-perplexity --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/llama-perplexity --model ${model_bf16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log (time ./bin/llama-perplexity --model 
${model_q4_1} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log @@ -669,7 +396,7 @@ function gg_run_pythia_2_8b { (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log - (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log + (time ./bin/llama-imatrix --model ${model_bf16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log @@ -689,7 +416,7 @@ function gg_run_pythia_2_8b { return 0 } - check_ppl "f16" "$(cat $OUT/${ci}-tg-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log + check_ppl "bf16" "$(cat $OUT/${ci}-tg-bf16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log @@ -706,14 +433,14 @@ function gg_run_pythia_2_8b { set +e } -function gg_sum_pythia_2_8b { +function gg_sum_qwen3_0_6b { gg_printf '### %s\n\n' "${ci}" gg_printf 'Pythia 2.8B:\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)" gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)" - gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)" + gg_printf '- bf16:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-bf16.log)" gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)" gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)" gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_1.log)" @@ -892,16 +619,10 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then test $ret -eq 0 && gg_run test_scripts_release fi - if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then - if [ -z ${GG_BUILD_CUDA} ] && [ -z ${GG_BUILD_VULKAN} ] && [ -z ${GG_BUILD_ROCM} ] && [ -z ${GG_BUILD_METAL} ]; then - test $ret -eq 0 && gg_run pythia_1_4b - else - test $ret -eq 0 && gg_run pythia_2_8b - #test $ret -eq 0 && gg_run open_llama_7b_v2 - fi - test $ret -eq 0 && gg_run ctest_with_model_debug - test $ret -eq 0 && gg_run ctest_with_model_release - fi + test $ret -eq 0 && gg_run qwen3_0_6b + + test $ret -eq 0 && gg_run ctest_with_model_debug + test $ret -eq 0 && gg_run ctest_with_model_release fi exit $ret From 535334569323bc6ce4bdcce74463dd22aece8ca5 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 21 Sep 2025 15:44:21 +0300 Subject: [PATCH 8/9] cont : fix the context size --- ci/run.sh | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/ci/run.sh b/ci/run.sh index ff6913c4e8243..64bc88b335da0 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -372,17 +372,17 @@ function gg_run_qwen3_0_6b { ./bin/llama-quantize ${model_bf16} ${model_q5_k} q5_k ./bin/llama-quantize ${model_bf16} ${model_q6_k} q6_k - (time ./bin/llama-cli -no-cnv --model ${model_bf16} -t 1 -ngl 99 -c 0 -s 1234 -n 
256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log - (time ./bin/llama-cli -no-cnv --model ${model_q8_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_1} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_0} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_1} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log - (time ./bin/llama-cli -no-cnv --model ${model_q2_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q3_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q4_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q5_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log - (time ./bin/llama-cli -no-cnv --model ${model_q6_k} -t 1 -ngl 99 -c 0 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + (time ./bin/llama-cli -no-cnv --model ${model_bf16} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log + (time ./bin/llama-cli -no-cnv --model ${model_q8_0} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + (time ./bin/llama-cli -no-cnv --model ${model_q4_0} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log + (time ./bin/llama-cli -no-cnv --model ${model_q4_1} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log + (time ./bin/llama-cli -no-cnv --model ${model_q5_0} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log + (time ./bin/llama-cli -no-cnv --model ${model_q5_1} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log + (time ./bin/llama-cli -no-cnv --model ${model_q2_k} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log + (time ./bin/llama-cli -no-cnv --model ${model_q3_k} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log + (time ./bin/llama-cli -no-cnv --model ${model_q4_k} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log + (time ./bin/llama-cli -no-cnv --model ${model_q5_k} -t 1 -ngl 99 -c 
2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log + (time ./bin/llama-cli -no-cnv --model ${model_q6_k} -t 1 -ngl 99 -c 2048 -s 1234 -n 256 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log (time ./bin/llama-perplexity --model ${model_bf16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log @@ -398,10 +398,10 @@ function gg_run_qwen3_0_6b { (time ./bin/llama-imatrix --model ${model_bf16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log - (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 2048 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 2048 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 2048 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log + (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 2048 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { qnt="$1" From 85de2b41029bef131d91c91c1ca78b8427d58ba2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 21 Sep 2025 16:49:48 +0300 Subject: [PATCH 9/9] ci : add TODOs [no ci] --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5e6705103b11e..510b25b7598ae 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1248,6 +1248,8 @@ jobs: -DSOC_TYPE=${{ matrix.device }} cmake --build build -j $(nproc) +# TODO: simplify the following workflows using a matrix +# TODO: run lighter CI on PRs and the full CI only on master (if needed) ggml-ci-x64-cpu-low-perf: runs-on: [self-hosted, Linux, X64, CPU, low-perf]