
ggml-model: Use number of threads by number of available cores #76
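The change under test sizes the model's worker-thread count from the number of cores available on the machine rather than a fixed constant. A minimal sketch of that idea, assuming GLib's g_get_num_processors() as the core-count source (the actual ggml-gobject code may query the count differently or clamp the value):

/* Hypothetical sketch: derive a thread count from the available cores.
 * g_get_num_processors() reports the processors usable by this process
 * and always returns at least 1. */
#include <glib.h>

static guint
choose_n_threads (void)
{
  return g_get_num_processors ();
}

int
main (void)
{
  g_print ("using %u threads\n", choose_n_threads ());
  return 0;
}

Built against glib-2.0 via pkg-config; the resulting count would then be handed to whichever ggml entry point accepts a thread count.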

Workflow file for this run

name: Meson
on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]
env:
  # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.)
  BUILD_TYPE: Release
  XDG_DATA_HOME: ${{ github.workspace }}/data
jobs:
  build:
    # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac.
    # You can convert this to a matrix build if you need cross-platform coverage.
    # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
    runs-on: ubuntu-latest
    steps:
      - name: Update apt repositories
        run: sudo apt-get update
      - name: Install apt dependencies
        run: sudo apt-get install build-essential gir1.2-soup-3.0 python3-pip cmake gobject-introspection libglib2.0-dev ninja-build gjs libgirepository1.0-dev libsoup-3.0-dev
      - name: Install pip dependencies
        run: sudo pip3 install meson
      - name: Clone jasmine-gjs
        uses: actions/checkout@v3
        with:
          path: jasmine-gjs
          repository: ptomato/jasmine-gjs
      - name: Configure jasmine-gjs
        working-directory: jasmine-gjs
        run: meson build
      - name: Build jasmine-gjs
        working-directory: jasmine-gjs/build
        run: ninja
      - name: Install jasmine-gjs
        working-directory: jasmine-gjs/build
        run: sudo ninja install
      - name: Clone ggml
        uses: actions/checkout@v3
        with:
          path: ggml
          repository: ggerganov/ggml
          ref: da3015e52487d8fdb1a05de0879cdc3fc8976bc2
      - name: Build ggml
        uses: threeal/cmake-action@v1.2.0
        with:
          generator: Ninja
          source-dir: ggml
          build-dir: ggml/build
          run-build: true
          options: CMAKE_INSTALL_PREFIX=/usr BUILD_SHARED_LIBS=ON GGML_BUILD_TESTS=OFF GGML_BUILD_EXAMPLES=OFF
      - name: Make GPT2 directory
        run: mkdir -p ${{ github.workspace }}/data/ggml-gobject/0/models
      - name: Cache GPT2
        id: cache-gpt2
        uses: actions/cache@v3
        with:
          path: ${{ github.workspace }}/data/ggml-gobject/0/models/ggml-model-gpt-2-117M.bin
          key: ${{ runner.os }}-ggml-gpt2-117M
      - name: Fetch GPT2
        if: steps.cache-gpt2.outputs.cache-hit != 'true'
        run: mkdir -p ggml/build && cd ggml/build && ../examples/gpt-2/download-ggml-model.sh 117M && mv models/gpt-2-117M/ggml-model.bin ${{ github.workspace }}/data/ggml-gobject/0/models/ggml-model-gpt-2-117M.bin
      - name: Install ggml
        working-directory: ggml/build
        run: sudo ninja install
      - uses: actions/checkout@v3
        with:
          path: ggml-gobject
      - name: Meson Configure
        # You may pin to the exact commit or the version.
        # uses: BSFishy/meson-build@656b2a360964b249ac82905a52018921e01d875b
        working-directory: ggml-gobject
        run: meson build
      - name: Ninja Build
        # You may pin to the exact commit or the version.
        # uses: BSFishy/meson-build@656b2a360964b249ac82905a52018921e01d875b
        working-directory: ggml-gobject/build
        run: ninja
      - name: Ninja Test
        # You may pin to the exact commit or the version.
        # uses: BSFishy/meson-build@656b2a360964b249ac82905a52018921e01d875b
        working-directory: ggml-gobject/build
        run: ninja test
      - name: Check logs
        if: always()
        run: cat ggml-gobject/build/meson-logs/testlog.txt