From ba21ab7ca86cee36fba8a1c1b4446a5bd96cebae Mon Sep 17 00:00:00 2001
From: GitHub Actions <41898282+github-actions[bot]@users.noreply.github.com>
Date: Wed, 6 Dec 2023 01:53:37 +0000
Subject: [PATCH] localai-git: auto updated to v2.0.0.6.g997119c-1

---
 x86_64/localai-git/PKGBUILD            | 177 +++++++++++++++++++++++++
 x86_64/localai-git/whisper-1.5.1.patch |  66 +++++++++
 2 files changed, 243 insertions(+)
 create mode 100644 x86_64/localai-git/PKGBUILD
 create mode 100644 x86_64/localai-git/whisper-1.5.1.patch

diff --git a/x86_64/localai-git/PKGBUILD b/x86_64/localai-git/PKGBUILD
new file mode 100644
index 000000000..1985afa56
--- /dev/null
+++ b/x86_64/localai-git/PKGBUILD
@@ -0,0 +1,177 @@
+# Maintainer: wuxxin
+
+# to build for CPU only, set _ENABLE_CUDA and _ENABLE_ROCM to 0
+_ENABLE_CUDA=1
+_ENABLE_ROCM=0
+_SKIP_CPU=0
+_GO_TAGS=""
+# _GO_TAGS="tts stablediffusion"
+_OPTIONAL_BACKENDS=""
+if test -n "$(echo "$_GO_TAGS" | grep -o "tts")"; then
+    _OPTIONAL_BACKENDS="backend-assets/grpc/piper $_OPTIONAL_BACKENDS"
+fi
+if test -n "$(echo "$_GO_TAGS" | grep -o "stablediffusion")"; then
+    _OPTIONAL_BACKENDS="backend-assets/grpc/stablediffusion $_OPTIONAL_BACKENDS"
+fi
+# list of backends to be built
+_GRPC_BACKENDS="backend-assets/grpc/bert-embeddings backend-assets/grpc/llama-cpp backend-assets/grpc/whisper $_OPTIONAL_BACKENDS"
+_pkgname="localai"
+
+pkgbase="${_pkgname}-git"
+pkgname=("${pkgbase}")
+pkgver=v2.0.0.6.g997119c
+pkgrel=1
+pkgdesc="The free, Open Source OpenAI alternative. Self-hosted, community-driven and local-first."
+url="https://github.com/mudler/LocalAI"
+license=('MIT')
+arch=('x86_64')
+
+provides=('localai')
+conflicts=('localai')
+
+depends=(
+    'grpc'
+    'opencv'
+    'blas-openblas'
+    'sdl2'
+    'ffmpeg'
+)
+makedepends=(
+    'go'
+    'git'
+    'cmake'
+)
+
+if test "$(echo "$_GO_TAGS" | grep -o "tts")" = "tts"; then
+    depends+=(
+        'onnxruntime'
+        'piper-phonemize'
+    )
+fi
+
+if [[ $_ENABLE_CUDA = 1 ]]; then
+    pkgname+=("${pkgbase}-cuda")
+    makedepends+=(
+        'cuda'
+        'cudnn'
+        'nccl'
+        'magma-cuda'
+    )
+fi
+
+if [[ $_ENABLE_ROCM = 1 ]]; then
+    pkgname+=("${pkgbase}-rocm")
+    makedepends+=(
+        'rocm-hip-sdk'
+        'miopen-hip'
+        'rccl'
+        'magma-hip'
+    )
+fi
+
+source=(
+    "${_pkgname}"::"git+https://github.com/mudler/LocalAI"
+)
+
+sha256sums=(
+    'SKIP'
+)
+
+pkgver() {
+    cd "${srcdir}/${_pkgname}"
+    (git describe --always --tags | tr "-" ".")
+}
+
+prepare() {
+    cd "${srcdir}/${_pkgname}"
+
+    # list of backend sources to be recursively checked out with git before build()
+    _EXTERNAL_SOURCES="backend/cpp/llama/llama.cpp sources/go-piper sources/whisper.cpp sources/go-bert"
+    # fetch sources for active backends
+    mkdir -p "sources"
+    make $_EXTERNAL_SOURCES
+
+    # modify get-sources, remove go mod edits for inactive backend sources
+    sed -ri "s#get-sources: .*#get-sources: $_EXTERNAL_SOURCES#g" Makefile
+    sed -ri 's#.+\-replace github.com/nomic-ai/gpt4all/gpt4all.+##g' Makefile
+    sed -ri 's#.+\-replace github.com/donomii/go-rwkv.cpp.+##g' Makefile
+    sed -ri 's#.+\-replace github.com/go-skynet/go-ggml-transformers.cpp.+##g' Makefile
+    sed -ri 's#.+\-replace github.com/mudler/go-stable-diffusion.+##g' Makefile
+
+    # # patch stablediffusion
+    # sed -ri "s/^(#include )/\1src\/\2\3/g" \
+    #     sources/go-stable-diffusion/stablediffusion.hpp
+
+    # copy for different build types
+    cd "${srcdir}"
+    for n in "${_pkgname}-cpu" "${_pkgname}-cuda" "${_pkgname}-rocm"; do
+        if test -d "$n"; then rm -rf "$n"; fi
+        cp -r "${_pkgname}" "$n"
+    done
+}
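+
+# _build() below drives the upstream Makefile once per build type. As a rough
+# sketch, the default CPU invocation expands to something like the following
+# (values taken from the variables at the top of this file; exact Makefile
+# behavior may differ between LocalAI revisions):
+#   make BUILD_TYPE=openblas \
+#     GRPC_BACKENDS="backend-assets/grpc/bert-embeddings backend-assets/grpc/llama-cpp backend-assets/grpc/whisper" \
+#     GO_TAGS="" build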
"stablediffusion")"; then + make BUILD_TYPE="$1" GRPC_BACKENDS="backend-assets/grpc/stablediffusion" GO_TAGS="$_GO_TAGS" build + fi + make BUILD_TYPE="$1" GRPC_BACKENDS="$_GRPC_BACKENDS" GO_TAGS="$_GO_TAGS" build +} + +build() { + if test "$_SKIP_CPU" != "1"; then + cd "${srcdir}/${_pkgname}-cpu" + _build openblas + fi + + if [[ $_ENABLE_CUDA = 1 ]]; then + cd "${srcdir}/${_pkgname}-cuda" + export CUDA_HOME="${CUDA_HOME:-/opt/cuda}" + export PATH="$CUDA_HOME/bin:$PATH" + MAGMA_HOME="$CUDA_HOME/targets/x86_64-linux" CUDA_LIBPATH="$CUDA_HOME/lib64/" \ + _build cublas + fi + + if [[ $_ENABLE_ROCM = 1 ]]; then + cd "${srcdir}/${_pkgname}-rocm" + export ROCM_HOME="${ROCM_HOME:-/opt/rocm}" + export PATH="$ROC_HOME/bin:$PATH" + if test -n "$GPU_TARGETS"; then + _AMDGPU_TARGETS="$GPU_TARGETS" + else + _AMDGPU_TARGETS="${AMDGPU_TARGETS:-gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx1102}" + fi + MAGMA_HOME="$ROCM_HOME" AMDGPU_TARGETS="$_AMDGPU_TARGETS" GPU_TARGETS="$_AMDGPU_TARGETS" \ + _build hipblas + fi +} + +_package_install() { + install -Dm755 "local-ai" "${pkgdir}/usr/bin/local-ai" + # sources/go-piper/piper/build/pi/lib/* /usr/lib/ + + # add 1-2 7b high performing models yaml configs based on mistral as gpt-3.5 + # prefer chatml, add example working preload-models.yaml, + + install -Dm644 README.md -t "${pkgdir}/usr/share/doc/${_pkgname}" +} + +package_localai-git() { + if test "$_SKIP_CPU" != "1"; then + cd "${srcdir}/${_pkgname}-cpu" + _package_install + fi +} + +package_localai-git-cuda() { + cd "${srcdir}/${_pkgname}-cuda" + pkgdesc+=' (with CUDA support)' + depends+=('cuda') + _package_install +} + +package_localai-git-rocm() { + cd "${srcdir}/${_pkgname}-rocm" + pkgdesc+=' (with ROCM support)' + depends+=('rocm-hip-runtime') + _package_install +} diff --git a/x86_64/localai-git/whisper-1.5.1.patch b/x86_64/localai-git/whisper-1.5.1.patch new file mode 100644 index 000000000..ac7557f63 --- /dev/null +++ b/x86_64/localai-git/whisper-1.5.1.patch @@ -0,0 +1,66 @@ +From 90f96c5ea447ff7a9e796e772a4e53cec3d38235 Mon Sep 17 00:00:00 2001 +From: Felix Erkinger +Date: Sat, 18 Nov 2023 00:58:03 +0100 +Subject: [PATCH] update whisper_cpp to 1.5.1 with OPENBLAS, METAL, HIPBLAS, + CUBLAS, CLBLAST support + +--- + Makefile | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index 22689eb31..f7c7ca80e 100644 +--- a/Makefile ++++ b/Makefile +@@ -22,7 +22,7 @@ RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp + RWKV_VERSION?=c898cd0f62df8f2a7830e53d1d513bef4f6f792b + + # whisper.cpp version +-WHISPER_CPP_VERSION?=85ed71aaec8e0612a84c0b67804bde75aa75a273 ++WHISPER_CPP_VERSION?=9d6ebd877ce7d99053423d186e6f5387a4a4753c + + # bert.cpp version + BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d +@@ -78,11 +78,13 @@ endif + + ifeq ($(BUILD_TYPE),openblas) + CGO_LDFLAGS+=-lopenblas ++ export WHISPER_OPENBLAS=1 + endif + + ifeq ($(BUILD_TYPE),cublas) + CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) + export LLAMA_CUBLAS=1 ++ export WHISPER_CUBLAS=1 + endif + + ifeq ($(BUILD_TYPE),hipblas) +@@ -91,6 +93,7 @@ ifeq ($(BUILD_TYPE),hipblas) + export CC=$(ROCM_HOME)/llvm/bin/clang + # llama-ggml has no hipblas support, so override it here. 
diff --git a/x86_64/localai-git/whisper-1.5.1.patch b/x86_64/localai-git/whisper-1.5.1.patch
new file mode 100644
index 000000000..ac7557f63
--- /dev/null
+++ b/x86_64/localai-git/whisper-1.5.1.patch
@@ -0,0 +1,66 @@
+From 90f96c5ea447ff7a9e796e772a4e53cec3d38235 Mon Sep 17 00:00:00 2001
+From: Felix Erkinger
+Date: Sat, 18 Nov 2023 00:58:03 +0100
+Subject: [PATCH] update whisper_cpp to 1.5.1 with OPENBLAS, METAL, HIPBLAS,
+ CUBLAS, CLBLAST support
+
+---
+ Makefile | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 22689eb31..f7c7ca80e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -22,7 +22,7 @@ RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
+ RWKV_VERSION?=c898cd0f62df8f2a7830e53d1d513bef4f6f792b
+ 
+ # whisper.cpp version
+-WHISPER_CPP_VERSION?=85ed71aaec8e0612a84c0b67804bde75aa75a273
++WHISPER_CPP_VERSION?=9d6ebd877ce7d99053423d186e6f5387a4a4753c
+ 
+ # bert.cpp version
+ BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d
+@@ -78,11 +78,13 @@ endif
+ 
+ ifeq ($(BUILD_TYPE),openblas)
+ 	CGO_LDFLAGS+=-lopenblas
++	export WHISPER_OPENBLAS=1
+ endif
+ 
+ ifeq ($(BUILD_TYPE),cublas)
+ 	CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
+ 	export LLAMA_CUBLAS=1
++	export WHISPER_CUBLAS=1
+ endif
+ 
+ ifeq ($(BUILD_TYPE),hipblas)
+@@ -91,6 +93,7 @@ ifeq ($(BUILD_TYPE),hipblas)
+ 	export CC=$(ROCM_HOME)/llvm/bin/clang
+ 	# llama-ggml has no hipblas support, so override it here.
+ 	export STABLE_BUILD_TYPE=
++	export WHISPER_HIPBLAS=1
+ 	GPU_TARGETS ?= gfx900,gfx90a,gfx1030,gfx1031,gfx1100
+ 	AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
+ 	CMAKE_ARGS+=-DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
+@@ -100,10 +103,12 @@ endif
+ ifeq ($(BUILD_TYPE),metal)
+ 	CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
+ 	export LLAMA_METAL=1
++	export WHISPER_METAL=1
+ endif
+ 
+ ifeq ($(BUILD_TYPE),clblas)
+ 	CGO_LDFLAGS+=-lOpenCL -lclblast
++	export WHISPER_CLBLAST=1
+ endif
+ 
+ ifeq ($(OS),Darwin)
+@@ -233,6 +238,7 @@ replace:
+ 	$(GOCMD) mod edit -replace github.com/go-skynet/go-ggml-transformers.cpp=$(shell pwd)/sources/go-ggml-transformers
+ 	$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(shell pwd)/sources/go-rwkv
+ 	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(shell pwd)/sources/whisper.cpp
++	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(shell pwd)/sources/whisper.cpp/bindings/go
+ 	$(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(shell pwd)/sources/go-bert
+ 	$(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(shell pwd)/sources/go-stable-diffusion
+ 	$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(shell pwd)/sources/go-piper
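
Note: whisper-1.5.1.patch is shipped next to the PKGBUILD but is not listed in
source=() or applied in prepare() above. A minimal sketch of wiring it in
(assumed, not part of this commit; the checksum is left as SKIP for brevity):

    source+=("whisper-1.5.1.patch")
    sha256sums+=('SKIP')
    # in prepare(), from inside "${srcdir}/${_pkgname}":
    patch -Np1 -i "${srcdir}/whisper-1.5.1.patch"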