
Commit 8406b09
ggml : re-enable BLAS for CPU when src0 != F32 + remove redundant full offload checks in llama.cpp (#4240)

* ggml : use blas even if src0 is not F32

* llama : use n_threads_batch only when n_tokens >= 32

ggml-ci

* llama : revert n_threads_batch logic

ggml-ci
ggerganov committed Nov 28, 2023
1 parent b38a16d commit 8406b09
Showing 2 changed files with 2 additions and 12 deletions.
2 changes: 1 addition & 1 deletion ggml.c
@@ -9373,7 +9373,7 @@ static bool ggml_compute_forward_mul_mat_use_blas(
     // TODO: find the optimal values for these
     if (ggml_is_contiguous(src0) &&
         ggml_is_contiguous(src1) &&
-        src0->type == GGML_TYPE_F32 &&
+        //src0->type == GGML_TYPE_F32 &&
         src1->type == GGML_TYPE_F32 &&
         (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {

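The practical effect of commenting out the src0 type check is that the CPU BLAS path is now taken even when src0 is F16 or quantized; in that case src0 is converted to F32 before the GEMM call. A minimal sketch of that pattern, assuming hypothetical names (mul_mat_blas, to_float_row_t) and a row-major layout rather than the actual ggml internals:

    // A minimal sketch, not the ggml source: with the src0 type check
    // relaxed, a non-F32 src0 is converted row by row into an F32 scratch
    // buffer, which is then fed to cblas_sgemm. to_float_row_t is a
    // hypothetical stand-in for ggml's per-type to_float conversion.
    #include <cblas.h>
    #include <cstddef>
    #include <vector>

    using to_float_row_t = void (*)(const void * src, float * dst, int n);

    // dst(M x N) = src0(M x K, arbitrary type) * src1(K x N, F32), row-major
    static void mul_mat_blas(const void * src0, to_float_row_t to_float_row,
                             size_t src0_row_size, const float * src1,
                             float * dst, int M, int N, int K) {
        std::vector<float> wdata((size_t) M * K);
        for (int i = 0; i < M; i++) {
            // dequantize/convert one row of src0 into the F32 work buffer
            to_float_row((const char *) src0 + (size_t) i * src0_row_size,
                         wdata.data() + (size_t) i * K, K);
        }
        cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                    M, N, K, 1.0f, wdata.data(), K, src1, N, 0.0f, dst, N);
    }

The ne0/ne1/ne10 >= 32 guard is unchanged: for small matrices the F32 conversion plus the BLAS call would presumably cost more than ggml's own kernels, which is what the "TODO: find the optimal values" comment hints at.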
12 changes: 1 addition & 11 deletions llama.cpp
@@ -5550,18 +5550,8 @@ static int llama_decode_internal(
         n_threads = std::min(4, n_threads);
     }

-    // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
-    const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA ||
-        model.arch == LLM_ARCH_BAICHUAN ||
-        model.arch == LLM_ARCH_FALCON ||
-        model.arch == LLM_ARCH_REFACT ||
-        model.arch == LLM_ARCH_MPT ||
-        model.arch == LLM_ARCH_STARCODER ||
-        model.arch == LLM_ARCH_STABLELM;
-
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
-    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
         n_threads = 1;
     }

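With the per-architecture whitelist removed, the single-thread rule depends only on whether the build has cuBLAS and whether the model is fully offloaded. A minimal sketch of the resulting logic, assuming hypothetical names (ToyModel, pick_n_threads) rather than the llama.cpp internals:

    // A minimal sketch with hypothetical names, not the llama.cpp source.
    struct ToyModel {
        int n_gpu_layers; // layers placed on the GPU
        int n_layer;      // repeating transformer layers in the model
    };

    static int pick_n_threads(const ToyModel & model, bool has_cublas, int n_threads) {
        // "+ 3" presumably covers the non-repeating tensors offloaded after the layers
        const bool fully_offloaded = model.n_gpu_layers >= model.n_layer + 3;
        if (has_cublas && fully_offloaded) {
            n_threads = 1; // CPU graph is nearly empty; extra threads only add sync overhead
        }
        return n_threads;
    }

Per the commit title, the whitelist had become redundant: once full offload works for every supported architecture, checking model.arch adds nothing, and a fully offloaded model on an unlisted architecture would otherwise keep spinning extra CPU threads over a nearly empty CPU graph.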
