From bbd3047633f14213fbd409a36bd0c9cdb34c5fd7 Mon Sep 17 00:00:00 2001
From: Yusuf Simonson
Date: Fri, 28 Nov 2025 11:32:31 -0500
Subject: [PATCH 1/2] Fix capitalization of LlamaCppError

---
 llama-cpp-2/src/lib.rs           |  4 ++--
 llama-cpp-2/src/llama_backend.rs |  8 ++++----
 llama-cpp-2/src/model/params.rs  | 10 +++++-----
 llama-cpp-sys-2/llama.cpp        |  2 +-
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/llama-cpp-2/src/lib.rs b/llama-cpp-2/src/lib.rs
index 16f05e31..1daa7f9f 100644
--- a/llama-cpp-2/src/lib.rs
+++ b/llama-cpp-2/src/lib.rs
@@ -35,11 +35,11 @@ pub mod token;
 pub mod token_type;
 
 /// A failable result from a llama.cpp function.
-pub type Result<T> = std::result::Result<T, LLamaCppError>;
+pub type Result<T> = std::result::Result<T, LlamaCppError>;
 
 /// All errors that can occur in the llama-cpp crate.
 #[derive(Debug, Eq, PartialEq, thiserror::Error)]
-pub enum LLamaCppError {
+pub enum LlamaCppError {
     /// The backend was already initialized. This can generally be ignored as initializing the backend
     /// is idempotent.
     #[error("BackendAlreadyInitialized")]
diff --git a/llama-cpp-2/src/llama_backend.rs b/llama-cpp-2/src/llama_backend.rs
index 1cc3fa3d..7954e9c6 100644
--- a/llama-cpp-2/src/llama_backend.rs
+++ b/llama-cpp-2/src/llama_backend.rs
@@ -1,6 +1,6 @@
 //! Representation of an initialized llama backend
 
-use crate::LLamaCppError;
+use crate::LlamaCppError;
 use llama_cpp_sys_2::ggml_log_level;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering::SeqCst;
@@ -18,7 +18,7 @@ impl LlamaBackend {
     fn mark_init() -> crate::Result<()> {
         match LLAMA_BACKEND_INITIALIZED.compare_exchange(false, true, SeqCst, SeqCst) {
             Ok(_) => Ok(()),
-            Err(_) => Err(LLamaCppError::BackendAlreadyInitialized),
+            Err(_) => Err(LlamaCppError::BackendAlreadyInitialized),
         }
     }
 
@@ -28,7 +28,7 @@ impl LlamaBackend {
     ///
     /// ```
     ///# use llama_cpp_2::llama_backend::LlamaBackend;
-    ///# use llama_cpp_2::LLamaCppError;
+    ///# use llama_cpp_2::LlamaCppError;
     ///# use std::error::Error;
     ///
     ///# fn main() -> Result<(), Box<dyn Error>> {
@@ -36,7 +36,7 @@ impl LlamaBackend {
     ///
     /// let backend = LlamaBackend::init()?;
     /// // the llama backend can only be initialized once
-    /// assert_eq!(Err(LLamaCppError::BackendAlreadyInitialized), LlamaBackend::init());
+    /// assert_eq!(Err(LlamaCppError::BackendAlreadyInitialized), LlamaBackend::init());
     ///
     ///# Ok(())
     ///# }
diff --git a/llama-cpp-2/src/model/params.rs b/llama-cpp-2/src/model/params.rs
index 47f4c257..a54a4485 100644
--- a/llama-cpp-2/src/model/params.rs
+++ b/llama-cpp-2/src/model/params.rs
@@ -1,7 +1,7 @@
 //! A safe wrapper around `llama_model_params`.
 
 use crate::model::params::kv_overrides::KvOverrides;
-use crate::LLamaCppError;
+use crate::LlamaCppError;
 use std::ffi::{c_char, CStr};
 use std::fmt::{Debug, Formatter};
 use std::pin::Pin;
@@ -375,19 +375,19 @@ impl LlamaModelParams {
     /// You don't need to specify CPU or ACCEL devices.
     ///
     /// # Errors
-    /// Returns `LLamaCppError::BackendDeviceNotFound` if any device index is invalid.
-    pub fn with_devices(mut self, devices: &[usize]) -> Result<Self, LLamaCppError> {
+    /// Returns `LlamaCppError::BackendDeviceNotFound` if any device index is invalid.
+    pub fn with_devices(mut self, devices: &[usize]) -> Result<Self, LlamaCppError> {
         for dev in self.devices.iter_mut() {
             *dev = std::ptr::null_mut();
         }
         // Check device count
         let max_devices = crate::max_devices().min(LLAMA_CPP_MAX_DEVICES);
         if devices.len() > max_devices {
-            return Err(LLamaCppError::MaxDevicesExceeded(max_devices));
+            return Err(LlamaCppError::MaxDevicesExceeded(max_devices));
         }
         for (i, &dev) in devices.iter().enumerate() {
             if dev >= unsafe { llama_cpp_sys_2::ggml_backend_dev_count() } {
-                return Err(LLamaCppError::BackendDeviceNotFound(dev));
+                return Err(LlamaCppError::BackendDeviceNotFound(dev));
             }
             let backend_dev = unsafe { llama_cpp_sys_2::ggml_backend_dev_get(dev) };
             self.devices[i] = backend_dev;
diff --git a/llama-cpp-sys-2/llama.cpp b/llama-cpp-sys-2/llama.cpp
index d7395115..4c91f263 160000
--- a/llama-cpp-sys-2/llama.cpp
+++ b/llama-cpp-sys-2/llama.cpp
@@ -1 +1 @@
-Subproject commit d7395115baf395b75a73a17b0b796e746e468da9
+Subproject commit 4c91f2633f29a51ac2dcaa1c462483ea0ef6de8a

From de7db85f3c3752c6883786708eff8e253d96d6b9 Mon Sep 17 00:00:00 2001
From: Yusuf Simonson
Date: Fri, 28 Nov 2025 11:48:03 -0500
Subject: [PATCH 2/2] Revert back to baseline llama.cpp

---
 llama-cpp-sys-2/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama-cpp-sys-2/llama.cpp b/llama-cpp-sys-2/llama.cpp
index 4c91f263..d7395115 160000
--- a/llama-cpp-sys-2/llama.cpp
+++ b/llama-cpp-sys-2/llama.cpp
@@ -1 +1 @@
-Subproject commit 4c91f2633f29a51ac2dcaa1c462483ea0ef6de8a
+Subproject commit d7395115baf395b75a73a17b0b796e746e468da9