We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
2 parents 48ce8f8 + bb10875 commit 2ec8656 — Copy full SHA for 2ec8656
llama-cpp-2/src/lib.rs
@@ -13,7 +13,7 @@
13
//!
14
//! - `cuda` enables CUDA gpu support.
15
//! - `sampler` adds the [`context::sample::sampler`] struct for a more rusty way of sampling.
16
-use std::ffi::NulError;
+use std::ffi::{c_char, NulError};
17
use std::fmt::Debug;
18
use std::num::NonZeroI32;
19
@@ -398,7 +398,7 @@ pub struct LlamaBackendDevice {
398
pub fn list_llama_ggml_backend_devices() -> Vec<LlamaBackendDevice> {
399
let mut devices = Vec::new();
400
for i in 0..unsafe { llama_cpp_sys_2::ggml_backend_dev_count() } {
401
- fn cstr_to_string(ptr: *const i8) -> String {
+ fn cstr_to_string(ptr: *const c_char) -> String {
402
if ptr.is_null() {
403
String::new()
404
} else {
0 commit comments