Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions common/common-nexa.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,17 @@
#include <variant>
#include <cmath>

// Cross-platform macro that yields a printable class name for `*this`,
// used by NEXA_LOG below.
// MSVC's typeid(...).name() already returns a human-readable name, while
// GCC/Clang return a mangled name that must go through abi::__cxa_demangle.
#ifdef _MSC_VER
// Windows/MSVC version
#include <typeinfo>
#define NEXA_CLASS_NAME (typeid(*this).name())
#else
// Unix/GCC/Clang version
#include <cxxabi.h>
// NOTE(review): __cxa_demangle called with a null output buffer returns
// malloc'd memory that is never freed here, so every expansion leaks a small
// string. Tolerable for debug logging; confirm it is not used on a hot path.
#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
#endif

// Logs "Class::function: <message>\n" to stderr.
// ##__VA_ARGS__ swallows the trailing comma when no varargs are supplied
// (GNU extension, also accepted by MSVC).
#define NEXA_LOG(fmt, ...) fprintf(stderr, "%s::%s: " fmt "\n", NEXA_CLASS_NAME, __func__, ##__VA_ARGS__)

// Prints the content of a ggml_tensor with specified precision. Can use the backend if available.
Expand Down
34 changes: 16 additions & 18 deletions examples/nexa-omni-audio/omni.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param

// Returns an omni_context_params with sensible defaults.
// Uses field-by-field assignment rather than designated initializers:
// designated init is only standard from C++20 and MSVC rejects it in
// earlier language modes, which is what this change fixes.
omni_context_params omni_context_default_params()
{
    omni_context_params params;
    params.model        = "";                             // path to the LLM model file (required from CLI)
    params.mmproj       = "";                             // path to the multimodal projector / audio model
    params.file         = "";                             // input audio file
    params.prompt       = "this conversation talks about"; // default text prompt
    params.n_gpu_layers = -1;                             // -1: let the backend decide how many layers to offload
    return params;
}

Expand Down Expand Up @@ -565,17 +564,16 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)

static omni_params get_omni_params_from_context_params(omni_context_params &params)
{
omni_params all_params = {
.gpt = {
.n_gpu_layers = params.n_gpu_layers,
.model = params.model,
.prompt = params.prompt,
},
.whisper = {
.model = params.mmproj,
.fname_inp = {params.file},
},
};
omni_params all_params;

// Initialize gpt params
all_params.gpt.n_gpu_layers = params.n_gpu_layers;
all_params.gpt.model = params.model;
all_params.gpt.prompt = params.prompt;

// Initialize whisper params
all_params.whisper.model = params.mmproj;
all_params.whisper.fname_inp = {params.file};

if (all_params.gpt.n_threads <= 0)
{
Expand Down
5 changes: 5 additions & 0 deletions examples/nexa-omni-audio/whisper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,11 @@
#include <functional>
#include <codecvt>

#ifdef _WIN32
#include <io.h>
#include <fcntl.h>
#endif

// third-party utilities
// use your favorite implementations
#define DR_WAV_IMPLEMENTATION
Expand Down
36 changes: 17 additions & 19 deletions examples/qwen2-audio/qwen2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param

// Returns an omni_context_params with sensible defaults.
// Uses field-by-field assignment rather than designated initializers:
// designated init is only standard from C++20 and MSVC rejects it in
// earlier language modes, which is what this change fixes.
omni_context_params omni_context_default_params()
{
    omni_context_params params;
    params.model        = "";                             // path to the LLM model file (required from CLI)
    params.mmproj       = "";                             // path to the multimodal projector / audio model
    params.file         = "";                             // input audio file
    params.prompt       = "this conversation talks about"; // default text prompt
    params.n_gpu_layers = -1;                             // -1: let the backend decide how many layers to offload
    return params;
}

Expand Down Expand Up @@ -565,18 +564,17 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)

static omni_params get_omni_params_from_context_params(omni_context_params &params)
{
omni_params all_params = {
.gpt = {
.n_gpu_layers = params.n_gpu_layers,
.model = params.model,
.prompt = params.prompt,
},
.whisper = {
.model = params.mmproj,
.fname_inp = {params.file},
},
};

omni_params all_params;

// Initialize gpt params
all_params.gpt.n_gpu_layers = params.n_gpu_layers;
all_params.gpt.model = params.model;
all_params.gpt.prompt = params.prompt;

// Initialize whisper params
all_params.whisper.model = params.mmproj;
all_params.whisper.fname_inp = {params.file};

if (all_params.gpt.n_threads <= 0)
{
all_params.gpt.n_threads = std::thread::hardware_concurrency();
Expand Down
5 changes: 5 additions & 0 deletions examples/qwen2-audio/whisper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,11 @@
#include <functional>
#include <codecvt>

#ifdef _WIN32
#include <io.h> // for _setmode
#include <fcntl.h> // for _O_BINARY
#endif

// third-party utilities
// use your favorite implementations
#define DR_WAV_IMPLEMENTATION
Expand Down