diff --git a/CMakeLists.txt b/CMakeLists.txt
index 264644eca..e695bcb51 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -57,7 +57,7 @@ add_executable(${PROJECT_NAME} main.cc)
 #
 # and comment out the following lines
 find_package(Drogon CONFIG REQUIRED)
-target_link_libraries(${PROJECT_NAME} PRIVATE Drogon::Drogon common llama llava
+target_link_libraries(${PROJECT_NAME} PRIVATE Drogon::Drogon common llava
                       ${CMAKE_THREAD_LIBS_INIT})

 # ##############################################################################
diff --git a/controllers/llamaCPP.h b/controllers/llamaCPP.h
index e4152ed85..c1017c757 100644
--- a/controllers/llamaCPP.h
+++ b/controllers/llamaCPP.h
@@ -12,11 +12,10 @@
 #include

 // External
+#include "clip.h"
 #include "common.h"
 #include "llama.h"

-#include "../../llama.cpp/examples/llava/clip.h"
-
 #include "stb_image.h"

 #ifndef NDEBUG
@@ -1538,8 +1537,9 @@ struct llama_server_context {
                 "cache\n");
        kv_cache_clear();
      }
-     std::unique_lock<std::mutex> lock(mutex_tasks);
-     condition_tasks.wait(lock, [&] { return !queue_tasks.empty(); });
+     // TODO: Need to implement queueing using CV for better performance
+     // std::unique_lock<std::mutex> lock(mutex_tasks);
+     // condition_tasks.wait(lock, [&] { return !queue_tasks.empty(); });
    }

    for (llama_client_slot &slot : slots) {
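
Note on the second llamaCPP.h hunk: the removed lines made the slot-update loop block on a condition variable until a task arrived; with them commented out, the loop no longer sleeps between tasks. Below is a minimal sketch of the condition-variable queueing the TODO refers to, modeled on the pattern in upstream llama.cpp's server example. The wrapper type task_queue and the task_server payload are hypothetical names for illustration; queue_tasks, mutex_tasks, and condition_tasks mirror the members that already exist in llama_server_context.

// Sketch only: a CV-backed task queue, not part of this patch.
#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>

// Hypothetical task payload; the real server carries more fields.
struct task_server {
  int id;
};

struct task_queue {
  std::queue<task_server> queue_tasks;
  std::mutex mutex_tasks;
  std::condition_variable condition_tasks;

  // Producer side: enqueue a task, then wake one waiting consumer.
  void post(task_server task) {
    {
      std::lock_guard<std::mutex> lock(mutex_tasks);
      queue_tasks.push(std::move(task));
    }
    condition_tasks.notify_one();
  }

  // Consumer side: block until a task is available instead of spinning.
  task_server wait_and_pop() {
    std::unique_lock<std::mutex> lock(mutex_tasks);
    condition_tasks.wait(lock, [&] { return !queue_tasks.empty(); });
    task_server task = std::move(queue_tasks.front());
    queue_tasks.pop();
    return task;
  }
};

Notifying after the lock_guard's scope closes avoids waking a consumer that would immediately block again on the still-held mutex.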