diff --git a/conditioner.hpp b/conditioner.hpp
index b1dc76983..df7ed0cc8 100644
--- a/conditioner.hpp
+++ b/conditioner.hpp
@@ -141,7 +141,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
             }
             return true;
         };
-        model_loader.load_tensors(on_load);
+        model_loader.load_tensors(on_load, 1);
         readed_embeddings.push_back(embd_name);
         if (embd) {
             int64_t hidden_size = text_model->model.hidden_size;
diff --git a/control.hpp b/control.hpp
index f9a492354..79b82a220 100644
--- a/control.hpp
+++ b/control.hpp
@@ -445,7 +445,7 @@ struct ControlNet : public GGMLRunner {
         guided_hint_cached = true;
     }
 
-    bool load_from_file(const std::string& file_path) {
+    bool load_from_file(const std::string& file_path, int n_threads) {
         LOG_INFO("loading control net from '%s'", file_path.c_str());
         alloc_params_buffer();
         std::map<std::string, ggml_tensor*> tensors;
@@ -458,7 +458,7 @@ struct ControlNet : public GGMLRunner {
             return false;
         }
 
-        bool success = model_loader.load_tensors(tensors, ignore_tensors);
+        bool success = model_loader.load_tensors(tensors, ignore_tensors, n_threads);
 
         if (!success) {
             LOG_ERROR("load control net tensors from model loader failed");
diff --git a/esrgan.hpp b/esrgan.hpp
index e2003e4eb..7ede2e4ee 100644
--- a/esrgan.hpp
+++ b/esrgan.hpp
@@ -164,7 +164,7 @@ struct ESRGAN : public GGMLRunner {
         return "esrgan";
     }
 
-    bool load_from_file(const std::string& file_path) {
+    bool load_from_file(const std::string& file_path, int n_threads) {
         LOG_INFO("loading esrgan from '%s'", file_path.c_str());
 
         alloc_params_buffer();
@@ -177,7 +177,7 @@ struct ESRGAN : public GGMLRunner {
             return false;
         }
 
-        bool success = model_loader.load_tensors(esrgan_tensors);
+        bool success = model_loader.load_tensors(esrgan_tensors, {}, n_threads);
 
         if (!success) {
             LOG_ERROR("load esrgan tensors from model loader failed");
diff --git a/lora.hpp b/lora.hpp
index 222f61b1e..1fce9569f 100644
--- a/lora.hpp
+++ b/lora.hpp
@@ -116,7 +116,7 @@ struct LoraModel : public GGMLRunner {
         return "lora";
     }
 
-    bool load_from_file(bool filter_tensor = false, int n_threads = 0) {
+    bool load_from_file(bool filter_tensor, int n_threads) {
         LOG_INFO("loading LoRA from '%s'", file_path.c_str());
 
         if (load_failed) {
diff --git a/model.cpp b/model.cpp
index 0c45fa444..e626c130a 100644
--- a/model.cpp
+++ b/model.cpp
@@ -1957,7 +1957,8 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
     std::atomic<int64_t> copy_to_backend_time_ms(0);
     std::atomic<int64_t> convert_time_ms(0);
 
-    int num_threads_to_use = n_threads_p > 0 ? n_threads_p : (int)std::thread::hardware_concurrency();
+    int num_threads_to_use = n_threads_p > 0 ? n_threads_p : get_num_physical_cores();
+    LOG_DEBUG("using %d threads for model loading", num_threads_to_use);
 
     int64_t start_time = ggml_time_ms();
     std::vector<TensorStorage> processed_tensor_storages;
diff --git a/pmid.hpp b/pmid.hpp
index 3bd59cd7a..63029cbc0 100644
--- a/pmid.hpp
+++ b/pmid.hpp
@@ -591,7 +591,7 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
         return "id_embeds";
     }
 
-    bool load_from_file(bool filter_tensor = false) {
+    bool load_from_file(bool filter_tensor, int n_threads) {
         LOG_INFO("loading PhotoMaker ID Embeds from '%s'", file_path.c_str());
 
         if (load_failed) {
@@ -623,11 +623,11 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
             return true;
         };
 
-        model_loader->load_tensors(on_new_tensor_cb);
+        model_loader->load_tensors(on_new_tensor_cb, n_threads);
         alloc_params_buffer();
 
         dry_run = false;
-        model_loader->load_tensors(on_new_tensor_cb);
+        model_loader->load_tensors(on_new_tensor_cb, n_threads);
 
         LOG_DEBUG("finished loading PhotoMaker ID Embeds ");
         return true;
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index ff064bb87..b68ba4fb8 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -531,7 +531,7 @@ class StableDiffusionGGML {
         }
         if (strlen(SAFE_STR(sd_ctx_params->photo_maker_path)) > 0) {
             pmid_lora = std::make_shared<LoraModel>(backend, sd_ctx_params->photo_maker_path, "");
-            if (!pmid_lora->load_from_file(true)) {
+            if (!pmid_lora->load_from_file(true, n_threads)) {
                 LOG_WARN("load photomaker lora tensors from %s failed", sd_ctx_params->photo_maker_path);
                 return false;
             }
@@ -599,14 +599,14 @@ class StableDiffusionGGML {
         if (!use_tiny_autoencoder) {
             vae_params_mem_size = first_stage_model->get_params_buffer_size();
         } else {
-            if (!tae_first_stage->load_from_file(taesd_path)) {
+            if (!tae_first_stage->load_from_file(taesd_path, n_threads)) {
                 return false;
             }
             vae_params_mem_size = tae_first_stage->get_params_buffer_size();
         }
         size_t control_net_params_mem_size = 0;
         if (control_net) {
-            if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path))) {
+            if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_threads)) {
                 return false;
             }
             control_net_params_mem_size = control_net->get_params_buffer_size();
@@ -836,7 +836,7 @@ class StableDiffusionGGML {
             return;
         }
         LoraModel lora(backend, file_path, is_high_noise ? "model.high_noise_" : "");
-        if (!lora.load_from_file()) {
+        if (!lora.load_from_file(false, n_threads)) {
             LOG_WARN("load lora tensors from %s failed", file_path.c_str());
             return;
         }
diff --git a/tae.hpp b/tae.hpp
index 1ae1257f8..41bcbe2f1 100644
--- a/tae.hpp
+++ b/tae.hpp
@@ -222,7 +222,7 @@ struct TinyAutoEncoder : public GGMLRunner {
         return "taesd";
     }
 
-    bool load_from_file(const std::string& file_path) {
+    bool load_from_file(const std::string& file_path, int n_threads) {
         LOG_INFO("loading taesd from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
         alloc_params_buffer();
         std::map<std::string, ggml_tensor*> taesd_tensors;
@@ -238,7 +238,7 @@ struct TinyAutoEncoder : public GGMLRunner {
             return false;
         }
 
-        bool success = model_loader.load_tensors(taesd_tensors, ignore_tensors);
+        bool success = model_loader.load_tensors(taesd_tensors, ignore_tensors, n_threads);
 
         if (!success) {
             LOG_ERROR("load tae tensors from model loader failed");
diff --git a/upscaler.cpp b/upscaler.cpp
index 7e765d77a..4c138ea7d 100644
--- a/upscaler.cpp
+++ b/upscaler.cpp
@@ -18,7 +18,8 @@ struct UpscalerGGML {
     }
 
     bool load_from_file(const std::string& esrgan_path,
-                        bool offload_params_to_cpu) {
+                        bool offload_params_to_cpu,
+                        int n_threads) {
         ggml_log_set(ggml_log_callback_default, nullptr);
 #ifdef SD_USE_CUDA
         LOG_DEBUG("Using CUDA backend");
@@ -54,7 +55,7 @@ struct UpscalerGGML {
         if (direct) {
             esrgan_upscaler->enable_conv2d_direct();
         }
-        if (!esrgan_upscaler->load_from_file(esrgan_path)) {
+        if (!esrgan_upscaler->load_from_file(esrgan_path, n_threads)) {
             return false;
         }
         return true;
@@ -124,7 +125,7 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
         return NULL;
     }
 
-    if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu)) {
+    if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads)) {
        delete upscaler_ctx->upscaler;
        upscaler_ctx->upscaler = NULL;
        free(upscaler_ctx);