From 2f1a75c4a99c098ce27b4467a0a4b52aab379828 Mon Sep 17 00:00:00 2001
From: leejet
Date: Wed, 10 Sep 2025 22:14:23 +0800
Subject: [PATCH] remove sd3 flash attention warn

---
 stable-diffusion.cpp | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 7fb6fb436..088947311 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -344,9 +344,6 @@ class StableDiffusionGGML {
             LOG_INFO("Using flash attention in the diffusion model");
         }
         if (sd_version_is_sd3(version)) {
-            if (sd_ctx_params->diffusion_flash_attn) {
-                LOG_WARN("flash attention in this diffusion model is currently not implemented!");
-            }
             cond_stage_model = std::make_shared(clip_backend,
                                                 offload_params_to_cpu,
                                                 model_loader.tensor_storages_types);
@@ -1555,7 +1552,7 @@ enum scheduler_t str_to_schedule(const char* str) {
 }
 
 void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) {
-    *sd_ctx_params = {};
+    *sd_ctx_params = {};
     sd_ctx_params->vae_decode_only = true;
     sd_ctx_params->vae_tiling = false;
     sd_ctx_params->free_params_immediately = true;
@@ -1639,7 +1636,7 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) {
 }
 
 void sd_sample_params_init(sd_sample_params_t* sample_params) {
-    *sample_params = {};
+    *sample_params = {};
     sample_params->guidance.txt_cfg = 7.0f;
     sample_params->guidance.img_cfg = INFINITY;
     sample_params->guidance.distilled_guidance = 3.5f;
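
A minimal usage sketch (not from this patch): with the SD3-specific warning removed, flash attention in the diffusion model is requested the same way as for other model families, via the diffusion_flash_attn field of sd_ctx_params_t. The sketch below assumes the struct-based C API from stable-diffusion.h (sd_ctx_params_init, new_sd_ctx, free_sd_ctx); model paths, the generation call, and other required fields are omitted, and exact names may differ between versions.

    #include "stable-diffusion.h"

    int main(void) {
        sd_ctx_params_t params;
        sd_ctx_params_init(&params);         // defaults, as set in the hunks above
        params.diffusion_flash_attn = true;  // for SD3 this previously logged a "not implemented" warning
        // ... set model path and other required fields here (omitted) ...

        sd_ctx_t* ctx = new_sd_ctx(&params); // assumed constructor taking the params struct
        if (ctx == NULL) {
            return 1;
        }
        // ... run txt2img / img2img as usual ...
        free_sd_ctx(ctx);
        return 0;
    }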