From 6e2d45a4a92f31a32e80f1301266d5dabb35c3aa Mon Sep 17 00:00:00 2001 From: Ruben Ortlam Date: Sun, 9 Nov 2025 16:14:41 +0100 Subject: [PATCH 01/51] vulkan: fix memory allocations (llama/17122) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 46e098a7ffb..7570febeaad 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -2220,9 +2220,12 @@ static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std } buf->memory_property_flags = req_flags; + bool done = false; + for (auto mtype_it = memory_type_indices.begin(); mtype_it != memory_type_indices.end(); mtype_it++) { try { buf->device_memory = device->device.allocateMemory({ mem_req.size, *mtype_it, &mem_flags_info }); + done = true; break; } catch (const vk::SystemError& e) { // loop and retry @@ -2233,6 +2236,10 @@ static vk_buffer ggml_vk_create_buffer(vk_device& device, size_t size, const std } } } + + if (done) { + break; + } } if (!buf->device_memory) { From ce8d1da21c4483e804605227b3cd34a296f167eb Mon Sep 17 00:00:00 2001 From: Acly Date: Mon, 10 Nov 2025 10:19:39 +0100 Subject: [PATCH 02/51] cuda/vulkan : bicubic interpolation (llama/17022) * vulkan : implement upscale with bicubic interpolation * cuda : implement upscale with bicubic interpolation * tests : add ggml_interpolate with GGML_SCALE_MODE_BICUBIC to backend tests * adapt OpenCL backend to not support the OP in that case so tests don't fail * print scale mode & flags in test-backend-ops --- ggml/src/ggml-cuda/upscale.cu | 93 +++++++++++++++++-- ggml/src/ggml-opencl/ggml-opencl.cpp | 7 +- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 5 +- .../ggml-vulkan/vulkan-shaders/upscale.comp | 37 ++++++++ 4 files changed, 133 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-cuda/upscale.cu b/ggml/src/ggml-cuda/upscale.cu index 35b7e61d80a..687c669304d 100644 --- a/ggml/src/ggml-cuda/upscale.cu +++ b/ggml/src/ggml-cuda/upscale.cu @@ -81,6 +81,70 @@ static __global__ void upscale_f32_bilinear(const float * x, float * dst, dst[index] = result; } +namespace bicubic_interpolation { +// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +__device__ const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) + +static __device__ float weight1(float x) { return ((a + 2) * x - (a + 3)) * x * x + 1; }; +static __device__ float weight2(float x) { return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; }; + +static __device__ float bicubic(float p0, float p1, float p2, float p3, float x) { + const float w0 = weight2(x + 1); + const float w1 = weight1(x + 0); + const float w2 = weight1(1 - x); + const float w3 = weight2(2 - x); + return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; +}; +} // namespace bicubic_interpolation + +static __global__ void upscale_f32_bicubic(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne00_src, const int ne01_src, + const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, + const float sf0, const float sf1, const float sf2, const float sf3, + const float pixel_offset) { + using bicubic_interpolation::bicubic; + + const int64_t index = threadIdx.x + blockIdx.x * blockDim.x; + const int64_t dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst; + + if (index >= dst_total_elements) { + return; + } + + const int i10_dst = index % ne10_dst; + const int i11_dst = (index / 
ne10_dst) % ne11_dst; + const int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst; + const int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst); + + const int i02_src = (int)(i12_dst / sf2); + const int i03_src = (int)(i13_dst / sf3); + + const float y_src_f = ((float)i11_dst + pixel_offset) / sf1 - pixel_offset; + const int y0_src = (int)floorf(y_src_f); + const float dy = y_src_f - (float)y0_src; + + const float x_src_f = ((float)i10_dst + pixel_offset) / sf0 - pixel_offset; + const int x0_src = (int)floorf(x_src_f); + const float dx = x_src_f - (float)x0_src; + + const char * x_base = (const char *)x + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03; + + auto load = [=](int x_off, int y_off) -> float { + int i00_src = max(0, min(x0_src + x_off, ne00_src - 1)); + int i01_src = max(0, min(y0_src + y_off, ne01_src - 1)); + return *(const float *)(x_base + (int64_t)i00_src * nb00 + (int64_t)i01_src * nb01); + }; + + const float result = bicubic( + bicubic(load(-1,-1), load(0,-1), load(1,-1), load(2,-1), dx), + bicubic(load(-1, 0), load(0, 0), load(1, 0), load(2, 0), dx), + bicubic(load(-1, 1), load(0, 1), load(1, 1), load(2, 1), dx), + bicubic(load(-1, 2), load(0, 2), load(1, 2), load(2, 2), dx), dy); + + dst[index] = result; +} + static void upscale_f32_cuda(const float * x, float * dst, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int ne13, @@ -104,6 +168,18 @@ static void upscale_f32_bilinear_cuda(const float * x, float * dst, upscale_f32_bilinear<<>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset); } +static void upscale_f32_bicubic_cuda(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne00_src, const int ne01_src, + const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst, + const float sf0, const float sf1, const float sf2, const float sf3, + const float pixel_offset, cudaStream_t stream) { + const int64_t dst_size = ne10_dst * ne11_dst * ne12_dst * ne13_dst; + const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; + + upscale_f32_bicubic<<>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset); +} + void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; @@ -121,17 +197,22 @@ void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { float sf2 = (float)dst->ne[2]/src0->ne[2]; const float sf3 = (float)dst->ne[3]/src0->ne[3]; + float pixel_offset = 0.5f; + if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { + sf0 = dst->ne[0] > 1 && src0->ne[0] > 1 ? (float)(dst->ne[0] - 1) / (src0->ne[0] - 1) : sf0; + sf1 = dst->ne[1] > 1 && src0->ne[1] > 1 ? (float)(dst->ne[1] - 1) / (src0->ne[1] - 1) : sf1; + pixel_offset = 0.0f; + } + if (mode == GGML_SCALE_MODE_NEAREST) { upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream); } else if (mode == GGML_SCALE_MODE_BILINEAR) { - float pixel_offset = 0.5f; - if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { - sf0 = dst->ne[0] > 1 && src0->ne[0] > 1 ? (float)(dst->ne[0] - 1) / (src0->ne[0] - 1) : sf0; - sf1 = dst->ne[1] > 1 && src0->ne[1] > 1 ? 
(float)(dst->ne[1] - 1) / (src0->ne[1] - 1) : sf1; - pixel_offset = 0.0f; - } upscale_f32_bilinear_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src0->ne[0], src0->ne[1], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, pixel_offset, stream); + } else if (mode == GGML_SCALE_MODE_BICUBIC) { + upscale_f32_bicubic_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], + src0->ne[0], src0->ne[1], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + sf0, sf1, sf2, sf3, pixel_offset, stream); } } diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 3dc4d035509..1d3a318a569 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -2944,8 +2944,11 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; // Assuming F32 for now, can be expanded case GGML_OP_PAD: return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; - case GGML_OP_UPSCALE: - return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; + case GGML_OP_UPSCALE: { + ggml_scale_mode mode = (ggml_scale_mode)(ggml_get_op_params_i32(op, 0) & 0xFF); + return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32 && + (mode == GGML_SCALE_MODE_NEAREST || mode == GGML_SCALE_MODE_BILINEAR); + } case GGML_OP_CONV_2D: return (op->src[0]->type == GGML_TYPE_F16 && op->src[1]->type == GGML_TYPE_F16 && op->type == GGML_TYPE_F16) || (op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32) || diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 7570febeaad..a7a28b1934c 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -620,7 +620,7 @@ struct vk_device_struct { vk_pipeline pipeline_add_id_f32; vk_pipeline pipeline_concat_f32, pipeline_concat_f16, pipeline_concat_i32; - vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32; + vk_pipeline pipeline_upscale_nearest_f32, pipeline_upscale_bilinear_f32, pipeline_upscale_bicubic_f32; vk_pipeline pipeline_scale_f32; vk_pipeline pipeline_sqr_f32; vk_pipeline pipeline_sqrt_f32; @@ -3702,6 +3702,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_upscale_nearest_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_NEAREST}, 1); ggml_vk_create_pipeline(device, device->pipeline_upscale_bilinear_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_BILINEAR}, 1); + ggml_vk_create_pipeline(device, device->pipeline_upscale_bicubic_f32, "upscale_f32", upscale_f32_len, upscale_f32_data, "main", 2, sizeof(vk_op_upscale_push_constants), {512, 1, 1}, {GGML_SCALE_MODE_BICUBIC}, 1); ggml_vk_create_pipeline(device, device->pipeline_scale_f32, "scale_f32", scale_f32_len, scale_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); @@ -8200,6 +8201,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_upscale_nearest_f32; case GGML_SCALE_MODE_BILINEAR: return ctx->device->pipeline_upscale_bilinear_f32; + case GGML_SCALE_MODE_BICUBIC: + return ctx->device->pipeline_upscale_bicubic_f32; default: return nullptr; } diff 
--git a/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp b/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp index 8670aad32c3..037ab0c78f0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/upscale.comp @@ -20,6 +20,7 @@ layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; // from ggml.h: enum ggml_scale_mode, enum ggml_scale_flag #define NEAREST 0 #define BILINEAR 1 +#define BICUBIC 2 layout (constant_id = 0) const uint scale_mode = 0; @@ -61,6 +62,39 @@ float interpolate_bilinear(uint i10, uint i11, uint i12, uint i13) { return fetch_bilinear(c0, c1, d, i12, i13); } +// Bicubic interpolation with alpha = -0.75 +// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +const vec4 bcoeffs1 = vec4( 1.25, -2.25, 0.0, 1.0); +const vec4 bcoeffs2 = vec4(-0.75, 3.75, -6.0, 3.0); +vec4 powers(float x) { return vec4(x*x*x, x*x, x, 1); } + +float bicubic(float p0, float p1, float p2, float p3, float x) { + return p0 * dot(bcoeffs2, powers(x + 1)) + + p1 * dot(bcoeffs1, powers(x )) + + p2 * dot(bcoeffs1, powers(1 - x)) + + p3 * dot(bcoeffs2, powers(2 - x)); +} + +#define FETCH(a,b) data_a[base + clamp(i.x+(a), 0, res.x) * p.nb00 + clamp(i.y+(b), 0, res.y) * p.nb01] + +float interpolate_bicubic(uint i10, uint i11, uint i12, uint i13) { + const ivec2 res = ivec2(p.ne00 - 1, p.ne01 - 1); + + const vec2 coord = (vec2(i10, i11) + p.pixel_offset) / vec2(p.sf0, p.sf1) - p.pixel_offset; + const vec2 d = fract(coord); + const ivec2 i = ivec2(floor(coord)); + + const uint i02 = uint(i12 / p.sf2); + const uint i03 = uint(i13 / p.sf3); + const uint base = p.a_offset + i03 * p.nb03 + i02 * p.nb02; + + return bicubic( + bicubic(FETCH(-1,-1), FETCH(0,-1), FETCH(1,-1), FETCH(2,-1), d.x), + bicubic(FETCH(-1, 0), FETCH(0, 0), FETCH(1, 0), FETCH(2, 0), d.x), + bicubic(FETCH(-1, 1), FETCH(0, 1), FETCH(1, 1), FETCH(2, 1), d.x), + bicubic(FETCH(-1, 2), FETCH(0, 2), FETCH(1, 2), FETCH(2, 2), d.x), d.y); +} + void main() { const uint idx = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; @@ -81,6 +115,9 @@ void main() { case BILINEAR: result = interpolate_bilinear(i10, i11, i12, i13); break; + case BICUBIC: + result = interpolate_bicubic(i10, i11, i12, i13); + break; } data_d[p.d_offset + idx] = D_TYPE(result); From 5c643599a21206774252f0869115b09c121458bb Mon Sep 17 00:00:00 2001 From: fj-y-saito <85871716+fj-y-saito@users.noreply.github.com> Date: Mon, 10 Nov 2025 22:12:59 +0900 Subject: [PATCH 03/51] =?UTF-8?q?arm64:=20add=20i8mm=20route=20with=20SVE?= =?UTF-8?q?=20ggml=5Fvec=5Fdot=5Fq4=5FK=5Fq8=5FK=20and=20ggml=5Fvec=5Fdot?= =?UTF-8?q?=5Fq6=5FK=5F=E2=80=A6=20(#15277)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add i8mm route with SVE ggml_vec_dot_q4_K_q8_K and ggml_vec_dot_q6_K_q8_K * Surround SVE function with compiler directive * fix compile switch * fix coding style * ggml : fix indent --------- Co-authored-by: Georgi Gerganov --- ggml/src/ggml-cpu/arch/arm/quants.c | 454 ++++++++++++++++++++++++++-- 1 file changed, 428 insertions(+), 26 deletions(-) diff --git a/ggml/src/ggml-cpu/arch/arm/quants.c b/ggml/src/ggml-cpu/arch/arm/quants.c index aadbb487ec0..b390ab61c78 100644 --- a/ggml/src/ggml-cpu/arch/arm/quants.c +++ b/ggml/src/ggml-cpu/arch/arm/quants.c @@ -2044,6 +2044,26 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi } +#ifdef __ARM_FEATURE_SVE +static inline svuint32_t 
ggml_decode_q4scales_and_mins_for_mmla(const uint32_t * vx_scales) { + const svbool_t pg_all = svptrue_pat_b32(SV_VL4); + const svbool_t pg_false = svpfalse_b(); // 0x0000 + const svbool_t pg_lo_8 = svwhilelt_b8_s32(0, 8); // 0x00ff + const svbool_t pg_odd = svzip1_b32(pg_false, pg_lo_8); + + svuint32_t vutmp_hi, vutmp_lo; + svuint32_t vx01 = svld1_u32(pg_lo_8, vx_scales); + vutmp_hi = svzip1_u32(vx01, vx01); + vutmp_hi = svlsr_n_u32_m(pg_odd, vutmp_hi, 2); + vutmp_hi = svreinterpret_u32_u64(svand_n_u64_x(pg_all, svreinterpret_u64_u32(vutmp_hi), UINT64_C(0x303030303f3f3f3f))); + const svuint32_t vx2 = svdup_u32(vx_scales[2]); + vutmp_lo = svlsr_u32_x(pg_all, vx2, svreinterpret_u32_s32(svindex_s32(-2, 2))); + vutmp_lo = svand_n_u32_z(pg_odd, vutmp_lo, UINT32_C(0x0f0f0f0f)); + svuint32_t vutmp = svorr_u32_z(pg_all, vutmp_hi, vutmp_lo); + return vutmp; +} +#endif + void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { assert(n % QK_K == 0); #ifdef __ARM_FEATURE_MATMUL_INT8 @@ -2066,8 +2086,220 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi static const uint32_t kmask3 = 0x03030303; uint32_t utmp[4]; +#ifdef __ARM_FEATURE_SVE + const int vector_length = ggml_cpu_get_sve_cnt()*8; +#endif -#if defined(__ARM_FEATURE_MATMUL_INT8) +#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + svbool_t pg32_2 = svptrue_pat_b32(SV_VL2); + + const block_q4_K * GGML_RESTRICT vx0 = vx; + const block_q8_K * GGML_RESTRICT vy0 = vy; + const block_q4_K * GGML_RESTRICT vx1 = (const block_q4_K *) ((const uint8_t*)vx + bx); + const block_q8_K * GGML_RESTRICT vy1 = (const block_q8_K *) ((const uint8_t*)vy + by); + + union { + uint32_t u32[8]; + uint64_t u64[4]; + } new_utmp; + + svfloat32_t sumf1 = svdup_n_f32(0); + + switch (vector_length) { + case 128: + { + svbool_t pg_false = svpfalse_b(); + svbool_t pg_lo_8 = svwhilelt_b8_s32(0, 8); + svbool_t vmins_mask1= svzip1_b32(pg_lo_8, pg_false); + svbool_t vmins_mask2 = svzip1_b32(pg_false, pg_lo_8); + svbool_t pg128_all = svptrue_pat_b8(SV_VL16); + for (int i = 0; i < nb; ++i) { + svfloat32_t vy_d = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)); + svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); + svfloat32_t svsuper_block_scales = svmul_f32_x(pg128_all, vy_d, vx_d); + svfloat32_t vx_dmins = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].dmin)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].dmin))); + svfloat32_t vy_dmins = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)); + svfloat32_t svdmins = svmul_n_f32_x(pg128_all, svmul_f32_x(pg128_all, vy_dmins, vx_dmins), -1); + const uint8_t * GGML_RESTRICT q4_0 = vx0[i].qs; + const int8_t * GGML_RESTRICT q8_0 = vy0[i].qs; + const uint8_t * GGML_RESTRICT q4_1 = vx1[i].qs; + const int8_t * GGML_RESTRICT q8_1 = vy1[i].qs; + svint16_t lo = svld1_s16(pg128_all, vy0[i].bsums + 0); + svint16_t hi = svld1_s16(pg128_all, vy0[i].bsums + 8); + svint16_t sum_tmp1 = svuzp1_s16(lo, hi); + svint16_t sum_tmp2 = svuzp2_s16(lo, hi); + svint16_t svq8sums_0 = svadd_s16_x(pg128_all, sum_tmp1, sum_tmp2); + lo = svld1_s16(pg128_all, vy1[i].bsums + 0); + hi = svld1_s16(pg128_all, vy1[i].bsums + 8); + sum_tmp1 = svuzp1(lo, hi); + sum_tmp2 = svuzp2(lo, hi); + svint16_t svq8sums_1 = svadd_s16_x(pg128_all, sum_tmp1, sum_tmp2); + svuint32_t decoded_scales0 = 
ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx0[i].scales); + svuint32_t decoded_scales1 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx1[i].scales); + svuint32x2_t decoded_scales = svcreate2_u32(decoded_scales0, decoded_scales1); + svst2_u32(pg128_all, new_utmp.u32, decoded_scales); + svint16_t svmins8_0 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u32(svuzp1_u32(svld1_u32(vmins_mask1, new_utmp.u32+4), svdup_n_u32(0))))); + svint16_t svmins8_1 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u32(svuzp2_u32(svld1_u32(vmins_mask2, new_utmp.u32+4), svdup_n_u32(0))))); + svint32_t svsumfs_tmp1 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_0, svmins8_0)); + svint32_t svsumfs_tmp2 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_0, svmins8_1)); + svint32_t svsumfs_tmp3 = svtrn1_s32(svsumfs_tmp1, svsumfs_tmp2); + svint32_t svsumfs_tmp4 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_1, svmins8_0)); + svint32_t svsumfs_tmp5 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_1, svmins8_1)); + svint32_t svsumfs_tmp6 = svtrn1_s32(svsumfs_tmp4, svsumfs_tmp5); + svint32_t svsumfs_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(svsumfs_tmp3), svreinterpret_s64_s32(svsumfs_tmp6))); + svint32_t svsumfs_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(svsumfs_tmp3), svreinterpret_s64_s32(svsumfs_tmp6))); + svint32_t svsumfs_tmp = svadd_s32_x(pg128_all, svsumfs_tmp7, svsumfs_tmp8); + svint32_t svscales, sumi1, sumi2; + svint32_t acc_sumif1 = svdup_n_s32(0); + svint32_t acc_sumif2 = svdup_n_s32(0); + svint8_t q4bytes_0_l, q4bytes_0_h, q4bytes_1_l, q4bytes_1_h, l0, l1, l2, l3, + q8bytes_0_h, q8bytes_0_l, q8bytes_1_h, q8bytes_1_l, r0, r1, r2, r3; +#pragma GCC unroll 1 + for (int j = 0; j < QK_K/64; ++j) { + q4bytes_0_l = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0), 0xf)); + q4bytes_1_l = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1), 0xf)); + q4bytes_0_h = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0+16), 0xf)); + q4bytes_1_h = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1+16), 0xf)); + l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); + l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); + l2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); + l3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); + q8bytes_0_h = svld1_s8(pg128_all, q8_0); + q8bytes_1_h = svld1_s8(pg128_all, q8_1); + q8bytes_0_l = svld1_s8(pg128_all, q8_0+16); + q8bytes_1_l = svld1_s8(pg128_all, q8_1+16); + r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); + r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); + r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); + r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); + sumi1 = svmmla_s32(svmmla_s32(svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), r2, l2), r3, l3); + svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg128_all, svlsl_n_u32_x(pg128_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-1)), 24)); + 
acc_sumif1 = svmla_s32_x(pg128_all, acc_sumif1, svscales, sumi1); + + q4bytes_0_l = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0), 4)); + q4bytes_1_l = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1), 4)); + q4bytes_0_h = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0+16), 4)); + q4bytes_1_h = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1+16), 4)); + l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); + l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l))); + l2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); + l3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h))); + q8bytes_0_h = svld1_s8(pg128_all, q8_0+32); + q8bytes_1_h = svld1_s8(pg128_all, q8_1+32); + q8bytes_0_l = svld1_s8(pg128_all, q8_0+48); + q8bytes_1_l = svld1_s8(pg128_all, q8_1+48); + r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); + r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h))); + r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); + r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l))); + sumi2 = svmmla_s32(svmmla_s32(svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), r2, l2), r3, l3); + svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg128_all, svlsl_n_u32_x(pg128_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-2)), 24)); + acc_sumif2 = svmla_s32_x(pg128_all, acc_sumif2, svscales, sumi2); + q4_0 += 32; q4_1 += 32; q8_0 += 64; q8_1 += 64; + } + sumf1 = svmla_f32_x(pg128_all, + svmla_f32_x(pg128_all, + sumf1, + svcvt_f32_x(pg128_all, + svadd_s32_x(pg128_all, acc_sumif1, acc_sumif2)), + svsuper_block_scales), + svdmins, + svcvt_f32_s32_x(pg128_all, svsumfs_tmp)); + } //end of for nb + } // end of case 128 + break; + case 256: + case 512: + { + const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); + const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); + const svbool_t pg256_all = svptrue_pat_b8(SV_ALL); + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4_0 = vx0[i].qs; + const int8_t * GGML_RESTRICT q8_0 = vy0[i].qs; + const uint8_t * GGML_RESTRICT q4_1 = vx1[i].qs; + const int8_t * GGML_RESTRICT q8_1 = vy1[i].qs; + svint32_t svscales, sumi1, sumi2; + svint32_t acc_sumif1 = svdup_n_s32(0); + svint32_t acc_sumif2 = svdup_n_s32(0); + svint8_t l0, l1, l2, l3, r0, r1, r2, r3; + svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); + svfloat64_t vy_d_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d))); + svfloat32_t vy_d = svreinterpret_f32_f64(svuzp1_f64(vy_d_tmp, vy_d_tmp)); + svfloat32_t svsuper_block_scales = svmul_f32_z(pg32_4, vy_d, vx_d); + svfloat32_t vx_dmins = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].dmin)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].dmin))); + svfloat64_t vy_dmins_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d))); + svfloat32_t vy_dmins = svreinterpret_f32_f64(svuzp1_f64(vy_dmins_tmp, vy_dmins_tmp)); + svfloat32_t svdmins = svmul_n_f32_x(pg32_4, svmul_f32_x(pg32_4, vx_dmins, vy_dmins), -1); + svint16_t 
rc1 = svuzp1_s16(svld1_s16(pg256_all, vy0[i].bsums), svld1_s16(pg256_all, vy1[i].bsums)); + svint16_t rc2 = svuzp2_s16(svld1_s16(pg256_all, vy0[i].bsums), svld1_s16(pg256_all, vy1[i].bsums)); + svint16_t svq8sums = svadd_s16_x(pg256_all, rc1, rc2); + svuint32_t decoded_scales0 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx0[i].scales); + svuint32_t decoded_scales1 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx1[i].scales); + svuint32x2_t decoded_scales = svcreate2_u32(decoded_scales0, decoded_scales1); + svst2_u32(pg8_16, new_utmp.u32, decoded_scales); + svint16_t new_svq8sums_0 = svreinterpret_s16_u64(svtrn1_u64(svreinterpret_u64_s16(svq8sums), svreinterpret_u64_s16(svq8sums))); + svint16_t new_svq8sums_1 = svreinterpret_s16_u64(svtrn2_u64(svreinterpret_u64_s16(svq8sums), svreinterpret_u64_s16(svq8sums))); + svuint64_t new_mins_0 = svdup_u64(new_utmp.u64[2]); + svuint64_t new_mins_1 = svdup_u64(new_utmp.u64[3]); + svint16_t new_svmins8_0 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u64(new_mins_0))); + svint16_t new_svmins8_1 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u64(new_mins_1))); + svint64_t dot_prod_0 = svdot_s64(svdup_s64(0), new_svmins8_0, new_svq8sums_0); + svint64_t dot_prod_1 = svdot_s64(dot_prod_0, new_svmins8_1, new_svq8sums_1); + svfloat32_t converted_dot_prod_1 = svcvt_f32_s64_x(pg256_all, dot_prod_1); + svfloat32_t svsumfs_tmp = svuzp1_f32(converted_dot_prod_1, converted_dot_prod_1); + +#pragma GCC unroll 1 + for (int j = 0; j < QK_K/64; ++j) { + svuint8_t q4bytes_0 = svand_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_0), 0xf); + svuint8_t q4bytes_1 = svand_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_1), 0xf); + svuint8_t q4bytes_2 = svlsr_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_0), 4); + svuint8_t q4bytes_3 = svlsr_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_1), 4); + l0 = svreinterpret_s8_u64(svzip1_u64(svreinterpret_u64_u8(q4bytes_0), svreinterpret_u64_u8(q4bytes_1))); + l1 = svreinterpret_s8_u64(svzip2_u64(svreinterpret_u64_u8(q4bytes_0), svreinterpret_u64_u8(q4bytes_1))); + l2 = svreinterpret_s8_u64(svzip1_u64(svreinterpret_u64_u8(q4bytes_2), svreinterpret_u64_u8(q4bytes_3))); + l3 = svreinterpret_s8_u64(svzip2_u64(svreinterpret_u64_u8(q4bytes_2), svreinterpret_u64_u8(q4bytes_3))); + svint8_t q8bytes_0 = svld1_s8(pg256_all, q8_0); + svint8_t q8bytes_1 = svld1_s8(pg256_all, q8_1); + svint8_t q8bytes_2 = svld1_s8(pg256_all, q8_0+32); + svint8_t q8bytes_3 = svld1_s8(pg256_all, q8_1+32); + r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); + r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); + r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_2), svreinterpret_s64_s8(q8bytes_3))); + r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_2), svreinterpret_s64_s8(q8bytes_3))); + sumi1 = svmmla(svmmla(svdup_n_s32(0), r0, l0), r1, l1); + svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg256_all, svlsl_n_u32_x(pg256_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-1)), 24)); + acc_sumif1 = svmla_s32_x(pg256_all, acc_sumif1, svscales, sumi1); + sumi2 = svmmla(svmmla(svdup_n_s32(0), r2, l2), r3, l3); + svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg256_all, svlsl_n_u32_x(pg256_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-2)), 24)); + acc_sumif2 = svmla_s32_x(pg256_all, acc_sumif2, svscales, sumi2); + q4_0 += 32; q4_1 += 32; q8_0 += 64; q8_1 += 64; + } 
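(For reference while reading this patch: each svmmla/svmmla_s32(acc, a, b) call here maps to Arm's SMMLA instruction from FEAT_I8MM. Per 128-bit vector granule it treats a and b as two rows of eight int8 values each and accumulates a * transpose(b) into a 2x2 int32 tile. A scalar model of one granule, for illustration only and not part of the patch:

    #include <stdint.h>

    // acc += a * b^T for one granule: a and b are 2x8 row-major int8 tiles.
    static void smmla_granule_ref(int32_t acc[2][2],
                                  const int8_t a[2][8], const int8_t b[2][8]) {
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                for (int k = 0; k < 8; k++) {
                    acc[i][j] += (int32_t) a[i][k] * (int32_t) b[j][k];
                }
            }
        }
    }

This is why the code zips rows from the two input matrices into the l0..l3 / r0..r3 pairs with svzip1/svzip2 before each call: each operand must hold the two 8-byte rows of one tile.)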
+ svint32_t acc_sumif = svadd_s32_x(pg256_all, acc_sumif1, acc_sumif2); + svint32_t swap_acc_sumif = svext_s32(acc_sumif, acc_sumif, 4); + acc_sumif = svadd_s32_x(pg32_4, acc_sumif, swap_acc_sumif); + sumf1 = svmla_f32_x(pg32_4, + svmla_f32_x(pg32_4, + sumf1, + svcvt_f32_x(pg32_4, acc_sumif), + svsuper_block_scales), + svdmins, + svsumfs_tmp); + } // end of for nb + } // end of case 256-512 + break; + default: + assert(false && "Unsupported vector length"); + break; + } + + svst1_f32(pg32_2, s, sumf1); + svst1_f32(pg32_2, s + bs, svreinterpret_f32_u8(svext_u8(svreinterpret_u8_f32(sumf1), svdup_n_u8(0), 8))); + + return; + } +#elif defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_K * GGML_RESTRICT x0 = x; const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); @@ -2235,7 +2467,6 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const uint8_t * GGML_RESTRICT q4 = x[i].qs; const int8_t * GGML_RESTRICT q8 = y[i].qs; - const int vector_length = ggml_cpu_get_sve_cnt()*8; const svuint8_t m4b = svdup_n_u8(0xf); const svint32_t mzero = svdup_n_s32(0); svint32_t sumi1 = svdup_n_s32(0); @@ -2480,7 +2711,201 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi const int nb = n / QK_K; -#if defined(__ARM_FEATURE_MATMUL_INT8) +#ifdef __ARM_FEATURE_SVE + const int vector_length = ggml_cpu_get_sve_cnt()*8; +#endif +#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const svbool_t pg32_2 = svptrue_pat_b32(SV_VL2); + + svfloat32_t sum = svdup_n_f32(0); + + const block_q6_K * GGML_RESTRICT vx0 = vx; + const block_q8_K * GGML_RESTRICT vy0 = vy; + const block_q6_K * GGML_RESTRICT vx1 = (const block_q6_K *) ((const uint8_t*)vx + bx); + const block_q8_K * GGML_RESTRICT vy1 = (const block_q8_K *) ((const uint8_t*)vy + by); + + switch (vector_length) { + case 128: + { + const svbool_t pg128_all = svptrue_pat_b8(SV_ALL); + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT ql0 = vx0[i].ql; + const uint8_t * GGML_RESTRICT qh0 = vx0[i].qh; + const uint8_t * GGML_RESTRICT ql1 = vx1[i].ql; + const uint8_t * GGML_RESTRICT qh1 = vx1[i].qh; + const int8_t * GGML_RESTRICT q80 = vy0[i].qs; + const int8_t * GGML_RESTRICT q81 = vy1[i].qs; + + const int8_t * GGML_RESTRICT scale0 = vx0[i].scales; + const int8_t * GGML_RESTRICT scale1 = vx1[i].scales; + + svfloat32_t vy_d = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)); + svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); + svfloat32_t svsuper_block_scales = svmul_f32_x(pg128_all, vy_d, vx_d); + // process q8sum summation 128 bit route + const svint16_t q8sums_01 = svld1_s16(pg128_all, vy0[i].bsums); + const svint16_t q8sums_02 = svld1_s16(pg128_all, vy0[i].bsums + 8); + const svint16_t q8sums_11 = svld1_s16(pg128_all, vy1[i].bsums); + const svint16_t q8sums_12 = svld1_s16(pg128_all, vy1[i].bsums + 8); + const svint64x2_t q6scales_0_tmp = svld2_s64(pg128_all, (const int64_t *)scale0); + const svint16_t q6scales_01 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_0_tmp, 0))); + const svint16_t q6scales_02 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_0_tmp, 1))); + const svint64x2_t q6scales_1_tmp = svld2_s64(pg128_all, (const int64_t *)scale1); + const svint16_t q6scales_11 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_1_tmp, 0))); + const svint16_t q6scales_12 = 
svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_1_tmp, 1))); + const svint64_t prod = svdup_n_s64(0); + + svint32_t isum_tmp1 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_01, q6scales_01), q8sums_02, q6scales_02)); + svint32_t isum_tmp2 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_01, q6scales_11), q8sums_02, q6scales_12)); + svint32_t isum_tmp3 = svtrn1_s32(isum_tmp1, isum_tmp2); + svint32_t isum_tmp4 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_11, q6scales_01), q8sums_12, q6scales_02)); + svint32_t isum_tmp5 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_11, q6scales_11), q8sums_12, q6scales_12)); + svint32_t isum_tmp6 = svtrn1_s32(isum_tmp4, isum_tmp5); + svint32_t isum_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(isum_tmp3), svreinterpret_s64_s32(isum_tmp6))); + svint32_t isum_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(isum_tmp3), svreinterpret_s64_s32(isum_tmp6))); + svint32_t svisum_mins = svadd_s32_x(pg128_all, isum_tmp7, isum_tmp8); + + // process mmla + svint8_t l0, l1, r0, r1; + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; ++j) { + for (int k = 0; k < 8; ++k) { + svuint8_t qhbits_0 = svld1_u8(pg128_all, qh0+16*(k%2)); + svuint8_t qhbits_1 = svld1_u8(pg128_all, qh1+16*(k%2)); + svuint8_t q6bits_0 = svld1_u8(pg128_all, ql0+16*(k%4)); + svuint8_t q6bits_1 = svld1_u8(pg128_all, ql1+16*(k%4)); + const int ql_pos = (k/4)*4; + svuint8_t q6bytes_0_lo = (ql_pos < 4) ? svand_n_u8_x(pg128_all, q6bits_0, 0xf) : svlsr_n_u8_x(pg128_all, q6bits_0, 4); + svuint8_t q6bytes_1_lo = (ql_pos < 4) ? svand_n_u8_x(pg128_all, q6bits_1, 0xf) : svlsr_n_u8_x(pg128_all, q6bits_1, 4); + const int qh_pos = (k/2)*2; + svuint8_t q6bytes_0_hi = svand_n_u8_x(pg128_all, qhbits_0, 0x3 << qh_pos); + svuint8_t q6bytes_1_hi = svand_n_u8_x(pg128_all, qhbits_1, 0x3 << qh_pos); + svint8_t q6bytes_0, q6bytes_1; + if (qh_pos <= 4) { + q6bytes_0 = svreinterpret_s8_u8(svmla_n_u8_x(pg128_all, q6bytes_0_lo, q6bytes_0_hi, 1 << (4 - qh_pos))); + q6bytes_1 = svreinterpret_s8_u8(svmla_n_u8_x(pg128_all, q6bytes_1_lo, q6bytes_1_hi, 1 << (4 - qh_pos))); + } else { + q6bytes_0 = svreinterpret_s8_u8(svorr_u8_x(pg128_all, q6bytes_0_lo, svlsr_n_u8_x(pg128_all, q6bytes_0_hi, (qh_pos - 4)))); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg128_all, q6bytes_1_lo, svlsr_n_u8_x(pg128_all, q6bytes_1_hi, (qh_pos - 4)))); + } + svint8_t q8bytes_0 = svld1_s8(pg128_all, q80+16*(k%8)); + svint8_t q8bytes_1 = svld1_s8(pg128_all, q81+16*(k%8)); + l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); + l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); + r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); + r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); + svint32_t svscale = svzip1_s32(svdup_n_s32(scale0[k]), svdup_n_s32(scale1[k])); + isum_tmp = svmla_s32_x(pg128_all, isum_tmp, svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), svscale); + } + qh0 += 32; qh1 += 32; + ql0 += 64; ql1 += 64; + q80 += 128; q81 += 128; + scale0 += 8; scale1 += 8; + } + sum = svmla_f32_x(pg128_all, sum, + svcvt_f32_x(pg128_all, svmla_s32_x(pg128_all, isum_tmp, + svisum_mins, svdup_n_s32(-32))), + svsuper_block_scales); + } + } // end of case 128 + break; + case 256: + case 512: + { + const svbool_t pg256_all = svptrue_pat_b8(SV_ALL); + const 
svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT ql0 = vx0[i].ql; + const uint8_t * GGML_RESTRICT qh0 = vx0[i].qh; + const uint8_t * GGML_RESTRICT ql1 = vx1[i].ql; + const uint8_t * GGML_RESTRICT qh1 = vx1[i].qh; + const int8_t * GGML_RESTRICT q80 = vy0[i].qs; + const int8_t * GGML_RESTRICT q81 = vy1[i].qs; + + const int8_t * GGML_RESTRICT scale0 = vx0[i].scales; + const int8_t * GGML_RESTRICT scale1 = vx1[i].scales; + svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d))); + svfloat64_t vy_d_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d))); + svfloat32_t vy_d = svreinterpret_f32_f64(svuzp1_f64(vy_d_tmp, vy_d_tmp)); + svfloat32_t svsuper_block_scales = svmul_f32_x(pg32_4, vy_d, vx_d); + // process q8sum summation 256 bit route + const svint16_t q8sums_0 = svld1_s16(pg256_all, vy0[i].bsums); + const svint16_t q8sums_1 = svld1_s16(pg256_all, vy1[i].bsums); + const svint16_t q6scales_0 = svunpklo_s16(svld1_s8(pg256_all, scale0)); + const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(pg256_all, scale1)); + const svint64_t prod = svdup_n_s64(0); + svint32_t isum_tmp1 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_0, q6scales_0)); + svint32_t isum_tmp2 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_0, q6scales_1)); + svint32_t isum_tmp3 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_1, q6scales_0)); + svint32_t isum_tmp4 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_1, q6scales_1)); + svint32_t isum_tmp5 = svtrn1_s32(isum_tmp1, isum_tmp2); + svint32_t isum_tmp6 = svtrn1_s32(isum_tmp3, isum_tmp4); + svint32_t isum_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(isum_tmp5), svreinterpret_s64_s32(isum_tmp6))); + svint32_t isum_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(isum_tmp5), svreinterpret_s64_s32(isum_tmp6))); + svint32_t isum_tmp9 = svadd_s32_x(pg256_all, isum_tmp7, isum_tmp8); + svint32_t isum_tmp10 = svreinterpret_s32_u8(svext_u8(svreinterpret_u8_s32(isum_tmp9), svreinterpret_u8_s32(isum_tmp9), 16)); + svint32_t svisum_mins = svadd_s32_z(pg32_4, isum_tmp9, isum_tmp10); + + // process mmla + svint8_t l0, l1, r0, r1; + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; ++j) { + for (int k = 0; k < 8; k+=2) { // process 2 block + svuint8_t qhbits_0 = svld1_u8(pg256_all, qh0); + svuint8_t qhbits_1 = svld1_u8(pg256_all, qh1); + svuint8_t q6bits_0 = svld1_u8(pg256_all, ql0+32*((k%4)/2)); + svuint8_t q6bits_1 = svld1_u8(pg256_all, ql1+32*((k%4)/2)); + const int ql_pos = (k/4)*4; + svuint8_t q6bytes_0_lo = (ql_pos < 4) ? svand_n_u8_x(pg256_all, q6bits_0, 0xf) : svlsr_n_u8_x(pg256_all, q6bits_0, 4); + svuint8_t q6bytes_1_lo = (ql_pos < 4) ? 
svand_n_u8_x(pg256_all, q6bits_1, 0xf) : svlsr_n_u8_x(pg256_all, q6bits_1, 4); + const int qh_pos = (k/2)*2; + svuint8_t q6bytes_0_hi = svand_n_u8_x(pg256_all, qhbits_0, 0x3 << qh_pos); + svuint8_t q6bytes_1_hi = svand_n_u8_x(pg256_all, qhbits_1, 0x3 << qh_pos); + svint8_t q6bytes_0, q6bytes_1; + if (qh_pos <= 4) { + q6bytes_0 = svreinterpret_s8_u8(svmla_n_u8_x(pg256_all, q6bytes_0_lo, q6bytes_0_hi, 1 << (4 - qh_pos))); + q6bytes_1 = svreinterpret_s8_u8(svmla_n_u8_x(pg256_all, q6bytes_1_lo, q6bytes_1_hi, 1 << (4 - qh_pos))); + } else { + q6bytes_0 = svreinterpret_s8_u8(svorr_u8_x(pg256_all, q6bytes_0_lo, svlsr_n_u8_x(pg256_all, q6bytes_0_hi, (qh_pos - 4)))); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg256_all, q6bytes_1_lo, svlsr_n_u8_x(pg256_all, q6bytes_1_hi, (qh_pos - 4)))); + } + svint8_t q8bytes_0 = svld1_s8(pg256_all, q80+32*(k/2)); + svint8_t q8bytes_1 = svld1_s8(pg256_all, q81+32*(k/2)); + l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); + l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1))); + r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); + r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1))); + svint32_t svscale0 = svzip1_s32(svdup_n_s32(scale0[k]), svdup_n_s32(scale1[k])); + svint32_t svscale1 = svzip1_s32(svdup_n_s32(scale0[k+1]), svdup_n_s32(scale1[k+1])); + isum_tmp = svmla_s32_x(pg256_all, isum_tmp, svmmla_s32(svdup_n_s32(0), r0, l0), svscale0); + isum_tmp = svmla_s32_x(pg256_all, isum_tmp, svmmla_s32(svdup_n_s32(0), r1, l1), svscale1); + } + qh0 += 32; qh1 += 32; + ql0 += 64; ql1 += 64; + q80 += 128; q81 += 128; + scale0 += 8; scale1 += 8; + } // end of for + svint32_t swap_isum_tmp = svext_s32(isum_tmp, isum_tmp, 4); + isum_tmp = svadd_s32_x(pg32_4, isum_tmp, swap_isum_tmp); + sum = svmla_f32_x(pg32_4, sum, + svcvt_f32_x(pg32_4, svmla_s32_x(pg32_4, isum_tmp, + svisum_mins, svdup_n_s32(-32))), + svsuper_block_scales); + } + } // end of case 256 + break; + default: + assert(false && "Unsupported vector length"); + break; + } // end of switch + + svst1_f32(pg32_2, s, sum); + svst1_f32(pg32_2, s + bs, svreinterpret_f32_u8(svext_u8(svreinterpret_u8_f32(sum), svdup_n_u8(0), 8))); + + return; + } +#elif defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q6_K * GGML_RESTRICT x0 = x; const block_q6_K * GGML_RESTRICT x1 = (const block_q6_K *) ((const uint8_t *)vx + bx); @@ -2594,27 +3019,6 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi // adjust bias, apply superblock scale { int32_t bias[4]; -#ifdef __ARM_FEATURE_SVE - const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); - const svbool_t pg8_8 = svptrue_pat_b8(SV_VL8); - const svint16_t y0_q8sums_0 = svld1_s16(pg16_8, y0->bsums); - const svint16_t y0_q8sums_1 = svld1_s16(pg16_8, y0->bsums + 8); - const svint16_t y1_q8sums_0 = svld1_s16(pg16_8, y1->bsums); - const svint16_t y1_q8sums_1 = svld1_s16(pg16_8, y1->bsums + 8); - const svint16_t x0_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x0->scales)); - const svint16_t x0_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x0->scales + 8)); - const svint16_t x1_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x1->scales)); - const svint16_t x1_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x1->scales + 8)); - const svint64_t zero = svdup_n_s64(0); - bias[0] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, 
x0_q6scales_0), - svdot_s64(zero, y0_q8sums_1, x0_q6scales_1))); - bias[1] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x0_q6scales_0), - svdot_s64(zero, y1_q8sums_1, x0_q6scales_1))); - bias[2] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x1_q6scales_0), - svdot_s64(zero, y0_q8sums_1, x1_q6scales_1))); - bias[3] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x1_q6scales_0), - svdot_s64(zero, y1_q8sums_1, x1_q6scales_1))); -#else // NEON doesn't support int16 dot product, fallback to separated mul and add const int16x8x2_t q8sums0 = vld1q_s16_x2(y0->bsums); const int16x8x2_t q8sums1 = vld1q_s16_x2(y1->bsums); @@ -2646,7 +3050,6 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales1.val[1])))); bias[3] = vaddvq_s32(prod); -#endif const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32); const float32x4_t superblock_scale = { @@ -2672,7 +3075,6 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const voi #endif #ifdef __ARM_FEATURE_SVE - const int vector_length = ggml_cpu_get_sve_cnt()*8; float sum = 0; svuint8_t m4b = svdup_n_u8(0xf); svint32_t vzero = svdup_n_s32(0); From 4cd5695c80eccfbdc1d898ba47522dc5099d4fd2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 10 Nov 2025 15:38:42 +0200 Subject: [PATCH 04/51] metal : enable tensor API for A19 (llama/17087) --- ggml/src/ggml-metal/ggml-metal-device.m | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index 606cfd0a5e2..3471225ab47 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -564,8 +564,10 @@ ggml_metal_device_t ggml_metal_device_init(void) { // TODO: try to update the tensor API kernels to at least match the simdgroup performance if (getenv("GGML_METAL_TENSOR_ENABLE") == NULL && ![[dev->mtl_device name] containsString:@"M5"] && - ![[dev->mtl_device name] containsString:@"M6"]) { - GGML_LOG_WARN("%s: tensor API disabled for pre-M5 device\n", __func__); + ![[dev->mtl_device name] containsString:@"M6"] && + ![[dev->mtl_device name] containsString:@"A19"] && + ![[dev->mtl_device name] containsString:@"A20"]) { + GGML_LOG_WARN("%s: tensor API disabled for pre-M5 and pre-A19 devices\n", __func__); dev->props.has_tensor = false; } From a64712e939da9310b033ada045d098d352fcd99c Mon Sep 17 00:00:00 2001 From: Ruben Ortlam Date: Mon, 10 Nov 2025 16:59:10 +0100 Subject: [PATCH 05/51] vulkan: fix validation issue introduced by #16868 (llama/17145) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index a7a28b1934c..e4b7e1ea9e5 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -6831,7 +6831,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& vk_buffer d_B = d_D; size_t b_buf_offset = 0; - uint64_t b_sz = 0; + uint64_t b_sz = 1; if (enable_bias) { const ggml_tensor * add = cgraph->nodes[node_idx + 1]; @@ -6965,7 +6965,7 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c vk_buffer d_B = d_D; size_t b_buf_offset = 0; - uint64_t b_sz = 0; + uint64_t b_sz = 1; if (enable_bias) { const ggml_tensor * add = 
cgraph->nodes[node_idx + 1];
@@ -7101,7 +7101,7 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con
     vk_buffer d_B = d_D;
     size_t b_buf_offset = 0;
-    uint64_t b_sz = 0;
+    uint64_t b_sz = 1;
 
     if (enable_bias) {
         const ggml_tensor * add = cgraph->nodes[node_idx + 1];
@@ -7676,7 +7676,7 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte
     vk_buffer d_B = d_D;
     size_t b_buf_offset = 0;
-    uint64_t b_sz = 0;
+    uint64_t b_sz = 1;
 
     if (enable_bias || enable_scale) {
         const ggml_tensor * bias = cgraph->nodes[node_idx + 1]->src[1];

From d1a83fbfdc6f92d0bbffbb53ac28625358f59768 Mon Sep 17 00:00:00 2001
From: Ruben Ortlam
Date: Mon, 10 Nov 2025 16:59:26 +0100
Subject: [PATCH 06/51] vulkan: check glslc executable string (llama/17144)

---
 ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
index c2e42cf006e..c8a6f97ecff 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <filesystem>
 
 #ifdef _WIN32
 #define NOMINMAX
@@ -1080,6 +1081,11 @@ int main(int argc, char** argv) {
 
     if (args.find("--glslc") != args.end()) {
         GLSLC = args["--glslc"]; // Path to glslc
+
+        if (!std::filesystem::exists(GLSLC) || !std::filesystem::is_regular_file(GLSLC)) {
+            std::cerr << "Error: glslc not found at " << GLSLC << std::endl;
+            return EXIT_FAILURE;
+        }
     }
     if (args.find("--source") != args.end()) {
         input_filepath = args["--source"]; // The shader source file to compile

From e4c1e3cdba2ae53a29f63a275786bf321c368ff3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrien=20Gallou=C3=ABt?=
Date: Mon, 10 Nov 2025 20:03:36 +0100
Subject: [PATCH 07/51] ggml-cpu : inspect -march and -mcpu to find the CPU
 (llama/16333)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Adrien Gallouët
---
 ggml/src/ggml-cpu/CMakeLists.txt | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt
index 23ec8bb08a7..a55191aede9 100644
--- a/ggml/src/ggml-cpu/CMakeLists.txt
+++ b/ggml/src/ggml-cpu/CMakeLists.txt
@@ -126,25 +126,36 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
             )
             if (NOT ARM_MCPU_RESULT)
                 string(REGEX MATCH "-mcpu=[^ ']+" ARM_MCPU_FLAG "${ARM_MCPU}")
+                string(REGEX MATCH "-march=[^ ']+" ARM_MARCH_FLAG "${ARM_MCPU}")
+
+                # on some old GCC we need to read -march=
+                if (ARM_MARCH_FLAG AND NOT "${ARM_MARCH_FLAG}" STREQUAL "-march=native")
+                    set(ARM_NATIVE_FLAG "${ARM_MARCH_FLAG}")
+                elseif(ARM_MCPU_FLAG AND NOT "${ARM_MCPU_FLAG}" STREQUAL "-mcpu=native")
+                    set(ARM_NATIVE_FLAG "${ARM_MCPU_FLAG}")
+                endif()
             endif()
-            if ("${ARM_MCPU_FLAG}" STREQUAL "")
-                set(ARM_MCPU_FLAG -mcpu=native)
-                message(STATUS "ARM -mcpu not found, -mcpu=native will be used")
+
+            if ("${ARM_NATIVE_FLAG}" STREQUAL "")
+                set(ARM_NATIVE_FLAG -mcpu=native)
+                message(WARNING "ARM -march/-mcpu not found, -mcpu=native will be used")
+            else()
+                message(STATUS "ARM detected flags: ${ARM_NATIVE_FLAG}")
             endif()

             include(CheckCXXSourceRuns)

             function(check_arm_feature tag code)
                 set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
-                set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+${tag}")
+                set(CMAKE_REQUIRED_FLAGS "${ARM_NATIVE_FLAG}+${tag}")
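(For reference: the probe programs that check_arm_feature compiles and runs below are ordinary C++ snippets. The SVE probe, with its include spelled out, is the following; the SME probe is analogous with arm_sme.h and an __arm_locally_streaming main:

    #include <arm_sve.h>
    int main() { svfloat32_t _a, _b; volatile svfloat32_t _c = svadd_f32_z(svptrue_b8(), _a, _b); return 0; }

If the snippet builds and runs with "${ARM_NATIVE_FLAG}+sve", the build appends +sve to the flags; otherwise it falls back to probing whether the compiler at least accepts the corresponding +nosve.)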
check_cxx_source_runs("${code}" GGML_MACHINE_SUPPORTS_${tag}) if (GGML_MACHINE_SUPPORTS_${tag}) - set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+${tag}" PARENT_SCOPE) + set(ARM_NATIVE_FLAG_FIX "${ARM_NATIVE_FLAG_FIX}+${tag}" PARENT_SCOPE) else() - set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+no${tag}") + set(CMAKE_REQUIRED_FLAGS "${ARM_NATIVE_FLAG}+no${tag}") check_cxx_source_compiles("int main() { return 0; }" GGML_MACHINE_SUPPORTS_no${tag}) if (GGML_MACHINE_SUPPORTS_no${tag}) - set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+no${tag}" PARENT_SCOPE) + set(ARM_NATIVE_FLAG_FIX "${ARM_NATIVE_FLAG_FIX}+no${tag}" PARENT_SCOPE) endif() endif() set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) @@ -155,7 +166,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) check_arm_feature(sve "#include \nint main() { svfloat32_t _a, _b; volatile svfloat32_t _c = svadd_f32_z(svptrue_b8(), _a, _b); return 0; }") check_arm_feature(sme "#include \n__arm_locally_streaming int main() { __asm__ volatile(\"smstart; smstop;\"); return 0; }") - list(APPEND ARCH_FLAGS "${ARM_MCPU_FLAG}${ARM_MCPU_FLAG_FIX}") + list(APPEND ARCH_FLAGS "${ARM_NATIVE_FLAG}${ARM_NATIVE_FLAG_FIX}") else() if (GGML_CPU_ARM_ARCH) list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) From 4413a561230440bd791a672cdf23ad9525a7d3fa Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 10 Nov 2025 21:33:35 +0200 Subject: [PATCH 08/51] metal : cap threadgroups size of set_rows (llama/17146) --- ggml/src/ggml-metal/ggml-metal-ops.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp index 7a85edbdcdb..5a8f150a71d 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.cpp +++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp @@ -1036,6 +1036,11 @@ int ggml_metal_op_set_rows(ggml_metal_op_t ctx, int idx) { nth = std::min(nth, nk0); + if (nth*nrptg > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { + nth = ggml_metal_pipeline_max_theads_per_threadgroup(pipeline); + nrptg = 1; + } + ggml_metal_kargs_set_rows args = { /*.nk0 =*/ nk0, /*.ne01 =*/ ne01, From becc46e7fee579025b85bb6d3469f84ad1dca0f8 Mon Sep 17 00:00:00 2001 From: Max Krasnyansky Date: Mon, 10 Nov 2025 12:44:49 -0800 Subject: [PATCH 09/51] cpu: skip NOPs to avoid barriers (llama/17133) * cpu: skip NOPs to avoid barriers * cpu: use ggml_op_is_empty --- ggml/src/ggml-cpu/ggml-cpu.c | 37 ++++++++++++++++++--------------- ggml/src/ggml-cpu/ops.cpp | 40 ------------------------------------ ggml/src/ggml-cpu/ops.h | 4 ---- 3 files changed, 21 insertions(+), 60 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index b5466dd703d..086708bae0b 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -1807,22 +1807,6 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_cont(params, tensor); } break; - case GGML_OP_RESHAPE: - { - ggml_compute_forward_reshape(params, tensor); - } break; - case GGML_OP_VIEW: - { - ggml_compute_forward_view(params, tensor); - } break; - case GGML_OP_PERMUTE: - { - ggml_compute_forward_permute(params, tensor); - } break; - case GGML_OP_TRANSPOSE: - { - ggml_compute_forward_transpose(params, tensor); - } break; case GGML_OP_GET_ROWS: { ggml_compute_forward_get_rows(params, tensor); @@ -2042,6 +2026,22 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { // nop } break; + case GGML_OP_RESHAPE: + { + // nop + } break; + case GGML_OP_PERMUTE: + { + // 
nop + } break; + case GGML_OP_VIEW: + { + // nop + } break; + case GGML_OP_TRANSPOSE: + { + // nop + } break; case GGML_OP_COUNT: { GGML_ABORT("fatal error"); @@ -2884,6 +2884,11 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) { struct ggml_tensor * node = cgraph->nodes[node_n]; + if (ggml_op_is_empty(node->op)) { + // skip NOPs + continue; + } + ggml_compute_forward(¶ms, node); if (state->ith == 0 && cplan->abort_callback && diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 8235f695943..7c42fb78b4a 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -4455,46 +4455,6 @@ void ggml_compute_forward_cont( ggml_compute_forward_dup(params, dst); } -// ggml_compute_forward_reshape - -void ggml_compute_forward_reshape( - const ggml_compute_params * params, - ggml_tensor * dst) { - // NOP - GGML_UNUSED(params); - GGML_UNUSED(dst); -} - -// ggml_compute_forward_view - -void ggml_compute_forward_view( - const ggml_compute_params * params, - ggml_tensor * dst) { - // NOP - GGML_UNUSED(params); - GGML_UNUSED(dst); -} - -// ggml_compute_forward_permute - -void ggml_compute_forward_permute( - const ggml_compute_params * params, - ggml_tensor * dst) { - // NOP - GGML_UNUSED(params); - GGML_UNUSED(dst); -} - -// ggml_compute_forward_transpose - -void ggml_compute_forward_transpose( - const ggml_compute_params * params, - ggml_tensor * dst) { - // NOP - GGML_UNUSED(params); - GGML_UNUSED(dst); -} - // ggml_compute_forward_get_rows static void ggml_compute_forward_get_rows_q( diff --git a/ggml/src/ggml-cpu/ops.h b/ggml/src/ggml-cpu/ops.h index 9824a03b458..2b4127c12b1 100644 --- a/ggml/src/ggml-cpu/ops.h +++ b/ggml/src/ggml-cpu/ops.h @@ -51,10 +51,6 @@ void ggml_compute_forward_scale(const struct ggml_compute_params * params, struc void ggml_compute_forward_set(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cpy(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cont(const struct ggml_compute_params * params, struct ggml_tensor * dst); -void ggml_compute_forward_reshape(const struct ggml_compute_params * params, struct ggml_tensor * dst); -void ggml_compute_forward_view(const struct ggml_compute_params * params, struct ggml_tensor * dst); -void ggml_compute_forward_permute(const struct ggml_compute_params * params, struct ggml_tensor * dst); -void ggml_compute_forward_transpose(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_set_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); From 485e42356d711f255a62bd754b6fef66d69b0473 Mon Sep 17 00:00:00 2001 From: lhez Date: Mon, 10 Nov 2025 15:00:13 -0800 Subject: [PATCH 10/51] opencl: add fastdiv and use it in set_rows, ported from cuda (llama/17090) * opencl: add fastdiv for mm q8_0 * opencl: use uint4 for fastdiv vals * opencl: use fastdiv for set_rows * opencl: do not use fastdiv for q8_0 mm --- ggml/src/ggml-opencl/ggml-opencl.cpp | 38 +++++++++++++++++- ggml/src/ggml-opencl/kernels/set_rows.cl | 51 ++++++++++++++++-------- 2 files changed, 71 insertions(+), 18 deletions(-) diff --git 
a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 1d3a318a569..465272fab90 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -53,6 +53,37 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor); +// See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1. +// Precompute mp (m' in the paper) and L such that division +// can be computed using a multiply (high 32b of 64b result) +// and a shift: +// +// n/d = (mulhi(n, mp) + n) >> L; +struct fastdiv_vals { + uint32_t mp; + uint32_t L; + uint32_t d; + uint32_t pad; +}; +static_assert(sizeof(fastdiv_vals) == 16, "fastdiv_vals size incorrect"); + +static fastdiv_vals init_fastdiv_values(uint64_t d_64) { + GGML_ASSERT(d_64 != 0); + GGML_ASSERT(d_64 <= std::numeric_limits::max()); + + uint32_t d = (uint32_t)d_64; + + // compute L = ceil(log2(d)); + uint32_t L = 0; + while (L < 32 && (uint32_t{ 1 } << L) < d) { + L++; + } + + uint32_t mp = (uint32_t) ((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d) / d + 1); + // pack divisor as well to reduce error surface + return { mp, L, d, 0 }; +} + enum GPU_FAMILY { ADRENO, INTEL, @@ -4464,6 +4495,9 @@ static void ggml_cl_set_rows(ggml_backend_t backend, const ggml_tensor * src0, c GGML_ABORT("not implemented"); } + fastdiv_vals ne11_ = init_fastdiv_values(ne11); + fastdiv_vals ne12_ = init_fastdiv_values(ne12); + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); @@ -4474,8 +4508,8 @@ static void ggml_cl_set_rows(ggml_backend_t backend, const ggml_tensor * src0, c CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03)); - CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne11)); - CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(fastdiv_vals), &ne11_)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(fastdiv_vals), &ne12_)); CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb10)); CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb11)); CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb12)); diff --git a/ggml/src/ggml-opencl/kernels/set_rows.cl b/ggml/src/ggml-opencl/kernels/set_rows.cl index dcdc1d1b6fd..fc3ff7aa1e7 100644 --- a/ggml/src/ggml-opencl/kernels/set_rows.cl +++ b/ggml/src/ggml-opencl/kernels/set_rows.cl @@ -1,5 +1,16 @@ #pragma OPENCL EXTENSION cl_khr_fp16 : enable +// v = { mp, L, d } +inline uint fastdiv(uint n, uint4 v) { + uint msbs; + msbs = mul_hi(n, v.s0); + return (msbs + n) >> v.s1; +} +inline uint fastmod(uint n, uint4 v) { + uint q = fastdiv(n, v); + return n - q * v.s2; +} + kernel void kernel_set_rows_f32_i64( global char * src0, ulong offset0, @@ -11,8 +22,8 @@ kernel void kernel_set_rows_f32_i64( ulong nb01, ulong nb02, ulong nb03, - int ne11, - int ne12, + uint4 ne11, + uint4 ne12, ulong nb10, ulong nb11, ulong nb12, @@ -33,8 +44,10 @@ kernel void kernel_set_rows_f32_i64( return; } - int i12 = i03%ne12; - int i11 = i02%ne11; + //int i12 = i03%ne12; + //int i11 = i02%ne11; + int i12 = fastmod(i03, ne12); + int i11 = fastmod(i02, ne11); int i10 = i01; long i1 = ((global long *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; @@ -58,8 +71,8 @@ kernel void kernel_set_rows_f16_i64( ulong nb01, ulong nb02, ulong nb03, - 
int ne11, - int ne12, + uint4 ne11, + uint4 ne12, ulong nb10, ulong nb11, ulong nb12, @@ -80,8 +93,10 @@ kernel void kernel_set_rows_f16_i64( return; } - int i12 = i03%ne12; - int i11 = i02%ne11; + //int i12 = i03%ne12; + //int i11 = i02%ne11; + int i12 = fastmod(i03, ne12); + int i11 = fastmod(i02, ne11); int i10 = i01; long i1 = ((global long *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; @@ -105,8 +120,8 @@ kernel void kernel_set_rows_f32_i32( ulong nb01, ulong nb02, ulong nb03, - int ne11, - int ne12, + uint4 ne11, + uint4 ne12, ulong nb10, ulong nb11, ulong nb12, @@ -127,8 +142,10 @@ kernel void kernel_set_rows_f32_i32( return; } - int i12 = i03%ne12; - int i11 = i02%ne11; + //int i12 = i03%ne12; + //int i11 = i02%ne11; + int i12 = fastmod(i03, ne12); + int i11 = fastmod(i02, ne11); int i10 = i01; int i1 = ((global int *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; @@ -152,8 +169,8 @@ kernel void kernel_set_rows_f16_i32( ulong nb01, ulong nb02, ulong nb03, - int ne11, - int ne12, + uint4 ne11, + uint4 ne12, ulong nb10, ulong nb11, ulong nb12, @@ -174,8 +191,10 @@ kernel void kernel_set_rows_f16_i32( return; } - int i12 = i03%ne12; - int i11 = i02%ne11; + //int i12 = i03%ne12; + //int i11 = i02%ne11; + int i12 = fastmod(i03, ne12); + int i11 = fastmod(i02, ne11); int i10 = i01; int i1 = ((global int *)(src1 + i10*nb10 + i11*nb11 + i12*nb12))[0]; From bee7518675d8f0815c50c691d185d33cf681c7d3 Mon Sep 17 00:00:00 2001 From: Mike Abbott Date: Tue, 11 Nov 2025 04:19:50 -0700 Subject: [PATCH 11/51] cmake : add version to all shared object files (llama/17091) When compiling llama.cpp in Yocto, it fails QA checks because the generated so files aren't versioned. This applies a version to all generated so files, allowing the package to build without errors. --- ggml/src/CMakeLists.txt | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index f30e4ac9020..628db3fd655 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -211,6 +211,11 @@ add_library(ggml-base ggml-quants.h gguf.cpp) +set_target_properties(ggml-base PROPERTIES + VERSION ${GGML_VERSION} + SOVERSION ${GGML_VERSION_MAJOR} +) + target_include_directories(ggml-base PRIVATE .) 
if (GGML_BACKEND_DL) target_compile_definitions(ggml-base PUBLIC GGML_BACKEND_DL) @@ -220,6 +225,11 @@ add_library(ggml ggml-backend-reg.cpp) add_library(ggml::ggml ALIAS ggml) +set_target_properties(ggml PROPERTIES + VERSION ${GGML_VERSION} + SOVERSION ${GGML_VERSION_MAJOR} +) + if (GGML_BACKEND_DIR) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_BACKEND_DIR requires GGML_BACKEND_DL") @@ -259,6 +269,12 @@ function(ggml_add_backend_library backend) target_compile_definitions(${backend} PUBLIC GGML_BACKEND_SHARED) endif() + # Set versioning properties for all backend libraries + set_target_properties(${backend} PROPERTIES + VERSION ${GGML_VERSION} + SOVERSION ${GGML_VERSION_MAJOR} + ) + if(NOT GGML_AVAILABLE_BACKENDS) set(GGML_AVAILABLE_BACKENDS "${backend}" CACHE INTERNAL "List of backends for cmake package") From 2fe28b67cb5a8cee647ba4b2ece99f3fb0df14e9 Mon Sep 17 00:00:00 2001 From: Charles Xu Date: Tue, 11 Nov 2025 12:20:31 +0100 Subject: [PATCH 12/51] kleidiai: add optimized per-channel kernels for Q8_0 (llama/16993) --- ggml/src/ggml-cpu/CMakeLists.txt | 18 +- ggml/src/ggml-cpu/kleidiai/kernels.cpp | 283 ++++++++++++++++++++++++ ggml/src/ggml-cpu/kleidiai/kernels.h | 1 + ggml/src/ggml-cpu/kleidiai/kleidiai.cpp | 269 +++++++++++++++++++--- 4 files changed, 534 insertions(+), 37 deletions(-) diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index a55191aede9..e52e050a81a 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -590,6 +590,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) ${KLEIDIAI_SRC}/kai/ukernels/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/ + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_fp32_bf16p_bf16p/ ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/) @@ -608,23 +609,34 @@ function(ggml_add_cpu_backend_variant_impl tag_name) ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p4x8sb_f32_neon.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32_neon.c - ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.c) + ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f32.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi8cxp_qsi8cx_neon.c) if (NOT DOTPROD_ENABLED MATCHES -1) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.c - ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.c) + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod.c + 
${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod.c) endif() if (NOT I8MM_ENABLED MATCHES -1) - list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.c) + list(APPEND GGML_KLEIDIAI_SOURCES + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.c) endif() if (NOT SME_ENABLED MATCHES -1) list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa_asm.S + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot.c + ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_fp32_bf16p_bf16p/kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa.c ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_fp32_bf16p_bf16p/kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa_asm.S ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_pack_bf16p2vlx2_f32_sme.c diff --git a/ggml/src/ggml-cpu/kleidiai/kernels.cpp b/ggml/src/ggml-cpu/kleidiai/kernels.cpp index 3eaa5e3f410..1d5b44f9fe3 100644 --- a/ggml/src/ggml-cpu/kleidiai/kernels.cpp +++ b/ggml/src/ggml-cpu/kleidiai/kernels.cpp @@ -4,6 +4,7 @@ // KleidiAI micro-kernels #include "kai_matmul_clamp_f32_qsi8d32p_qsi4c32p_interface.h" +#include "kai_matmul_clamp_f32_qai8dxp_qsi8cxp_interface.h" #include "kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.h" #include "kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.h" #include "kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.h" @@ -11,20 +12,31 @@ #include "kai_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa.h" #include "kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.h" #include "kai_matmul_clamp_f32_bf16p2vlx2_bf16p2vlx2_2vlx2vl_sme2_mopa.h" +#include "kai_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa.h" +#include "kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot.h" +#include "kai_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod.h" +#include "kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod.h" +#include "kai_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod.h" +#include "kai_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm.h" #include "kai_lhs_pack_bf16p2vlx2_f32_sme.h" #include "kai_lhs_quant_pack_qsi8d32p_f32.h" #include "kai_lhs_quant_pack_qsi8d32p4x8sb_f32_neon.h" #include "kai_lhs_quant_pack_qsi8d32p_f32_neon.h" +#include "kai_lhs_quant_pack_qai8dxp_f32.h" #include "kai_rhs_pack_kxn_bf16p2vlx2b_f32_x32_sme.h" #include "kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.h" 
#include "kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.h" +#include "kai_rhs_pack_nxk_qsi8cxp_qsi8cx_neon.h" #include "kai_common.h" #include "simd-mappings.h" +#define GGML_COMMON_DECL_CPP +#include "ggml-common.h" + #include "kernels.h" #define NELEMS(x) sizeof(x) / sizeof(*x) @@ -55,6 +67,14 @@ static inline void kernel_run_fn10(size_t m, size_t n, size_t k, size_t /*bl*/, Fn(m, n, k, lhs, rhs, dst, dst_stride_row, dst_stride_col, clamp_min, clamp_max); } +template +static inline void kernel_run_float_fn10(size_t m, size_t n, size_t k, size_t /*bl*/, + const void* lhs, const void* rhs, void* dst, + size_t dst_stride_row, size_t dst_stride_col, + float clamp_min, float clamp_max) { + Fn(m, n, k, lhs, rhs, static_cast(dst), dst_stride_row, dst_stride_col, clamp_min, clamp_max); +} + template static inline size_t lhs_ps_fn6(size_t m, size_t k, size_t bl, size_t mr, size_t kr, size_t sr) { return Fn(m, k, bl, mr, kr, sr); @@ -93,6 +113,12 @@ static inline void lhs_pack_void_fn9(size_t m, size_t k, size_t /*bl*/, size_t m Fn(m, k, mr, kr, sr, m_idx_start, lhs, lhs_stride, lhs_packed); } +template +static inline void lhs_pack_float_fn9_no_bl(size_t m, size_t k, size_t /*bl*/, size_t mr, size_t kr, size_t sr, + size_t m_idx_start, const void * lhs, size_t lhs_stride, void * lhs_packed) { + Fn(m, k, mr, kr, sr, m_idx_start, static_cast(lhs), lhs_stride, lhs_packed); +} + template static inline size_t rhs_ps_fn5(size_t n, size_t k, size_t nr, size_t kr, size_t bl) { return Fn(n, k, nr, kr, bl); @@ -124,6 +150,18 @@ static inline void rhs_pack_fn12(size_t num_groups, size_t n, size_t k, size_t n static_cast(params)); } +template +static inline void rhs_pack_scale_fn12(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t /*bl*/, + size_t /*rhs_stride*/, const void* rhs, const void* bias, const void* scale, + void* rhs_packed, size_t extra_bytes, const void* params) { + Fn(num_groups, n, k, nr, kr, sr, + static_cast(rhs), + static_cast(bias), + static_cast(scale), + rhs_packed, extra_bytes, + static_cast(params)); +} + template static inline void rhs_pack_fn13(size_t num_groups, size_t n, size_t k, size_t nr, size_t kr, size_t sr, size_t /*bl*/, size_t rhs_stride, const void* rhs, const void* bias, const void* scale, @@ -213,6 +251,57 @@ static void dequantize_row_qsi4c32ps1s0scalef16( GGML_UNUSED(kr); } +static void dequantize_row_qsi8cxp( + const void *packed_data, + int32_t row_idx, + int64_t k, + float *out, + size_t nr, + size_t packed_row_stride, + size_t kr, + size_t bl, + size_t num_bytes_multiplier +) { + GGML_UNUSED(bl); + GGML_UNUSED(num_bytes_multiplier); + + const size_t k_internal = ((size_t) k + QK8_0 - 1) / QK8_0 * QK8_0; + const size_t group_idx = row_idx / nr; + const size_t row_in_group = row_idx % nr; + + const uint8_t * group_ptr = static_cast(packed_data) + group_idx * packed_row_stride; + const int8_t * data_base = reinterpret_cast(group_ptr); + + const size_t num_blocks = k_internal / kr; + + for (size_t block = 0; block < num_blocks; ++block) { + const int8_t * block_ptr = data_base + (block * nr + row_in_group) * kr; + for (size_t i = 0; i < kr; ++i) { + const size_t k_idx = block * kr + i; + if (k_idx < (size_t) k) { + out[k_idx] = static_cast(block_ptr[i]); + } + } + } + + const uint8_t * sums_ptr = group_ptr + nr * k_internal; + GGML_UNUSED(sums_ptr); + + const float * scale_ptr = reinterpret_cast(sums_ptr + nr * sizeof(int32_t)); + const float scale = scale_ptr[row_in_group]; + + if (scale == 0.0f) { + for (size_t i = 0; i < 
(size_t) k; ++i) { + out[i] = 0.0f; + } + return; + } + + for (size_t i = 0; i < (size_t) k; ++i) { + out[i] *= scale; + } +} + static ggml_kleidiai_kernels gemm_gemv_kernels[] = { #if defined(__ARM_FEATURE_SME) { @@ -548,6 +637,174 @@ static ggml_kleidiai_kernels gemm_gemv_kernels[] = { #endif }; +static ggml_kleidiai_kernels gemm_gemv_kernels_q8[] = { +#if defined(__ARM_FEATURE_SME) + { + /* SME GEMM */ + { + /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1vlx4_qsi8cxp4vlx4_1vlx4vl_sme2_mopa, + /* .get_lhs_offset_ex = */ &kernel_offs_fn2, + /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, + /* .run_kernel_ex = */ &kernel_run_float_fn10, + }, + /* .gemm_lhs_info = */ { + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, + /* .get_packed_offset_ex = */ &lhs_offs_fn5, + /* .packed_size_ex = */ &lhs_ps_fn5, + /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, + }, + /* SME GEMV */ + { + /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4vlx4_1x4vl_sme2_dot, + /* .get_lhs_offset_ex = */ &kernel_offs_fn2, + /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, + /* .run_kernel_ex = */ &kernel_run_float_fn10, + }, + /* .gemv_lhs_info = */ { + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, + /* .get_packed_offset_ex = */ &lhs_offs_fn5, + /* .packed_size_ex = */ &lhs_ps_fn5, + /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, + }, + /* .rhs_info = */ { + /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon, + /* .to_float = */ dequantize_row_qsi8cxp, + /* .packed_size_ex = */ &rhs_ps_fn5, + /* .packed_stride_ex = */ &rhs_stride_fn4, + /* .pack_func_ex = */ &rhs_pack_scale_fn12, + }, + /* .required_cpu = */ CPU_FEATURE_SME, + /* .lhs_type = */ GGML_TYPE_F32, + /* .rhs_type = */ GGML_TYPE_Q8_0, + /* .op_type = */ GGML_TYPE_F32, + }, +#endif +#if defined(__ARM_FEATURE_MATMUL_INT8) + { + /* I8MM GEMM */ + { + /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_nr = */ 
kai_get_nr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp4x8_qsi8cxp4x8_16x4_neon_i8mm, + /* .get_lhs_offset_ex = */ &kernel_offs_fn2, + /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, + /* .run_kernel_ex = */ &kernel_run_float_fn10, + }, + /* .gemm_lhs_info = */ { + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, + /* .get_packed_offset_ex = */ &lhs_offs_fn5, + /* .packed_size_ex = */ &lhs_ps_fn5, + /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, + }, + /* I8MM GEMV (dotprod fallback) */ + { + /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1x8_qsi8cxp4x8_1x4_neon_dotprod, + /* .get_lhs_offset_ex = */ &kernel_offs_fn2, + /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, + /* .run_kernel_ex = */ &kernel_run_float_fn10, + }, + /* .gemv_lhs_info = */ { + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, + /* .get_packed_offset_ex = */ &lhs_offs_fn5, + /* .packed_size_ex = */ &lhs_ps_fn5, + /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, + }, + /* .rhs_info = */ { + /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon, + /* .to_float = */ dequantize_row_qsi8cxp, + /* .packed_size_ex = */ &rhs_ps_fn5, + /* .packed_stride_ex = */ &rhs_stride_fn4, + /* .pack_func_ex = */ &rhs_pack_scale_fn12, + }, + /* .required_cpu = */ CPU_FEATURE_DOTPROD | CPU_FEATURE_I8MM, + /* .lhs_type = */ GGML_TYPE_F32, + /* .rhs_type = */ GGML_TYPE_Q8_0, + /* .op_type = */ GGML_TYPE_F32, + }, +#endif +#if defined(__ARM_FEATURE_DOTPROD) + { + /* DOTPROD GEMM */ + { + /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp4x4_qsi8cxp4x4_16x4_neon_dotprod, + /* .get_lhs_offset_ex = */ &kernel_offs_fn2, + /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, + /* .run_kernel_ex = */ &kernel_run_float_fn10, + }, + /* .gemm_lhs_info = */ { + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, + /* .get_packed_offset_ex = */ 
&lhs_offs_fn5, + /* .packed_size_ex = */ &lhs_ps_fn5, + /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, + }, + /* DOTPROD GEMV */ + { + /* .get_m_step = */ kai_get_m_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_n_step = */ kai_get_n_step_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_mr = */ kai_get_mr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_nr = */ kai_get_nr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_kr = */ kai_get_kr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_sr = */ kai_get_sr_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_dst_offset = */ kai_get_dst_offset_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_dst_size = */ kai_get_dst_size_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod, + /* .get_lhs_offset_ex = */ &kernel_offs_fn2, + /* .get_rhs_packed_offset_ex = */ &kernel_offs_fn2, + /* .run_kernel_ex = */ &kernel_run_float_fn10, + }, + /* .gemv_lhs_info = */ { + /* .get_offset = */ kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32, + /* .get_packed_offset_ex = */ &lhs_offs_fn5, + /* .packed_size_ex = */ &lhs_ps_fn5, + /* .pack_func_ex = */ &lhs_pack_float_fn9_no_bl, + }, + /* .rhs_info = */ { + /* .packed_stride = */ kai_get_rhs_packed_stride_rhs_pack_nxk_qsi8cxp_qsi8cx_neon, + /* .to_float = */ dequantize_row_qsi8cxp, + /* .packed_size_ex = */ &rhs_ps_fn5, + /* .packed_stride_ex = */ &rhs_stride_fn4, + /* .pack_func_ex = */ &rhs_pack_scale_fn12, + }, + /* .required_cpu = */ CPU_FEATURE_DOTPROD, + /* .lhs_type = */ GGML_TYPE_F32, + /* .rhs_type = */ GGML_TYPE_Q8_0, + /* .op_type = */ GGML_TYPE_F32, + }, +#endif +}; + ggml_kleidiai_kernels * ggml_kleidiai_select_kernels(cpu_feature cpu_features, const ggml_tensor * tensor) { ggml_kleidiai_kernels * kernel = nullptr; @@ -562,6 +819,17 @@ ggml_kleidiai_kernels * ggml_kleidiai_select_kernels(cpu_feature cpu_features, c break; } } + if (!kernel) { + for (size_t i = 0; i < NELEMS(gemm_gemv_kernels_q8); ++i) { + if ((cpu_features & gemm_gemv_kernels_q8[i].required_cpu) == gemm_gemv_kernels_q8[i].required_cpu && + gemm_gemv_kernels_q8[i].lhs_type == tensor->src[1]->type && + gemm_gemv_kernels_q8[i].rhs_type == tensor->src[0]->type && + gemm_gemv_kernels_q8[i].op_type == tensor->type) { + kernel = &gemm_gemv_kernels_q8[i]; + break; + } + } + } #endif } @@ -582,3 +850,18 @@ ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q4_0(cpu_feature features) return kernels; } + +ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q8_0(cpu_feature features) { + ggml_kleidiai_kernels * kernels = nullptr; + +#if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_DOTPROD) || defined(__ARM_FEATURE_MATMUL_INT8) + for (size_t i = 0; i < NELEMS(gemm_gemv_kernels_q8); ++i) { + if ((features & gemm_gemv_kernels_q8[i].required_cpu) == gemm_gemv_kernels_q8[i].required_cpu) { + kernels = &gemm_gemv_kernels_q8[i]; + break; + } + } +#endif + + return kernels; +} diff --git a/ggml/src/ggml-cpu/kleidiai/kernels.h b/ggml/src/ggml-cpu/kleidiai/kernels.h index a84795a6b2e..129245400b4 100644 --- a/ggml/src/ggml-cpu/kleidiai/kernels.h +++ b/ggml/src/ggml-cpu/kleidiai/kernels.h @@ -87,3 +87,4 @@ struct ggml_kleidiai_kernels { ggml_kleidiai_kernels * ggml_kleidiai_select_kernels(cpu_feature cpu_features, const ggml_tensor * tensor); ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q4_0(cpu_feature features); +ggml_kleidiai_kernels * ggml_kleidiai_select_kernels_q8_0(cpu_feature features); diff --git 
a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp index 8b3df7d7800..6f2a90fbda7 100644 --- a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +++ b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp @@ -5,10 +5,13 @@ #include #include #include +#include +#include #include #include #include #include +#include #if defined(__linux__) #include #include @@ -38,8 +41,9 @@ struct ggml_kleidiai_context { cpu_feature features; - ggml_kleidiai_kernels * kernels; -} static ctx = { CPU_FEATURE_NONE, NULL }; + ggml_kleidiai_kernels * kernels_q4; + ggml_kleidiai_kernels * kernels_q8; +} static ctx = { CPU_FEATURE_NONE, NULL, NULL }; static const char* cpu_feature_to_string(cpu_feature f) { switch (f) { @@ -73,10 +77,14 @@ static void init_kleidiai_context(void) { if (sme_enabled != 0) { ctx.features |= ggml_cpu_has_sme() ? CPU_FEATURE_SME : CPU_FEATURE_NONE; } - ctx.kernels = ggml_kleidiai_select_kernels_q4_0(ctx.features); + ctx.kernels_q4 = ggml_kleidiai_select_kernels_q4_0(ctx.features); + ctx.kernels_q8 = ggml_kleidiai_select_kernels_q8_0(ctx.features); #ifndef NDEBUG - if (ctx.kernels) { - GGML_LOG_DEBUG("kleidiai: using kernel with CPU feature %s\n", cpu_feature_to_string(ctx.kernels->required_cpu)); + if (ctx.kernels_q4) { + GGML_LOG_DEBUG("kleidiai: using q4 kernel with CPU feature %s\n", cpu_feature_to_string(ctx.kernels_q4->required_cpu)); + } + if (ctx.kernels_q8) { + GGML_LOG_DEBUG("kleidiai: using q8 kernel with CPU feature %s\n", cpu_feature_to_string(ctx.kernels_q8->required_cpu)); } #endif } @@ -130,6 +138,9 @@ class tensor_traits : public ggml::cpu::tensor_traits { if (kernels->rhs_type == GGML_TYPE_Q4_0) { if (!lhs_info->packed_size_ex) return false; size = lhs_info->packed_size_ex(m, k, QK4_0, mr, kr, sr); + } else if (kernels->rhs_type == GGML_TYPE_Q8_0) { + if (!lhs_info->packed_size_ex) return false; + size = lhs_info->packed_size_ex(m, k, QK8_0, mr, kr, sr); } else if (kernels->rhs_type == GGML_TYPE_F16) { if (!lhs_info->packed_size_ex || !kernels->rhs_info.packed_size_ex) return false; const int64_t lhs_batch_size0 = op->src[1]->ne[2]; @@ -149,11 +160,13 @@ class tensor_traits : public ggml::cpu::tensor_traits { if (dst->op == GGML_OP_MUL_MAT) { if (dst->src[0]->type == GGML_TYPE_Q4_0) { return compute_forward_q4_0(params, dst); + } else if (dst->src[0]->type == GGML_TYPE_Q8_0) { + return compute_forward_q8_0(params, dst); } else if (dst->src[0]->type == GGML_TYPE_F16) { return compute_forward_fp16(params, dst); } } else if (dst->op == GGML_OP_GET_ROWS) { - if (dst->src[0]->type == GGML_TYPE_Q4_0) { + if (dst->src[0]->type == GGML_TYPE_Q4_0 || dst->src[0]->type == GGML_TYPE_Q8_0) { return compute_forward_get_rows(params, dst); } } @@ -400,19 +413,120 @@ class tensor_traits : public ggml::cpu::tensor_traits { return true; } - bool compute_forward_get_rows(struct ggml_compute_params * params, struct ggml_tensor * dst) { - GGML_ASSERT(dst->src[0]->type == GGML_TYPE_Q4_0); - if (!ctx.kernels) { + bool compute_forward_q8_0(struct ggml_compute_params * params, struct ggml_tensor * dst) { + GGML_ASSERT(dst->src[0]->type == GGML_TYPE_Q8_0); + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + ggml_kleidiai_kernels *kernels = ggml_kleidiai_select_kernels(ctx.features, dst); + if (!kernels) { return false; } + bool is_gemv = src1->ne[1] == 1; + kernel_info * kernel = is_gemv ? &kernels->gemv : &kernels->gemm; + lhs_packing_info * lhs_info = is_gemv ? 
&kernels->gemv_lhs_info : &kernels->gemm_lhs_info;
+
+        if (!kernel || !lhs_info->get_packed_offset_ex || !lhs_info->pack_func_ex ||
+            !kernel->get_rhs_packed_offset_ex || !kernel->run_kernel_ex || !kernel->get_dst_offset) {
+            return false;
+        }
+
+        const int ith = params->ith;
+        const int nth_raw = params->nth;
+        const int nth = nth_raw > 0 ? nth_raw : 1;
+
+        const size_t k = ne00;
+        const size_t m = ne11;
+        const size_t n = ne01;
+
+        size_t mr = kernel->get_mr();
+        size_t kr = kernel->get_kr();
+        size_t sr = kernel->get_sr();
+
+        const uint8_t * lhs = static_cast<const uint8_t *>(src1->data);
+        uint8_t * lhs_packed = static_cast<uint8_t *>(params->wdata);
+        const uint8_t * rhs_packed = static_cast<const uint8_t *>(src0->data);
+
+        const size_t n_step = kernel->get_n_step();
+        const size_t num_n_per_thread = kai_roundup(kai_roundup(n, nth) / nth, n_step);
+        const size_t n_start = ith * num_n_per_thread;
+
+        size_t n_to_process = 0;
+        if (n_start < n) {
+            n_to_process = num_n_per_thread;
+            if ((n_start + n_to_process) > n) {
+                n_to_process = n - n_start;
+            }
+        }
+
+        const size_t num_m_per_thread = kai_roundup(m, mr * nth) / nth;
+        const size_t m_start = ith * num_m_per_thread;
+        size_t m_to_process = num_m_per_thread;
+        if ((m_start + m_to_process) > m) {
+            m_to_process = m - m_start;
+        }
+
+        if (m_start < m) {
+            const size_t src_stride = src1->nb[1];
+            const float * src_ptr = reinterpret_cast<const float *>(lhs + lhs_info->get_offset(m_start, dst->src[1]->nb[1]));
+            const size_t lhs_packed_offset = lhs_info->get_packed_offset_ex(m_start, k, 0, mr, kr, sr);
+            void * lhs_packed_ptr = static_cast<void *>(lhs_packed + lhs_packed_offset);
+
+            lhs_info->pack_func_ex(m_to_process, k, 0, mr, kr, sr, 0, src_ptr, src_stride, lhs_packed_ptr);
+        }
+
+        ggml_barrier(params->threadpool);
+
+        const size_t dst_stride = dst->nb[1];
+        const size_t lhs_packed_offset = lhs_info->get_packed_offset_ex(0, k, 0, mr, kr, sr);
+        const size_t rhs_packed_offset = kernel->get_rhs_packed_offset_ex(n_start, k, 0);
+        const size_t dst_offset = kernel->get_dst_offset(0, n_start, dst_stride);
+        const void * rhs_ptr = static_cast<const void *>(rhs_packed + rhs_packed_offset);
+        const void * lhs_ptr = static_cast<const void *>(lhs_packed + lhs_packed_offset);
+        float * dst_ptr = reinterpret_cast<float *>(static_cast<uint8_t *>(dst->data) + dst_offset);
+
+        if (n_to_process > 0) {
+            kernel->run_kernel_ex(m, n_to_process, k, 0, lhs_ptr, rhs_ptr, dst_ptr, dst_stride,
+                                  sizeof(float), -FLT_MAX, FLT_MAX);
+        }
+
+        return true;
+    }
+
+    bool compute_forward_get_rows(struct ggml_compute_params * params, struct ggml_tensor * dst) {
         const ggml_tensor * src0 = dst->src[0];
         const ggml_tensor * src1 = dst->src[1];
 
         GGML_TENSOR_BINARY_OP_LOCALS
 
-        rhs_packing_info * rhs_info = &ctx.kernels->rhs_info;
-        kernel_info * kernel = &ctx.kernels->gemm;
+        ggml_kleidiai_kernels * kernels = nullptr;
+        size_t block_len = 0;
+        size_t num_bytes_multiplier = 0;
+
+        if (dst->src[0]->type == GGML_TYPE_Q4_0) {
+            if (!ctx.kernels_q4) {
+                return false;
+            }
+            kernels = ctx.kernels_q4;
+            block_len = QK4_0;
+            num_bytes_multiplier = sizeof(uint16_t);
+        } else if (dst->src[0]->type == GGML_TYPE_Q8_0) {
+            if (!ctx.kernels_q8) {
+                return false;
+            }
+            kernels = ctx.kernels_q8;
+            block_len = QK8_0;
+            num_bytes_multiplier = sizeof(float);
+        } else {
+            return false;
+        }
+
+        rhs_packing_info * rhs_info = &kernels->rhs_info;
+        kernel_info * kernel = &kernels->gemm;
 
         if (!rhs_info->to_float || !kernel->get_nr) {
             return false;
         }
@@ -423,8 +537,7 @@ class tensor_traits : public ggml::cpu::tensor_traits {
         const size_t block_rows = kernel->get_nr();
         const size_t kr = kernel->get_kr();
 
-        const size_t num_bytes_multiplier = sizeof(uint16_t);
-        const size_t packed_stride = rhs_info->packed_stride(nc, block_rows, kr, QK4_0);
+        const size_t packed_stride = rhs_info->packed_stride(nc, block_rows, kr, block_len);
 
         const int ith = params->ith;
         const int nth = params->nth;
@@ -439,7 +552,7 @@ class tensor_traits : public ggml::cpu::tensor_traits {
             GGML_ASSERT(row_idx >= 0 && row_idx < src0->ne[1]);
 
             float *out = (float *)((char *)dst->data + i * nb1);
-            rhs_info->to_float(src0->data, row_idx, nc, out, block_rows, packed_stride, kr, QK4_0, num_bytes_multiplier);
+            rhs_info->to_float(src0->data, row_idx, nc, out, block_rows, packed_stride, kr, block_len, num_bytes_multiplier);
         }
 
         return true;
@@ -447,21 +560,91 @@ public:
     int repack(struct ggml_tensor * tensor, const void * data, size_t data_size) {
-        GGML_ASSERT(tensor->type == GGML_TYPE_Q4_0);
-        GGML_ASSERT(ctx.kernels);
         const size_t n = tensor->ne[1];
         const size_t k = tensor->ne[0];
-        size_t nr = ctx.kernels->gemm.get_nr();
-        size_t kr = ctx.kernels->gemm.get_kr();
-        size_t sr = ctx.kernels->gemm.get_sr();
 
-        struct kai_rhs_pack_qs4cxs1s0_param params;
-        params.lhs_zero_point = 1;
-        params.rhs_zero_point = 8;
-        ctx.kernels->rhs_info.pack_func_ex(1, n, k, nr, kr, sr, QK4_0, 0, (const uint8_t*)data, nullptr, nullptr, tensor->data, 0, &params);
+        if (tensor->type == GGML_TYPE_Q4_0) {
+            if (!ctx.kernels_q4) {
+                return -1;
+            }
+            size_t nr = ctx.kernels_q4->gemm.get_nr();
+            size_t kr = ctx.kernels_q4->gemm.get_kr();
+            size_t sr = ctx.kernels_q4->gemm.get_sr();
+
+            struct kai_rhs_pack_qs4cxs1s0_param params;
+            params.lhs_zero_point = 1;
+            params.rhs_zero_point = 8;
+            ctx.kernels_q4->rhs_info.pack_func_ex(1, n, k, nr, kr, sr, QK4_0, 0,
+                                                  static_cast<const uint8_t *>(data),
+                                                  nullptr, nullptr, tensor->data, 0, &params);
+            GGML_UNUSED(data_size);
+            return 0;
+        } else if (tensor->type == GGML_TYPE_Q8_0) {
+            if (!ctx.kernels_q8) {
+                return -1;
+            }
+
+            const size_t row_stride = tensor->nb[1];
+            const size_t k_blocks = (k + QK8_0 - 1) / QK8_0;
+
+            std::vector<int8_t> qdata(n * k, 0);
+            std::vector<float> scales(n, 0.0f);
+
+            for (size_t row = 0; row < n; ++row) {
+                const auto * row_blocks = reinterpret_cast<const block_q8_0 *>(
+                    static_cast<const uint8_t *>(data) + row * row_stride);
+
+                float max_abs = 0.0f;
+                for (size_t block = 0; block < k_blocks; ++block) {
+                    const block_q8_0 & blk = row_blocks[block];
+                    const float d = GGML_FP16_TO_FP32(blk.d);
+                    for (size_t l = 0; l < QK8_0; ++l) {
+                        const size_t linear_idx = block * QK8_0 + l;
+                        if (linear_idx >= k) {
+                            break;
+                        }
+                        const float value = d * blk.qs[l];
+                        max_abs = std::max(max_abs, std::fabs(value));
+                    }
+                }
+
+                float scale = max_abs > 0.0f ? max_abs / 127.0f : 0.0f;
+                scales[row] = scale;
+                const float inv_scale = scale > 0.0f ? 1.0f / scale : 0.0f;
+
+                for (size_t block = 0; block < k_blocks; ++block) {
+                    const block_q8_0 & blk = row_blocks[block];
+                    const float d = GGML_FP16_TO_FP32(blk.d);
+                    for (size_t l = 0; l < QK8_0; ++l) {
+                        const size_t linear_idx = block * QK8_0 + l;
+                        if (linear_idx >= k) {
+                            break;
+                        }
+                        const float value = d * blk.qs[l];
+                        int32_t q = scale > 0.0f ? static_cast<int32_t>(std::lround(value * inv_scale)) : 0;
+                        q = std::clamp(q, -127, 127);
+                        qdata[row * k + linear_idx] = static_cast<int8_t>(q);
+                    }
+                }
+            }
+
+            size_t nr = ctx.kernels_q8->gemm.get_nr();
+            size_t kr = ctx.kernels_q8->gemm.get_kr();
+            size_t sr = ctx.kernels_q8->gemm.get_sr();
+
+            struct kai_rhs_pack_qsi8cx_params params;
+            params.lhs_zero_point = 1;
+            params.scale_multiplier = 1.0f;
+
+            ctx.kernels_q8->rhs_info.pack_func_ex(1, n, k, nr, kr, sr, 0, 0,
+                                                  qdata.data(), nullptr, scales.data(),
+                                                  tensor->data, 0, &params);
+            GGML_UNUSED(data_size);
+            return 0;
+        }
 
-        return 0;
         GGML_UNUSED(data_size);
+        return -1;
     }
 };
 
@@ -518,27 +701,45 @@ static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alignment(ggml_backend_b
 }
 
 static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
-    GGML_ASSERT(tensor->type == GGML_TYPE_Q4_0);
-    GGML_ASSERT(ctx.kernels);
+    GGML_UNUSED(buft);
 
-    const size_t n  = tensor->ne[1];
-    const size_t k  = tensor->ne[0];
-    const size_t nr = ctx.kernels->gemm.get_nr();
-    const size_t kr = ctx.kernels->gemm.get_kr();
+    const size_t n = tensor->ne[1];
+    const size_t k = tensor->ne[0];
+
+    ggml_kleidiai_kernels * kernels = nullptr;
+    size_t block_len = 0;
+
+    if (tensor->type == GGML_TYPE_Q4_0) {
+        GGML_ASSERT(ctx.kernels_q4);
+        kernels = ctx.kernels_q4;
+        block_len = QK4_0;
+    } else if (tensor->type == GGML_TYPE_Q8_0) {
+        GGML_ASSERT(ctx.kernels_q8);
+        kernels = ctx.kernels_q8;
+        block_len = QK8_0;
+    } else {
+        return 0;
+    }
 
-    return ctx.kernels->rhs_info.packed_size_ex(n, k, nr, kr, QK4_0);
+    const size_t nr = kernels->gemm.get_nr();
+    const size_t kr = kernels->gemm.get_kr();
+    const size_t packed = kernels->rhs_info.packed_size_ex(n, k, nr, kr, block_len);
+    const size_t raw = ggml_nbytes(tensor);
 
-    GGML_UNUSED(buft);
+    return packed > raw ? packed : raw;
 }
 
 namespace ggml::cpu::kleidiai {
 class extra_buffer_type : ggml::cpu::extra_buffer_type {
     bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
         if ((op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_GET_ROWS) &&
-            op->src[0]->type == GGML_TYPE_Q4_0 &&
+            (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_Q8_0) &&
             op->src[0]->buffer && (ggml_n_dims(op->src[0]) == 2) &&
-            op->src[0]->buffer->buft == ggml_backend_cpu_kleidiai_buffer_type() && ctx.kernels) {
+            op->src[0]->buffer->buft == ggml_backend_cpu_kleidiai_buffer_type()) {
+            if (((op->src[0]->type == GGML_TYPE_Q4_0) ? ctx.kernels_q4 : ctx.kernels_q8) == nullptr) {
+                return false;
+            }
             if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
                 return false;
             }

From abbb5f2a971730e40aee5fbabc3bcf31494d88b3 Mon Sep 17 00:00:00 2001
From: duduta
Date: Tue, 11 Nov 2025 13:33:24 +0200
Subject: [PATCH 13/51] ggml-cpu: templateify ggml_compute_forward_rope_f32 and
 _f16 (llama/16805)

* extract rotate_pairs logic from ggml_compute_forward_rope_f32

* templateify ggml_compute_forward_rope_f32 and _f16

* abort when rope type not supported, remove GLM from test-rope

* add imrope branch to switch

* add rope tests for perf

* Update ggml/src/ggml-cpu/ops.cpp

Co-authored-by: Georgi Gerganov

* Update ggml/src/ggml-cpu/ops.cpp

Co-authored-by: Georgi Gerganov

---------

Co-authored-by: Georgi Gerganov
---
 ggml/src/ggml-cpu/ops.cpp | 317 +++++++-------------------------------
 1 file changed, 54 insertions(+), 263 deletions(-)

diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp
index 7c42fb78b4a..5a272b9abc5 100644
--- a/ggml/src/ggml-cpu/ops.cpp
+++ b/ggml/src/ggml-cpu/ops.cpp
@@ -5503,194 +5503,28 @@ static void ggml_mrope_cache_init(
     }
 }
 
-static void ggml_compute_forward_rope_f32(
-        const ggml_compute_params * params,
-        ggml_tensor * dst,
-        const bool forward) {
-
-    const ggml_tensor * src0 = dst->src[0];
-    const ggml_tensor * src1 = dst->src[1];
-    const ggml_tensor * src2 = dst->src[2];
-
-    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
-    int sections[4];
-
-    //const int n_past = ((int32_t *) dst->op_params)[0];
-    const int n_dims = ((int32_t *) dst->op_params)[1];
-    const int mode   = ((int32_t *) dst->op_params)[2];
-    //const int n_ctx = ((int32_t *) dst->op_params)[3];
-    const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
-
-    memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
-    memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
-    memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
-    memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
-    memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
-    memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
-    memcpy(&sections,    (int32_t *) dst->op_params + 11, sizeof(int)*4);
-
-    GGML_TENSOR_UNARY_OP_LOCALS
-
-    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
-    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
-
-    GGML_ASSERT(nb00 == sizeof(float));
-
-    const int ith = params->ith;
-    const int nth = params->nth;
-
-    const int nr = ggml_nrows(dst);
-
-    GGML_ASSERT(n_dims <= ne0);
-    GGML_ASSERT(n_dims % 2 == 0);
-
-    // rows per thread
-    const int dr = (nr + nth - 1)/nth;
-    // row range for this thread
-    const int ir0 = dr*ith;
-    const int ir1 = MIN(ir0 + dr, nr);
-
-    // row index used to determine which thread to use
-    int ir = 0;
-
-    const float theta_scale = powf(freq_base, -2.0f/n_dims);
-
-    float corr_dims[2];
-    ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
-
-    const bool is_neox   = mode & GGML_ROPE_TYPE_NEOX;
-    const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;  // ggml_rope_multi, multimodal rotary position embedding
-    const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope
-    const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
-
-    if (is_mrope) {
-        GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0);
-    }
-
-    if (is_vision) {
-        GGML_ASSERT(n_dims == ne0/2);
-    }
+template <typename T>
+static void rotate_pairs(const int64_t n, const int64_t n_offset, const float * cache, const T * src_data, T * dst_data, const int scale = 2) {
+    for (int64_t i0 = 0; i0 < n; i0 += 2) {
+        const int64_t ic = i0/scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2
 
-    const float * freq_factors = NULL;
-    if (src2 != NULL) {
-        GGML_ASSERT(src2->type == GGML_TYPE_F32);
-        GGML_ASSERT(src2->ne[0] >= n_dims / 2);
-        freq_factors = (const float *) src2->data;
-    }
+        const float cos_theta = cache[i0 + 0];
+        const float sin_theta = cache[i0 + 1];
 
-    // backward process uses inverse rotation by cos and sin.
-    // cos and sin build a rotation matrix, where the inverse is the transpose.
-    // this essentially just switches the sign of sin.
-    const float sin_sign = forward ? 1.0f : -1.0f;
+        const T * const src = src_data + ic;
+        T * dst = dst_data + ic;
 
-    const int32_t * pos = (const int32_t *) src1->data;
+        const float x0 = type_conversion_table<T>::to_f32(src[0]);
+        const float x1 = type_conversion_table<T>::to_f32(src[n_offset]);
 
-    for (int64_t i3 = 0; i3 < ne3; i3++) { // batch
-        for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len
-
-            float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
-            if (!is_mrope) {
-                const int64_t p = pos[i2];
-                ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
-            }
-            else {
-                const int64_t p_t = pos[i2];
-                const int64_t p_h = pos[i2 + ne2];
-                const int64_t p_w = pos[i2 + ne2 * 2];
-                const int64_t p_e = pos[i2 + ne2 * 3];
-                ggml_mrope_cache_init(
-                    p_t, p_h, p_w, p_e, sections, is_imrope, is_vision,
-                    freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
-            }
-
-            for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads
-                if (ir++ < ir0) continue;
-                if (ir   > ir1) break;
-
-                if (is_neox || is_mrope) {
-                    if (is_vision){
-                        for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
-                            const int64_t ic = i0/2;
-
-                            const float cos_theta = cache[i0 + 0];
-                            const float sin_theta = cache[i0 + 1];
-
-                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
-                            float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
-
-                            const float x0 = src[0];
-                            const float x1 = src[n_dims];
-
-                            dst_data[0]      = x0*cos_theta - x1*sin_theta;
-                            dst_data[n_dims] = x0*sin_theta + x1*cos_theta;
-                        }
-                    } else {
-                        for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
-                            const int64_t ic = i0/2;
-
-                            const float cos_theta = cache[i0 + 0];
-                            const float sin_theta = cache[i0 + 1];
-
-                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
-                            float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
-
-                            const float x0 = src[0];
-                            const float x1 = src[n_dims/2];
-
-                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
-                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
-                        }
-                    }
-                } else {
-                    for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
-                        const float cos_theta = cache[i0 + 0];
-                        const float sin_theta = cache[i0 + 1];
-
-                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
-                        float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
-
-                        const float x0 = src[0];
-                        const float x1 = src[1];
-
-                        dst_data[0] = x0*cos_theta - x1*sin_theta;
-                        dst_data[1] = x0*sin_theta + x1*cos_theta;
-                    }
-                }
-
-                if (is_vision) {
-                    for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
-                        const int64_t ic = i0/2;
-
-                        const float cos_theta = cache[i0 + 0];
-                        const float sin_theta = cache[i0 + 1];
-
-                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
-                        float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
-
-                        const float x0 = src[0];
-                        const float x1 = src[n_dims];
-
-                        dst_data[0]      = x0*cos_theta - x1*sin_theta;
-                        dst_data[n_dims] = x0*sin_theta + x1*cos_theta;
-                    }
-                } else {
-                    // fill the remain channels with data from src tensor
-                    for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
-                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
-                        float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
-
-                        dst_data[0] = src[0];
-                        dst_data[1] = src[1];
-                    }
-                }
-            }
-        }
-    }
+        dst[0]        = type_conversion_table<T>::from_f32(x0*cos_theta - x1*sin_theta);
+        dst[n_offset] = type_conversion_table<T>::from_f32(x0*sin_theta + x1*cos_theta);
+    }
 }
 
-// TODO: deduplicate f16/f32 code
-static void ggml_compute_forward_rope_f16(
+template <typename T> //float or ggml_fp16_t
+static void ggml_compute_forward_rope_flt(
         const ggml_compute_params * params,
         ggml_tensor * dst,
         const bool forward) {
@@ -5699,6 +5533,9 @@ static void ggml_compute_forward_rope_f16(
     const ggml_tensor * src1 = dst->src[1];
     const ggml_tensor * src2 = dst->src[2];
 
+    GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
+    GGML_ASSERT(src1->type == GGML_TYPE_I32);
+
     float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
     int sections[4];
 
@@ -5707,6 +5544,7 @@ static void ggml_compute_forward_rope_f16(
     const int mode = ((int32_t *) dst->op_params)[2];
     //const int n_ctx = ((int32_t *) dst->op_params)[3];
     const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
+
     memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
     memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
     memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
@@ -5715,13 +5553,13 @@ static void ggml_compute_forward_rope_f16(
     memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
     memcpy(&sections,    (int32_t *) dst->op_params + 11, sizeof(int)*4);
 
-    GGML_TENSOR_UNARY_OP_LOCALS
 
     //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
 
-    GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
+    GGML_ASSERT(nb0 == nb00);
+    GGML_ASSERT(nb0 == sizeof(T));
 
     const int ith = params->ith;
     const int nth = params->nth;
@@ -5746,12 +5584,11 @@ static void ggml_compute_forward_rope_f16(
     float corr_dims[2];
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
-    const bool is_neox   = mode & GGML_ROPE_TYPE_NEOX;
-    const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;
-    const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE;
+    const bool is_imrope   = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope
+    const bool mrope_used  = mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope
     const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
 
-    if (is_mrope) {
+    if (mrope_used) {
         GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0);
     }
 
@@ -5773,11 +5610,11 @@ static void ggml_compute_forward_rope_f16(
 
     const int32_t * pos = (const int32_t *) src1->data;
 
-    for (int64_t i3 = 0; i3 < ne3; i3++) {
-        for (int64_t i2 = 0; i2 < ne2; i2++) {
+    for (int64_t i3 = 0; i3 < ne3; i3++) { // batch
+        for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len
 
             float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
-
if (!is_mrope) { + if (!mrope_used) { const int64_t p = pos[i2]; ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); } @@ -5791,86 +5628,40 @@ static void ggml_compute_forward_rope_f16( freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); } - for (int64_t i1 = 0; i1 < ne1; i1++) { + for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads if (ir++ < ir0) continue; if (ir > ir1) break; - if (is_neox || is_mrope) { - if (is_vision) { - for (int64_t i0 = 0; i0 < n_dims; i0 += 2) { - const int64_t ic = i0/2; - - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - - const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); - const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims]); - - dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); - } - } else { - for (int64_t i0 = 0; i0 < n_dims; i0 += 2) { - const int64_t ic = i0/2; - - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - - const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); - const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims/2]); - - dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims/2] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); - } - } - } else { - for (int64_t i0 = 0; i0 < n_dims; i0 += 2) { - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); - const float x1 = GGML_CPU_FP16_TO_FP32(src[1]); - - dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[1] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); - } + T * src = (T *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); + T * dst_data = (T *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1); + + switch (mode) { + case GGML_ROPE_TYPE_NORMAL: + rotate_pairs(n_dims, 1, cache, src, dst_data, 1); + break; + case GGML_ROPE_TYPE_NEOX: + case GGML_ROPE_TYPE_MROPE: + case GGML_ROPE_TYPE_IMROPE: + rotate_pairs(n_dims, n_dims/2, cache, src, dst_data); + break; + case GGML_ROPE_TYPE_VISION: + rotate_pairs(ne0, n_dims, cache, src, dst_data); + break; + default: + GGML_ABORT("rope type not supported"); } - if (is_vision) { - for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { - const int64_t ic = i0/2; - - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - - const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); - const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims]); - - dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims] = 
GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
-                    }
-                } else {
+            if (!is_vision) {
+                // fill the remain channels with data from src tensor
                 for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
-                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
-                        ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+                    const T * const src = (T *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+                    T * dst_data = (T *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
 
                     dst_data[0] = src[0];
                     dst_data[1] = src[1];
                 }
             }
-            }
+            } //attn-heads
         }
     }
 }
@@ -5884,11 +5675,11 @@ void ggml_compute_forward_rope(
     switch (src0->type) {
         case GGML_TYPE_F16:
             {
-                ggml_compute_forward_rope_f16(params, dst, true);
+                ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, true);
             } break;
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_rope_f32(params, dst, true);
+                ggml_compute_forward_rope_flt<float>(params, dst, true);
            } break;
        default:
            {
@@ -5908,11 +5699,11 @@ void ggml_compute_forward_rope_back(
     switch (src0->type) {
         case GGML_TYPE_F16:
             {
-                ggml_compute_forward_rope_f16(params, dst, false);
+                ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, false);
             } break;
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_rope_f32(params, dst, false);
+                ggml_compute_forward_rope_flt<float>(params, dst, false);
             } break;
         default:
             {

From f52e7c75d8cfda9e0c873305d89783542f0be559 Mon Sep 17 00:00:00 2001
From: ixgbe <1113177880@qq.com>
Date: Tue, 11 Nov 2025 19:41:51 +0800
Subject: [PATCH 14/51] ggml-cpu : add RISC-V RVV (Zvfh) optimization for FP16
 to FP32 conversion (llama/17161)

Signed-off-by: Wang Yang
---
 ggml/src/ggml-cpu/ggml-cpu.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 086708bae0b..d8e3c48c609 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -3274,6 +3274,13 @@ void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) {
         __m128 y_vec = _mm_cvtph_ps(x_vec);
         _mm_storeu_ps(y + i, y_vec);
     }
+#elif defined(__riscv_zvfh)
+    for (int vl; i < n; i += vl) {
+        vl = __riscv_vsetvl_e16m1(n - i);
+        vfloat16m1_t vx = __riscv_vle16_v_f16m1((_Float16 *)&x[i], vl);
+        vfloat32m2_t vy = __riscv_vfwcvt_f_f_v_f32m2(vx, vl);
+        __riscv_vse32_v_f32m2(&y[i], vy, vl);
+    }
 #endif
 
     for (; i < n; ++i) {

From c3a1298cb9668c22de201d647e165e275b8e422c Mon Sep 17 00:00:00 2001
From: Eve <139727413+netrunnereve@users.noreply.github.com>
Date: Tue, 11 Nov 2025 18:53:30 +0000
Subject: [PATCH 15/51] disable rms norm mul rope for chips with no fp16 rte
 (llama/17134)

---
 ggml/src/ggml-vulkan/ggml-vulkan.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index e4b7e1ea9e5..c6503f03260 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -12670,6 +12670,12 @@ static bool ggml_vk_can_fuse_rms_norm_mul_rope(ggml_backend_vk_context * ctx, co
         return false;
     }
 
+    // conditions for pipeline creation
+    if (!(ctx->device->float_controls_rte_fp16 &&
+          sizeof(vk_op_rms_norm_mul_rope_push_constants) <= ctx->device->properties.limits.maxPushConstantsSize)) {
+        return false;
+    }
+
     return true;
 }
 
From 32d1b3493087030f89fedf997c154cb897017bce Mon Sep 17 00:00:00 2001
From: Max Krasnyansky
Date: Tue, 11 Nov 2025 15:25:04 -0800
Subject: [PATCH 16/51] hexagon: various Op fixes (llama/17135)

* hexagon: explicitly check for ops with zero nrows
  llm_graph_context::build_inp_out_ids() can generate tensors with zero
  nrows. Somehow other backends seem to handle this without obvious
  explicit checks. In the hexagon case we need to check explicitly and
  skip them.

* hexagon: introduce fastdiv, fix test-backend-ops for ADD/SUB/MUL

Co-authored-by: chraac

* hexagon: use fastdiv in ADD_ID

* hexagon: use ggml_op_is_empty and ggml_is_empty to check for NOPs

---------

Co-authored-by: chraac
---
 ggml/src/ggml-hexagon/ggml-hexagon.cpp | 37 ++++---------
 ggml/src/ggml-hexagon/htp/binary-ops.c | 76 ++++++++++++++----------
 ggml/src/ggml-hexagon/htp/htp-msg.h    |  8 +--
 ggml/src/ggml-hexagon/htp/htp-ops.h    | 11 ++++
 ggml/src/ggml-hexagon/htp/ops-utils.h  | 33 +++++++++++
 5 files changed, 106 insertions(+), 59 deletions(-)

diff --git a/ggml/src/ggml-hexagon/ggml-hexagon.cpp b/ggml/src/ggml-hexagon/ggml-hexagon.cpp
index 7064b7486f2..cabd301ad35 100644
--- a/ggml/src/ggml-hexagon/ggml-hexagon.cpp
+++ b/ggml/src/ggml-hexagon/ggml-hexagon.cpp
@@ -3156,26 +3156,17 @@ static inline bool op_reuse_src1(const ggml_tensor * op1, const ggml_tensor * op
     return (op0 && op0->src[1] == op1->src[1]);
 }
 
+static inline bool is_compute_op(ggml_tensor *node)
+{
+    return !(ggml_op_is_empty(node->op) || ggml_is_empty(node));
+}
+
 // scan the graph and figure out last compute op index
 static inline int last_compute_op(ggml_cgraph * graph) {
-    int last;
+    int last = 0;
 
     for (int i = 0; i < graph->n_nodes; ++i) {
-        ggml_tensor * node = graph->nodes[i];
-
-        switch (node->op) {
-        case GGML_OP_MUL_MAT:
-        case GGML_OP_MUL_MAT_ID:
-        case GGML_OP_MUL:
-        case GGML_OP_ADD:
-        case GGML_OP_SUB:
-        case GGML_OP_RMS_NORM:
-        case GGML_OP_GLU:
-        case GGML_OP_ADD_ID:
-            last = i;
-            break;
-
-        default:
-            break;
+        if (is_compute_op(graph->nodes[i])) {
+            last = i;
         }
     }
 
@@ -3194,6 +3185,10 @@ static ggml_status ggml_backend_hexagon_graph_compute(ggml_backend_t backend, gg
     for (int i = 0; i < graph->n_nodes; ++i) {
         ggml_tensor * node = graph->nodes[i];
 
+        if (!is_compute_op(node)) {
+            continue;
+        }
+
         uint32_t flags = 0;
 
         // skip quantizer if src1 is reused
@@ -3245,14 +3240,6 @@ static ggml_status ggml_backend_hexagon_graph_compute(ggml_backend_t backend, gg
                 ggml_hexagon_rope(node, flags);
                 break;
 
-            // non-compute ops
-            case GGML_OP_NONE:
-            case GGML_OP_RESHAPE:
-            case GGML_OP_VIEW:
-            case GGML_OP_PERMUTE:
-            case GGML_OP_TRANSPOSE:
-                break;
-
             default:
                 GGML_ABORT("\nggml-hex: graph-compute %s is not supported\n", ggml_op_desc(node));
         }
diff --git a/ggml/src/ggml-hexagon/htp/binary-ops.c b/ggml/src/ggml-hexagon/htp/binary-ops.c
index 92c0109d287..8ed7f67d9c8 100644
--- a/ggml/src/ggml-hexagon/htp/binary-ops.c
+++ b/ggml/src/ggml-hexagon/htp/binary-ops.c
@@ -34,6 +34,11 @@ static hvx_elemwise_f32_func func_table_HVX[] = { hvx_mul_f32, hvx_add_f32,
 static hvx_elemwise_f32_func func_table_HVX_opt[] = { hvx_mul_f32_opt, hvx_add_f32_opt, hvx_sub_f32_opt };
 
 #define htp_binary_preamble \
+    const struct htp_tensor * src0 = &octx->src0; \
+    const struct htp_tensor * src1 = &octx->src1; \
+    const struct htp_tensor * src2 = &octx->src2; \
+    struct htp_tensor * dst = &octx->dst; \
+    \
     const uint32_t ne00 = src0->ne[0]; \
     const uint32_t ne01 = src0->ne[1]; \
     const uint32_t ne02 = src0->ne[2]; \
@@ -62,16 +67,15 @@ static hvx_elemwise_f32_func func_table_HVX_opt[] = { hvx_mul_f32_opt, hvx_add_f
     const uint32_t nb0 = dst->nb[0]; \
     const uint32_t nb1 = dst->nb[1]; \
     const uint32_t nb2 = dst->nb[2]; \
-    const uint32_t nb3 = dst->nb[3];
-
-static void binary_job_f32_per_thread(const struct htp_tensor * src0,
-                                      const struct
htp_tensor * src1, - struct htp_tensor * dst, - uint8_t * spad_data, - uint32_t nth, - uint32_t ith, - uint32_t src0_nrows_per_thread, - enum htp_op op) { + const uint32_t nb3 = dst->nb[3]; \ + \ + const uint32_t src0_nrows_per_thread = octx->src0_nrows_per_thread; + +static void binary_job_f32_per_thread(struct htp_ops_context * octx, + uint8_t * spad_data, + uint32_t nth, + uint32_t ith, + enum htp_op op) { htp_binary_preamble; const size_t src0_row_size = nb01; @@ -107,16 +111,23 @@ static void binary_job_f32_per_thread(const struct htp_tensor * src0, uint8_t * restrict spad_data_th = spad_data + (ith * src0_row_size); - const uint32_t nr0 = ne00 / ne10; - const uint8_t * restrict src0_ptr = (const uint8_t *) src0->data + (src0_start_row * src0_row_size); uint8_t * restrict dst_ptr = (uint8_t *) dst->data + (src0_start_row * dst_row_size); const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; - const uint8_t * restrict src1_ptr = NULL; + + const uint32_t ne02_ne01 = ne02 * ne01; for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { - src1_ptr = data_src1 + (ir % src1_nrows) * src1_row_size; + const uint32_t i03 = fastdiv(ir, &octx->src0_div21); + const uint32_t i02 = fastdiv(ir - i03 * ne02_ne01, &octx->src0_div1); + const uint32_t i01 = (ir - i03 * ne02_ne01 - i02 * ne01); + + const uint32_t i13 = fastmodulo(i03, ne13, &octx->src1_div3); + const uint32_t i12 = fastmodulo(i02, ne12, &octx->src1_div2); + const uint32_t i11 = fastmodulo(i01, ne11, &octx->src1_div1); + + const uint8_t * restrict src1_ptr = data_src1 + i13 * nb13 + i12 * nb12 + i11 * src1_row_size; if (ir + 1 < src0_end_row) { htp_l2fetch(src0_ptr + ne00, 1, src0_row_size, src0_row_size); @@ -125,6 +136,7 @@ static void binary_job_f32_per_thread(const struct htp_tensor * src0, } } + const uint32_t nr0 = ne00 / ne10; if (nr0 > 1) { if ((1 == is_aligned) && (nr0 == ne00)) { hvx_bcast_fp32_a(spad_data_th, *(float *) src1_ptr, nr0); @@ -149,22 +161,17 @@ static void binary_job_f32_per_thread(const struct htp_tensor * src0, (unsigned) HAP_perf_qtimer_count_to_us(t2 - t1)); } -static void binary_add_id_job_f32_per_thread(const struct htp_tensor * src0, - const struct htp_tensor * src1, - const struct htp_tensor * src2, - struct htp_tensor * dst, - uint8_t * spad_data, - uint32_t nth, - uint32_t ith, - uint32_t src0_nrows_per_thread, - hvx_elemwise_f32_func func_HVX) { +static void binary_add_id_job_f32_per_thread(struct htp_ops_context * octx, + uint8_t * spad_data, + uint32_t nth, + uint32_t ith, + hvx_elemwise_f32_func func_HVX) { htp_binary_preamble; const size_t src0_row_size = nb01; const size_t src1_row_size = nb11; const size_t dst_row_size = nb1; - const uint32_t ne02_ne01 = ne02 * ne01; const uint32_t src0_nrows = ne01 * ne02 * ne03; // src0 rows const uint32_t src0_start_row = src0_nrows_per_thread * ith; @@ -187,10 +194,11 @@ static void binary_add_id_job_f32_per_thread(const struct htp_tensor * src0, const uint8_t * restrict data_src1 = (const uint8_t *) src1->data; uint8_t * restrict data_dst = (uint8_t *) dst->data; + const uint32_t ne02_ne01 = ne02 * ne01; for (uint32_t ir = src0_start_row; ir < src0_end_row; ir++) { // src0 indices - const uint32_t i03 = ir / ne02_ne01; - const uint32_t i02 = (ir - i03 * ne02_ne01) / ne01; + const uint32_t i03 = fastdiv(ir, &octx->src0_div21); + const uint32_t i02 = fastdiv(ir - i03 * ne02_ne01, &octx->src0_div1); const uint32_t i01 = (ir - i03 * ne02_ne01 - i02 * ne01); // src1 indices @@ -234,13 +242,11 @@ static void binary_job_dispatcher_f32(unsigned int 
n, unsigned int i, void * dat case HTP_OP_MUL: case HTP_OP_ADD: case HTP_OP_SUB: - binary_job_f32_per_thread(&octx->src0, &octx->src1, &octx->dst, octx->src1_spad.data, n, i, - octx->src0_nrows_per_thread, octx->op); + binary_job_f32_per_thread(octx, octx->src1_spad.data, n, i, octx->op); break; case HTP_OP_ADD_ID: - binary_add_id_job_f32_per_thread(&octx->src0, &octx->src1, &octx->src2, &octx->dst, octx->src0_spad.data, n, - i, octx->src0_nrows_per_thread, hvx_add_f32); + binary_add_id_job_f32_per_thread(octx, octx->src0_spad.data, n, i, hvx_add_f32); break; default: @@ -321,6 +327,16 @@ static int execute_op_binary_f32(struct htp_ops_context * octx) { octx->src0_nrows_per_thread = (src0_nrows + n_jobs - 1) / n_jobs; + octx->src0_div21 = init_fastdiv_values(src0->ne[2] * src0->ne[1]); + octx->src0_div3 = init_fastdiv_values(src0->ne[3]); + octx->src0_div2 = init_fastdiv_values(src0->ne[2]); + octx->src0_div1 = init_fastdiv_values(src0->ne[1]); + + octx->src1_div21 = init_fastdiv_values(src1->ne[2] * src1->ne[1]); + octx->src1_div3 = init_fastdiv_values(src1->ne[3]); + octx->src1_div2 = init_fastdiv_values(src1->ne[2]); + octx->src1_div1 = init_fastdiv_values(src1->ne[1]); + worker_pool_run_func(octx->ctx->worker_pool, binary_op_func, octx, n_jobs); } diff --git a/ggml/src/ggml-hexagon/htp/htp-msg.h b/ggml/src/ggml-hexagon/htp/htp-msg.h index f23d5788068..9278f41f4e1 100644 --- a/ggml/src/ggml-hexagon/htp/htp-msg.h +++ b/ggml/src/ggml-hexagon/htp/htp-msg.h @@ -119,10 +119,10 @@ static const char * htp_type_name(uint32_t t) { #define HTP_MAX_DIMS 4 struct htp_tensor { - uint32_t data; // Buffer offset in the messages, and data pointer on the NSP - uint32_t type; // Data type - uint32_t ne[HTP_MAX_DIMS]; // Number of elements - uint32_t nb[HTP_MAX_DIMS]; // Stride in bytes (see ggml.h ggml_tensor) + uint32_t data; // Buffer offset in the messages, and data pointer on the NSP + uint32_t type; // Data type + uint32_t ne[HTP_MAX_DIMS]; // Number of elements + uint32_t nb[HTP_MAX_DIMS]; // Stride in bytes (see ggml.h ggml_tensor) }; #define HTP_MAX_OP_PARAMS 64 diff --git a/ggml/src/ggml-hexagon/htp/htp-ops.h b/ggml/src/ggml-hexagon/htp/htp-ops.h index 45723196791..e87657436f0 100644 --- a/ggml/src/ggml-hexagon/htp/htp-ops.h +++ b/ggml/src/ggml-hexagon/htp/htp-ops.h @@ -4,6 +4,7 @@ #include "htp-ctx.h" #include "htp-msg.h" #include "worker-pool.h" +#include "ops-utils.h" #include #include @@ -38,6 +39,16 @@ struct htp_ops_context { uint32_t src0_nrows_per_thread; uint32_t src1_nrows_per_thread; + struct fastdiv_values src0_div1; // fastdiv values for ne1 + struct fastdiv_values src0_div2; // fastdiv values for ne2 + struct fastdiv_values src0_div3; // fastdiv values for ne3 + struct fastdiv_values src0_div21; // fastdiv values for ne2 * ne1 + + struct fastdiv_values src1_div1; // fastdiv values for ne1 + struct fastdiv_values src1_div2; // fastdiv values for ne2 + struct fastdiv_values src1_div3; // fastdiv values for ne3 + struct fastdiv_values src1_div21; // fastdiv values for ne2 * ne1 + uint32_t flags; }; diff --git a/ggml/src/ggml-hexagon/htp/ops-utils.h b/ggml/src/ggml-hexagon/htp/ops-utils.h index 302f1625216..af9c3305f61 100644 --- a/ggml/src/ggml-hexagon/htp/ops-utils.h +++ b/ggml/src/ggml-hexagon/htp/ops-utils.h @@ -31,6 +31,39 @@ static inline uint32_t htp_round_up(uint32_t n, uint32_t m) { return m * ((n + m - 1) / m); } +// See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1. 
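+// For example, d = 3 gives L = 2 and m' = 1431655766, and then
+// 9/3 = (mulhi(9, m') + 9) >> 2 = (3 + 9) >> 2 = 3.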
+// Precompute mp (m' in the paper) and L such that division +// can be computed using a multiply (high 32b of 64b result) +// and a shift: +// +// n/d = (mulhi(n, mp) + n) >> L; +struct fastdiv_values { + uint32_t mp; + uint32_t l; +}; + +static inline struct fastdiv_values init_fastdiv_values(uint32_t d) { + struct fastdiv_values result = { 0, 0 }; + // compute L = ceil(log2(d)); + while (result.l < 32 && ((uint32_t) 1 << result.l) < d) { + ++(result.l); + } + + result.mp = (uint32_t) (((uint64_t) 1 << 32) * (((uint64_t) 1 << result.l) - d) / d + 1); + return result; +} + +static inline uint32_t fastdiv(uint32_t n, const struct fastdiv_values * vals) { + // Compute high 32 bits of n * mp + const uint32_t hi = (uint32_t) (((uint64_t) n * vals->mp) >> 32); // mulhi(n, mp) + // add n, apply bit shift + return (hi + n) >> vals->l; +} + +static inline uint32_t fastmodulo(uint32_t n, uint32_t d, const struct fastdiv_values * vals) { + return n - fastdiv(n, vals) * d; +} + static inline void htp_l2fetch(const void * p, uint32_t height, uint32_t width, uint32_t stride) { const uint64_t control = Q6_P_combine_RR(stride, Q6_R_combine_RlRl(width, height)); asm volatile(" l2fetch(%0,%1) " : : "r"(p), "r"(control)); From e9df95815094dc6024a86e88ff9eaa2d3f95652e Mon Sep 17 00:00:00 2001 From: Neo Zhang Jianyu Date: Wed, 12 Nov 2025 14:44:29 +0800 Subject: [PATCH 17/51] fix ci crash about SSM_CONV (llama/17169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix ci crash * Update ggml-sycl.cpp * Update ggml/src/ggml-sycl/ggml-sycl.cpp Co-authored-by: Sigbjørn Skjæret --------- Co-authored-by: Zhang Jianyu Co-authored-by: Sigbjørn Skjæret --- ggml/src/ggml-sycl/ggml-sycl.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index f3b3e365740..941fd41c0d0 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -3933,6 +3933,7 @@ static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct gg break; case GGML_OP_SSM_CONV: ggml_sycl_ssm_conv(ctx, dst); + break; case GGML_OP_ROLL: ggml_sycl_roll(ctx, dst); break; From 2f2c6c3b23a8390eeb0fdf2edc1b245b3edc6ad9 Mon Sep 17 00:00:00 2001 From: TecJesh Date: Wed, 12 Nov 2025 15:11:42 +0800 Subject: [PATCH 18/51] CANN: Add L2_NORM op support (llama/16856) * update L2_NORM op support * update L2_NORM op support * remove extra whitespace --- ggml/src/ggml-cann/aclnn_ops.cpp | 29 +++++++++++++++++++++++++++++ ggml/src/ggml-cann/aclnn_ops.h | 24 ++++++++++++++++++++++++ ggml/src/ggml-cann/ggml-cann.cpp | 4 ++++ 3 files changed, 57 insertions(+) diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index 5df6dc96a3b..4835c5c0387 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -448,6 +448,35 @@ void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_cann_release_resources(ctx, norm, acl_src, acl_dst); } +void ggml_cann_l2_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src = dst->src[0]; + + aclTensor * acl_src = ggml_cann_create_tensor(src); + aclTensor * acl_dst = ggml_cann_create_tensor(dst); + + size_t type_size = ggml_type_size(src->type); + int64_t n_bytes = src->ne[3]* src->ne[2]* src->ne[1]* type_size; + ggml_cann_pool_alloc temp_buffer_allocator(ctx.pool(), n_bytes); + void * buffer = temp_buffer_allocator.get(); + + int64_t div_ne[] = {1, src->ne[1], src->ne[2], src->ne[3]}; + size_t 
div_nb[GGML_MAX_DIMS]; + div_nb[0] = sizeof(float); + for (int i = 1; i < GGML_MAX_DIMS; ++i) { + div_nb[i] = div_nb[i - 1] * div_ne[i - 1]; + } + aclTensor * acl_div = ggml_cann_create_tensor(buffer, ACL_FLOAT, type_size, div_ne, div_nb, GGML_MAX_DIMS); + + std::vector norm_dims = { 3 }; + aclIntArray * dims_array = aclCreateIntArray(norm_dims.data(), norm_dims.size()); + + float p_value = 2.0f; + aclScalar * p_scalar = aclCreateScalar(&p_value, aclDataType::ACL_FLOAT); + GGML_CANN_CALL_ACLNN_OP(ctx, Norm, acl_src, p_scalar, dims_array, true, acl_div); + GGML_CANN_CALL_ACLNN_OP(ctx, Div, acl_src, acl_div, acl_dst); + ggml_cann_release_resources(ctx, dims_array, p_scalar, acl_src, acl_dst, acl_div); +} + void ggml_cann_group_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; diff --git a/ggml/src/ggml-cann/aclnn_ops.h b/ggml/src/ggml-cann/aclnn_ops.h index ec7455af88c..060eedbbb02 100644 --- a/ggml/src/ggml-cann/aclnn_ops.h +++ b/ggml/src/ggml-cann/aclnn_ops.h @@ -46,6 +46,7 @@ #include #include #include +#include #include "acl_tensor.h" #include "common.h" @@ -187,6 +188,29 @@ void ggml_cann_argsort(ggml_backend_cann_context & ctx, ggml_tensor * dst); */ void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); +/** + * @brief Computes the L2 Normalization for a ggml tensor using the CANN + * backend. + * + * @details This function applies the L2 Normalization operation on the + * input tensor `src` and stores the result in the destination tensor + * `dst`. L2 Normalization scales the input tensor such that the + * L2 norm along the specified dimension equals 1. This operation + * is commonly used in neural networks for feature normalization + * and vector scaling. + * The operation is defined as: + * \f[ + * \text{out} = \frac{x}{\sqrt{\sum{x^2}}} + * \f] + * The normalization is performed along the last dimension by default. + * + * @param ctx The CANN context used for operations. + * @param dst The destination tensor where the normalized values will be stored. + * @attention The normalization is performed along the last dimension of the + * input tensor by default. + */ +void ggml_cann_l2_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); + /** * @brief Computes the Group Normalization for a ggml tensor using the CANN * backend. 
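For reference, the pair of ACL calls used by ggml_cann_l2_norm (Norm with p = 2 along the innermost dimension, then Div) matches the following scalar sketch over contiguous f32 rows. This is illustrative only; the helper name is made up and any eps handling is omitted:

    #include <math.h>
    #include <stdint.h>

    // Scale every row of length ne0 so that its L2 norm becomes 1.
    static void l2_norm_rows_ref(const float * x, float * out, int64_t nrows, int64_t ne0) {
        for (int64_t r = 0; r < nrows; ++r) {
            const float * xr = x + r * ne0;
            float sum = 0.0f;
            for (int64_t i = 0; i < ne0; ++i) {
                sum += xr[i] * xr[i];
            }
            const float inv = 1.0f / sqrtf(sum); // Norm computes sqrt(sum(x^2)); Div divides by it
            for (int64_t i = 0; i < ne0; ++i) {
                out[r * ne0 + i] = xr[i] * inv;
            }
        }
    }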
diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 51345742ee5..9de9440ac65 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -1777,6 +1777,9 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context & ctx, struct gg case GGML_OP_GROUP_NORM: ggml_cann_group_norm(ctx, dst); break; + case GGML_OP_L2_NORM: + ggml_cann_l2_norm(ctx, dst); + break; case GGML_OP_CONCAT: ggml_cann_concat(ctx, dst); break; @@ -2515,6 +2518,7 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_ten // value of paddingW should be at most half of kernelW return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2)); } + case GGML_OP_L2_NORM: case GGML_OP_DUP: case GGML_OP_SUM: case GGML_OP_IM2COL: From 6a2c71b9ae08d882e88a69d336c9e2cb91698855 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alberto=20Cabrera=20P=C3=A9rez?= Date: Wed, 12 Nov 2025 12:52:19 +0000 Subject: [PATCH 19/51] ggml-cpu: handle 3d tensors in repack mat_mul (llama/17030) * ggml-cpu: handle 3d tensors in repack mul_mat * Removed unnecessary branch, removed need for * Fixed dst_ptr pointer in chunk + clang_format * GGML_ASSERT to check wdata within bounds * Accidental ggml.h inclusion * Improved GGML_ASSERT on wdata boundaries --- ggml/src/ggml-cpu/repack.cpp | 132 ++++++++++++++++++++++++----------- 1 file changed, 90 insertions(+), 42 deletions(-) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 8421c84ce09..274be146dc5 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -1600,29 +1600,52 @@ template src[0]; const ggml_tensor * src1 = op->src[1]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS - const void * src1_wdata = params->wdata; const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); + GGML_ASSERT(ne03 == 1 && ne13 == 1); + GGML_ASSERT(ne12 % ne02 == 0); + const int64_t r2 = ne12 / ne02; + + const int64_t i12 = src1_start / ne1; + const int64_t i11 = src1_start - i12 * ne1; + + // Determine batch index + const int64_t i02 = i12 / r2; + + const int64_t i1 = i11; + const int64_t i2 = i12; + + const char * src0_ptr = (const char *) src0->data + i02 * nb02; + const char * src1_ptr = (const char *) params->wdata + (i11 + i12 * ne11) * src1_col_stride; + char * dst_ptr = ((char *) dst->data + (i1 * nb1 + i2 * nb2)); + + const int64_t nrows = src1_end - src1_start; + const int64_t ncols = src0_end - src0_start; + + GGML_ASSERT(src1_ptr + src1_col_stride * nrows <= (const char *) params->wdata + params->wsize); + // If there are more than three rows in src1, use gemm; otherwise, use gemv. 
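+    // (src1 rows were quantized in blocks of 4 by ggml_quantize_mat_t, so gemm
+    // covers the 4-row-aligned prefix and gemv handles the remainder rows.)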
- if (ne11 > 3) { - gemm(ne00, - (float *) ((char *) dst->data) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); + if (nrows > 3) { + gemm(ne00, (float *) (dst_ptr) + src0_start, nb1 / nb0, + src0_ptr + src0_start * nb01, src1_ptr, + nrows - (nrows % 4), ncols); } - for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { - gemv(ne00, - (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata + (src1_col_stride * iter), 1, - src0_end - src0_start); + for (int iter = nrows - (nrows % 4); iter < nrows; iter++) { + gemv(ne00, (float *) (dst_ptr + (iter * nb1)) + src0_start, + ne01, src0_ptr + src0_start * nb01, + src1_ptr + (src1_col_stride * iter), 1 /* nrows */, ncols); } } @@ -1647,6 +1670,12 @@ template type == GGML_TYPE_F32); GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); @@ -1654,47 +1683,60 @@ template (params->wdata); const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); + const size_t nbw2 = nbw1 * ne11; - assert(params->wsize >= nbw1 * ne11); + assert(params->wsize >= nbw2 * ne12); const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - int64_t i11_processed = 0; - for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { - ggml_quantize_mat_t((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); - } + for (int64_t i12 = 0; i12 < ne12; i12++) { + char * data_ptr = (char *) src1->data + i12 * nb12; + char * wdata_ptr = wdata + i12 * nbw2; - i11_processed = ne11 - ne11 % 4; - for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { - from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10); + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + ggml_quantize_mat_t((float *) (data_ptr + i11 * nb11), + (void *) (wdata_ptr + i11 * nbw1), 4, ne10); + } + + const int64_t i11_processed = ne11 - ne11 % 4; + for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { + from_float((float *) (data_ptr + i11 * nb11), (void *) (wdata_ptr + i11 * nbw1), ne10); + } } // disable for NUMA const bool disable_chunking = ggml_is_numa(); // 4x chunks per thread - int64_t nr = ggml_nrows(op->src[0]); - int nth_scaled = nth * 4; - int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; - int64_t nchunk = (nr + chunk_size - 1) / chunk_size; + const int64_t nr0 = ggml_nrows(op->src[0]); + const int64_t nr1 = ne1 * ne2 * ne3; + + int nth_scaled = nth * 4; + int64_t chunk_size0 = (nr0 + nth_scaled - 1) / nth_scaled; + // avoid too small chunks for narrow src1 + int64_t chunk_size1 = MAX(16, (nr1 + nth - 1) / nth); + int64_t nchunk0 = (nr0 + chunk_size0 - 1) / chunk_size0; + int64_t nchunk1 = (nr1 + chunk_size1 - 1) / chunk_size1; // Ensure minimum chunk size to avoid alignment issues with high thread counts // Minimum chunk size should be at least NB_COLS to prevent overlapping chunks after alignment const int64_t min_chunk_size = NB_COLS; - if (nchunk > 0 && (nr / nchunk) < min_chunk_size && nr >= min_chunk_size) { - nchunk = (nr + min_chunk_size - 1) / min_chunk_size; + if (nchunk0 > 0 && (nr0 / nchunk0) < min_chunk_size && nr0 >= min_chunk_size) { + nchunk0 = (nr0 + min_chunk_size - 1) / min_chunk_size; } - if (nth == 1 || nchunk < nth || disable_chunking) { - nchunk = nth; + if (nth == 1 || nchunk0 * nchunk1 < nth || disable_chunking) { + nchunk0 = nr0 > nr1 ? nth : 1; + nchunk1 = nr0 > nr1 ? 
1 : nth; } + const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; + const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; + // Ensure nchunk doesn't exceed the number of rows divided by minimum chunk size // This prevents creating too many tiny chunks that could overlap after alignment - const int64_t max_nchunk = (nr + min_chunk_size - 1) / min_chunk_size; - if (nchunk > max_nchunk) { - nchunk = max_nchunk; - } + const int64_t max_nchunk = (nr0 + min_chunk_size - 1) / min_chunk_size; + nchunk0 = MIN(nchunk0, max_nchunk); if (ith == 0) { // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. @@ -1706,23 +1748,29 @@ template ne01) { - src0_end = ne01; - } + src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; + src0_end = MIN(src0_end, ne01); + // Make sure current plane is the last one before exiting if (src0_start >= src0_end) { - break; + current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); + continue; } - forward_mul_mat_one_chunk(params, dst, src0_start, src0_end); + forward_mul_mat_one_chunk(params, dst, src0_start, src0_end, src1_start, src1_end); current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); } From a541b0ef57793b6be2895a95571703b2564efd98 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 12 Nov 2025 20:43:38 +0200 Subject: [PATCH 20/51] ggml : use std::sort in ggml_argsort CPU implementation (llama/17211) * ggml : use std::sort in ggml_argsort CPU implementation * cont : add missing header --- ggml/src/ggml-cpu/ops.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 5a272b9abc5..9f1e5f8d646 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7,8 +7,9 @@ #include "unary-ops.h" #include "vec.h" -#include +#include #include +#include // ggml_compute_forward_dup @@ -7682,24 +7683,24 @@ static void ggml_compute_forward_argsort_f32( ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0); for (int64_t i = ith; i < nr; i += nth) { - int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1); const float * src_data = (float *)((char *) src0->data + i*nb01); + int32_t * dst_data = (int32_t *)((char *) dst->data + i*nb1); + for (int64_t j = 0; j < ne0; j++) { dst_data[j] = j; } - // C doesn't have a functional sort, so we do a bubble sort instead - for (int64_t j = 0; j < ne0; j++) { - for (int64_t k = j + 1; k < ne0; k++) { - if ((order == GGML_SORT_ORDER_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) || - (order == GGML_SORT_ORDER_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) { - int32_t tmp = dst_data[j]; - dst_data[j] = dst_data[k]; - dst_data[k] = tmp; - } - } + std::function cmp; + + // note: this might be causing memory allocations? 
ideally should be avoided if it's the case + switch (order) { + case GGML_SORT_ORDER_ASC: cmp = [src_data](int32_t a, int32_t b) { return src_data[a] < src_data[b]; }; break; + case GGML_SORT_ORDER_DESC: cmp = [src_data](int32_t a, int32_t b) { return src_data[a] > src_data[b]; }; break; + default: GGML_ABORT("invalid sort order"); } + + std::sort(dst_data, dst_data + ne0, cmp); } } From 214d1af0511ed3a7a2f1be0875bd3c8393b98001 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Wed, 12 Nov 2025 23:13:55 +0100 Subject: [PATCH 21/51] CUDA: static assert to prevent misuse of memcpy_1 (llama/17198) --- ggml/src/ggml-cuda/common.cuh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index ca876459d40..25e9308d756 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -586,6 +586,12 @@ static __device__ __forceinline__ void ggml_cuda_mad(half2 & acc, const half2 v, // If dst and src point at different address spaces then they are guaranteed to not be aliased. template static __device__ __forceinline__ void ggml_cuda_memcpy_1(void * __restrict__ dst, const void * __restrict__ src) { + static_assert( + nbytes <= ggml_cuda_get_max_cpy_bytes() || alignment == 0, + "You are misusing the alignment parameter for ggml_cuda_memcpy_1. " + "The intent is for the parameter is only as a workaround if either one of the pointers is not properly aligned. " + "If you use it to do more bytes per copy than ggml_cuda_max_cpy_bytes() the reads and writes may not be coalesced. " + "Call ggml_cuda_memcpy_1 in a loop instead."); if constexpr (alignment != 0) { static_assert(nbytes % alignment == 0, "bad alignment"); } From be4d13038f1aaf0008c4f85f5e7445ceb0faa77d Mon Sep 17 00:00:00 2001 From: Aman Gupta Date: Thu, 13 Nov 2025 08:50:01 +0800 Subject: [PATCH 22/51] CUDA: fuse rope + set_rows (llama/16884) * CUDA: add fused rope * move k forward_expand up * create helper function instead of re-using params * make assert statement more in line with comment * rope_norm: coalesced writes to global mem --- ggml/src/ggml-cuda/ggml-cuda.cu | 49 +++++++ ggml/src/ggml-cuda/rope.cu | 222 +++++++++++++++++++++++--------- ggml/src/ggml-cuda/rope.cuh | 2 + 3 files changed, 213 insertions(+), 60 deletions(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 68dc57843e4..41de87c0999 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2992,6 +2992,36 @@ static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) { } #endif +static bool ggml_cuda_should_fuse_rope_set_rows(const ggml_tensor * rope, + const ggml_tensor * view, + const ggml_tensor * set_rows) { + // ne3 not tested + if (rope->src[0]->ne[3] != 1) { + return false; + } + + if (set_rows->type != GGML_TYPE_F32 && set_rows->type != GGML_TYPE_F16) { + return false; + } + + if (set_rows->src[1]->type != GGML_TYPE_I64) { + return false; + } + + // The view should flatten two dims of rope into one dim + if (!ggml_is_contiguous(view) || view->ne[0] != rope->ne[0] * rope->ne[1]) { + return false; + } + + // Only norm/neox shaders have the fusion code + const int mode = ((const int32_t *) rope->op_params)[2]; + if (mode != GGML_ROPE_TYPE_NORMAL && mode != GGML_ROPE_TYPE_NEOX) { + return false; + } + + return true; +} + static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list ops, std::initializer_list unary_ops) { #ifndef NDEBUG const 
size_t num_unary = std::count(ops.begin(), ops.end(), GGML_OP_UNARY); @@ -3067,6 +3097,16 @@ static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, } } + if (ops.size() == 3 && ggml_can_fuse_subgraph(cgraph, node_idx, ops, { node_idx + 2 })) { + const ggml_tensor * rope = cgraph->nodes[node_idx]; + const ggml_tensor * view = cgraph->nodes[node_idx + 1]; + const ggml_tensor * set_rows = cgraph->nodes[node_idx + 2]; + + if (ggml_cuda_should_fuse_rope_set_rows(rope, view, set_rows)) { + return true; + } + } + if (!ggml_can_fuse(cgraph, node_idx, ops)) { return false; } @@ -3196,6 +3236,15 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx continue; } + if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_ROPE, GGML_OP_VIEW, GGML_OP_SET_ROWS }, {})) { + ggml_tensor * rope = cgraph->nodes[i]; + ggml_tensor * set_rows = cgraph->nodes[i + 2]; + + ggml_cuda_op_rope_fused(*cuda_ctx, rope, set_rows); + i += 2; + continue; + } + if (node->op == GGML_OP_ADD) { int n_fuse = 0; ggml_op ops[8]; diff --git a/ggml/src/ggml-cuda/rope.cu b/ggml/src/ggml-cuda/rope.cu index 78ed7f519ab..88ed79111a1 100644 --- a/ggml/src/ggml-cuda/rope.cu +++ b/ggml/src/ggml-cuda/rope.cu @@ -1,3 +1,6 @@ +#include "convert.cuh" +#include "ggml-cuda/common.cuh" +#include "ggml.h" #include "rope.cuh" struct rope_corr_dims { @@ -37,11 +40,23 @@ static __device__ void rope_yarn( } } -template -static __global__ void rope_norm( - const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, - const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, - const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors) { +template +static __global__ void rope_norm(const T * x, + D * dst, + const int ne0, + const int ne1, + const int s1, + const int s2, + const int n_dims, + const int32_t * pos, + const float freq_scale, + const float ext_factor, + const float attn_factor, + const rope_corr_dims corr_dims, + const float theta_scale, + const float * freq_factors, + const int64_t * row_indices, + const int set_rows_stride) { const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (i0 >= ne0) { @@ -53,13 +68,27 @@ static __global__ void rope_norm( const int row_x = row_dst % ne1; const int channel_x = row_dst / ne1; - const int idst = row_dst*ne0 + i0; + int idst = row_dst * ne0 + i0; const int ix = channel_x*s2 + row_x*s1 + i0; - if (i0 >= n_dims) { - dst[idst + 0] = x[ix + 0]; - dst[idst + 1] = x[ix + 1]; + // Fusion optimization: ROPE + VIEW + SET_ROWS. + // The rope output is viewed as a 1D tensor and offset based on a row index in row_indices. 
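+    // (With fusion, idst addresses the SET_ROWS destination directly:
+    //  row_indices[channel_x] selects the destination row, set_rows_stride is its
+    //  stride in elements, and row_x*ne0 + i0 is the offset within that row.)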
+ if (set_rows_stride != 0) { + idst = row_x * ne0 + i0; + idst += row_indices[channel_x] * set_rows_stride; + } + const auto & store_coaelsced = [&](float x0, float x1) { + if constexpr (std::is_same_v) { + float2 v = make_float2(x0, x1); + ggml_cuda_memcpy_1<8>(dst + idst, &v); + } else if constexpr (std::is_same_v) { + half2 v = make_half2(x0, x1); + ggml_cuda_memcpy_1<4>(dst + idst, &v); + } + }; + if (i0 >= n_dims) { + store_coaelsced(x[ix + 0], x[ix + 1]); return; } @@ -75,15 +104,26 @@ static __global__ void rope_norm( const float x0 = x[ix + 0]; const float x1 = x[ix + 1]; - dst[idst + 0] = x0*cos_theta - x1*sin_theta; - dst[idst + 1] = x0*sin_theta + x1*cos_theta; + store_coaelsced(x0 * cos_theta - x1 * sin_theta, x0 * sin_theta + x1 * cos_theta); } -template -static __global__ void rope_neox( - const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, - const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor, - const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors) { +template +static __global__ void rope_neox(const T * x, + D * dst, + const int ne0, + const int ne1, + const int s1, + const int s2, + const int n_dims, + const int32_t * pos, + const float freq_scale, + const float ext_factor, + const float attn_factor, + const rope_corr_dims corr_dims, + const float theta_scale, + const float * freq_factors, + const int64_t * row_indices, + const int set_rows_stride) { const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); if (i0 >= ne0) { @@ -95,12 +135,19 @@ static __global__ void rope_neox( const int row_x = row_dst % ne1; const int channel_x = row_dst / ne1; - const int idst = row_dst*ne0 + i0/2; + int idst = row_dst * ne0 + i0 / 2; const int ix = channel_x*s2 + row_x*s1 + i0/2; + // Fusion optimization: ROPE + VIEW + SET_ROWS. + // The rope output is viewed as a 1D tensor and offset based on a row index in row_indices. 
+ if (set_rows_stride != 0) { + idst = row_x * ne0 + i0 / 2; + idst += row_indices[channel_x] * set_rows_stride; + } + if (i0 >= n_dims) { - dst[idst + i0/2 + 0] = x[ix + i0/2 + 0]; - dst[idst + i0/2 + 1] = x[ix + i0/2 + 1]; + dst[idst + i0 / 2 + 0] = ggml_cuda_cast(x[ix + i0 / 2 + 0]); + dst[idst + i0 / 2 + 1] = ggml_cuda_cast(x[ix + i0 / 2 + 1]); return; } @@ -117,8 +164,8 @@ static __global__ void rope_neox( const float x0 = x[ix + 0]; const float x1 = x[ix + n_dims/2]; - dst[idst + 0] = x0*cos_theta - x1*sin_theta; - dst[idst + n_dims/2] = x0*sin_theta + x1*cos_theta; + dst[idst + 0] = ggml_cuda_cast(x0 * cos_theta - x1 * sin_theta); + dst[idst + n_dims / 2] = ggml_cuda_cast(x0 * sin_theta + x1 * cos_theta); } template @@ -238,11 +285,25 @@ static __global__ void rope_vision( dst[idst + n_dims] = x0*sin_theta + x1*cos_theta; } -template -static void rope_norm_cuda( - const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr, - const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor, - const rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) { +template +static void rope_norm_cuda(const T * x, + D * dst, + const int ne0, + const int ne1, + const int s1, + const int s2, + const int n_dims, + const int nr, + const int32_t * pos, + const float freq_scale, + const float freq_base, + const float ext_factor, + const float attn_factor, + const rope_corr_dims corr_dims, + const float * freq_factors, + const int64_t * row_indices, + const int set_rows_stride, + cudaStream_t stream) { GGML_ASSERT(ne0 % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); @@ -252,20 +313,34 @@ static void rope_norm_cuda( if (freq_factors == nullptr) { rope_norm<<>>( - x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, - attn_factor, corr_dims, theta_scale, freq_factors); + x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, + freq_factors, row_indices, set_rows_stride); } else { rope_norm<<>>( - x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, - attn_factor, corr_dims, theta_scale, freq_factors); + x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, + freq_factors, row_indices, set_rows_stride); } } -template -static void rope_neox_cuda( - const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims, const int nr, - const int32_t * pos, const float freq_scale, const float freq_base, const float ext_factor, const float attn_factor, - const rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) { +template +static void rope_neox_cuda(const T * x, + D * dst, + const int ne0, + const int ne1, + const int s1, + const int s2, + const int n_dims, + const int nr, + const int32_t * pos, + const float freq_scale, + const float freq_base, + const float ext_factor, + const float attn_factor, + const rope_corr_dims corr_dims, + const float * freq_factors, + const int64_t * row_indices, + const int set_rows_stride, + cudaStream_t stream) { GGML_ASSERT(ne0 % 2 == 0); const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); @@ -274,13 +349,13 @@ static void rope_neox_cuda( const float theta_scale = powf(freq_base, -2.0f/n_dims); if (freq_factors == 
nullptr) { - rope_neox<<>>( - x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, - attn_factor, corr_dims, theta_scale, freq_factors); + rope_neox<<>>( + x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, + freq_factors, row_indices, set_rows_stride); } else { - rope_neox<<>>( - x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, - attn_factor, corr_dims, theta_scale, freq_factors); + rope_neox<<>>( + x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims, theta_scale, + freq_factors, row_indices, set_rows_stride); } } @@ -333,7 +408,9 @@ static void rope_vision_cuda( } template -void ggml_cuda_op_rope_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { +void ggml_cuda_op_rope_impl(ggml_backend_cuda_context & ctx, + ggml_tensor * dst, + const ggml_tensor * set_rows = nullptr) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; const ggml_tensor * src2 = dst->src[2]; @@ -341,12 +418,25 @@ void ggml_cuda_op_rope_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) const float * src0_d = (const float *)src0->data; const float * src1_d = (const float *)src1->data; - float * dst_d = (float *)dst->data; + void * dst_d = dst->data; + const int64_t * row_indices = nullptr; + ggml_type dst_type = dst->type; + int set_rows_stride = 0; + + if (set_rows != nullptr) { + GGML_ASSERT(forward); + dst_d = set_rows->data; + row_indices = (const int64_t *) set_rows->src[1]->data; + dst_type = set_rows->type; + set_rows_stride = set_rows->nb[1] / ggml_type_size(set_rows->type); + } cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - GGML_ASSERT(src0->type == dst->type); + // When not fused, src0 and dst types must match + // When fused (ROPE+VIEW+SET_ROWS), src0 may be F32 and dst may be F16 + GGML_ASSERT(src0->type == dst->type || (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16)); const int64_t ne00 = src0->ne[0]; // head dims const int64_t ne01 = src0->ne[1]; // num heads @@ -404,14 +494,18 @@ void ggml_cuda_op_rope_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) // compute if (is_neox) { - if (src0->type == GGML_TYPE_F32) { - rope_neox_cuda( - (const float *) src0_d, (float *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, - freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream); - } else if (src0->type == GGML_TYPE_F16) { - rope_neox_cuda( - (const half *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, - freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream); + if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F32) { + rope_neox_cuda((const float *) src0_d, (float *) dst_d, ne00, ne01, s01, s02, n_dims, + nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, row_indices, set_rows_stride, stream); + } else if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F16) { + rope_neox_cuda((const float *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, + nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, row_indices, set_rows_stride, stream); + } else if (src0->type == GGML_TYPE_F16 && dst_type == GGML_TYPE_F16) { + rope_neox_cuda((const half *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, + pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + 
freq_factors, row_indices, set_rows_stride, stream); } else { GGML_ABORT("fatal error"); } @@ -440,14 +534,18 @@ void ggml_cuda_op_rope_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) GGML_ABORT("fatal error"); } } else { - if (src0->type == GGML_TYPE_F32) { - rope_norm_cuda( - (const float *) src0_d, (float *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, - freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream); - } else if (src0->type == GGML_TYPE_F16) { - rope_norm_cuda( - (const half *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, pos, freq_scale, - freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream); + if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F32) { + rope_norm_cuda((const float *) src0_d, (float *) dst_d, ne00, ne01, s01, s02, n_dims, + nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, row_indices, set_rows_stride, stream); + } else if (src0->type == GGML_TYPE_F32 && dst_type == GGML_TYPE_F16) { + rope_norm_cuda((const float *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, + nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, row_indices, set_rows_stride, stream); + } else if (src0->type == GGML_TYPE_F16 && dst_type == GGML_TYPE_F16) { + rope_norm_cuda((const half *) src0_d, (half *) dst_d, ne00, ne01, s01, s02, n_dims, nr, + pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, + freq_factors, row_indices, set_rows_stride, stream); } else { GGML_ABORT("fatal error"); } @@ -461,3 +559,7 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { void ggml_cuda_op_rope_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_rope_impl(ctx, dst); } + +void ggml_cuda_op_rope_fused(ggml_backend_cuda_context & ctx, ggml_tensor * rope, ggml_tensor * set_rows) { + ggml_cuda_op_rope_impl(ctx, rope, set_rows); +} diff --git a/ggml/src/ggml-cuda/rope.cuh b/ggml/src/ggml-cuda/rope.cuh index 9139f3b220d..72af086cd1b 100644 --- a/ggml/src/ggml-cuda/rope.cuh +++ b/ggml/src/ggml-cuda/rope.cuh @@ -5,3 +5,5 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_rope_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_rope_fused(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * set_rows); From c880b4305ca97c11e817f92ebc02d8920374b5a0 Mon Sep 17 00:00:00 2001 From: TecJesh Date: Thu, 13 Nov 2025 09:39:51 +0800 Subject: [PATCH 23/51] CANN: Add cross_entropy_loss op support (llama/16886) * update L2_NORM op support * update L2_NORM op support * remove extra whitespace * cann: update cross_entropy_loss op support * remove trailing whitespaces * rebase the latest code in the main repository and remove the l2_norm operator that already exists in another pull request. 
* undo the l2_norm operator deletion --- ggml/src/ggml-cann/aclnn_ops.cpp | 86 ++++++++++++++++++++++++++++++++ ggml/src/ggml-cann/aclnn_ops.h | 38 ++++++++++++++ ggml/src/ggml-cann/ggml-cann.cpp | 4 ++ 3 files changed, 128 insertions(+) diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index 4835c5c0387..6d8b4a5f0eb 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -477,6 +477,92 @@ void ggml_cann_l2_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_cann_release_resources(ctx, dims_array, p_scalar, acl_src, acl_dst, acl_div); } +void ggml_cann_cross_entropy_loss(ggml_backend_cann_context & ctx, ggml_tensor * dst) { + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; + + const int64_t nc = src0->ne[0]; + const int64_t nr = ggml_nrows(src0); + + int64_t logits_ne[] = {nc, nr}; + size_t logits_nb[2]; + logits_nb[0] = ggml_type_size(src0->type); + logits_nb[1] = logits_nb[0] * logits_ne[0]; + aclTensor * acl_logits = ggml_cann_create_tensor(src0->data, ACL_FLOAT, sizeof(float), logits_ne, logits_nb, 2); + + size_t log_softmax_type_size = sizeof(float); + int64_t log_softmax_n_bytes = nr * nc * log_softmax_type_size; + ggml_cann_pool_alloc log_softmax_allocator(ctx.pool(), log_softmax_n_bytes); + void * log_softmax_buffer = log_softmax_allocator.get(); + + int64_t log_softmax_ne[] = {nc, nr}; + size_t log_softmax_nb[2]; + log_softmax_nb[0] = log_softmax_type_size; + log_softmax_nb[1] = log_softmax_nb[0] * log_softmax_ne[0]; + aclTensor * acl_log_softmax = ggml_cann_create_tensor(log_softmax_buffer, ACL_FLOAT, log_softmax_type_size, log_softmax_ne, log_softmax_nb, 2); + + GGML_CANN_CALL_ACLNN_OP(ctx, LogSoftmax, acl_logits, 1, acl_log_softmax); + + int64_t labels_ne[] = {nc, nr}; + size_t labels_nb[2]; + labels_nb[0] = ggml_type_size(src1->type); + labels_nb[1] = labels_nb[0] * labels_ne[0]; + aclTensor * acl_labels = ggml_cann_create_tensor(src1->data, ACL_FLOAT, sizeof(float), labels_ne, labels_nb, 2); + + size_t mul_type_size = sizeof(float); + int64_t mul_n_bytes = nr * nc * mul_type_size; + ggml_cann_pool_alloc mul_allocator(ctx.pool(), mul_n_bytes); + void * mul_buffer = mul_allocator.get(); + + int64_t mul_ne[] = {nc, nr}; + size_t mul_nb[2]; + mul_nb[0] = mul_type_size; + mul_nb[1] = mul_nb[0] * mul_ne[0]; + aclTensor * acl_mul_result = ggml_cann_create_tensor(mul_buffer, ACL_FLOAT, mul_type_size, mul_ne, mul_nb, 2); + + GGML_CANN_CALL_ACLNN_OP(ctx, Mul, acl_log_softmax, acl_labels, acl_mul_result); + + size_t sum_per_sample_type_size = sizeof(float); + int64_t sum_per_sample_n_bytes = nr * sum_per_sample_type_size; + ggml_cann_pool_alloc sum_per_sample_allocator(ctx.pool(), sum_per_sample_n_bytes); + void * sum_per_sample_buffer = sum_per_sample_allocator.get(); + + int64_t sum_per_sample_ne[] = {nr}; + size_t sum_per_sample_nb[1]; + sum_per_sample_nb[0] = sum_per_sample_type_size; + aclTensor * acl_sum_per_sample = ggml_cann_create_tensor(sum_per_sample_buffer, ACL_FLOAT, sum_per_sample_type_size, sum_per_sample_ne, sum_per_sample_nb, 1); + + std::vector sum_dims = {1}; + aclIntArray * dims_array = aclCreateIntArray(sum_dims.data(), sum_dims.size()); + bool keep_dims = false; + + GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_mul_result, dims_array, keep_dims, ACL_FLOAT, acl_sum_per_sample); + + size_t total_sum_type_size = sizeof(float); + int64_t total_sum_n_bytes = 1 * total_sum_type_size; + ggml_cann_pool_alloc total_sum_allocator(ctx.pool(), total_sum_n_bytes); + void * 
total_sum_buffer = total_sum_allocator.get(); + + int64_t total_sum_ne[] = {1}; + size_t total_sum_nb[1]; + total_sum_nb[0] = total_sum_type_size; + + aclTensor * acl_total_sum = ggml_cann_create_tensor(total_sum_buffer, ACL_FLOAT, total_sum_type_size, total_sum_ne, total_sum_nb, 1); + + std::vector total_sum_dims = {0}; + aclIntArray * total_sum_dims_array = aclCreateIntArray(total_sum_dims.data(), total_sum_dims.size()); + + GGML_CANN_CALL_ACLNN_OP(ctx, ReduceSum, acl_sum_per_sample, total_sum_dims_array, keep_dims, ACL_FLOAT, acl_total_sum); + + float value = -1.0f / static_cast(nr); + aclScalar * scale_factor = aclCreateScalar(&value, aclDataType::ACL_FLOAT); + aclTensor * acl_dst = ggml_cann_create_tensor(dst->data, ACL_FLOAT, sizeof(float), total_sum_ne, total_sum_nb, 1); + + GGML_CANN_CALL_ACLNN_OP(ctx, Muls, acl_total_sum, scale_factor, acl_dst); + + ggml_cann_release_resources(ctx, acl_logits, acl_log_softmax, acl_labels, acl_mul_result, acl_sum_per_sample, acl_total_sum, acl_dst, scale_factor, dims_array, total_sum_dims_array); +} + void ggml_cann_group_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst) { ggml_tensor * src = dst->src[0]; diff --git a/ggml/src/ggml-cann/aclnn_ops.h b/ggml/src/ggml-cann/aclnn_ops.h index 060eedbbb02..c1ea1b153fc 100644 --- a/ggml/src/ggml-cann/aclnn_ops.h +++ b/ggml/src/ggml-cann/aclnn_ops.h @@ -47,6 +47,7 @@ #include #include #include +#include #include "acl_tensor.h" #include "common.h" @@ -211,6 +212,43 @@ void ggml_cann_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); */ void ggml_cann_l2_norm(ggml_backend_cann_context & ctx, ggml_tensor * dst); +/** + * @brief Computes the Cross Entropy Loss for a ggml tensor using the CANN + * backend. + * + * @details This function computes the cross entropy loss between the predicted + * logits and target probability distributions. The operation follows + * the same computation pattern as the CPU implementation: + * 1. Applies log_softmax to the logits along the class dimension + * 2. Element-wise multiplication with target distributions + * 3. Summation along the class dimension to get per-sample losses + * 4. Global summation and scaling by -1/nr to get final loss + * + * The computation can be expressed as: + * \f[ + * \text{loss} = -\frac{1}{N} \sum_{i=1}^{N} \sum_{j=1}^{C} y_{ij} \cdot \log(\text{softmax}(x_{ij})) + * \f] + * where \f$N\f$ is the total number of samples, \f$C\f$ is the number + * of classes, \f$x\f$ are the logits, and \f$y\f$ are the target + * probability distributions. + * + * @param ctx The CANN context used for operations. + * @param dst The destination tensor where the computed loss will be stored. + * This should be a scalar tensor containing the final loss value. + * + * @note This implementation computes cross entropy between probability + * distributions, not the typical classification cross entropy that + * expects class indices as targets. Both input tensors (src0 and src1) + * should have the same shape and represent probability distributions + * over the class dimension. + * @note The function expects two source tensors: + * - dst->src[0]: Logits tensor (before softmax) + * - dst->src[1]: Target probability distributions tensor + * @note The computation is performed using CANN backend operators including + * LogSoftmax, Mul, ReduceSum, and Muls for the final scaling. + */ +void ggml_cann_cross_entropy_loss(ggml_backend_cann_context & ctx, ggml_tensor * dst); + /** * @brief Computes the Group Normalization for a ggml tensor using the CANN * backend. 
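As a cross-check of the LogSoftmax, Mul, ReduceSum, Muls sequence above, the same loss can be written as a scalar loop over contiguous f32 data. This sketch is illustrative only; the function name and the max-shift used for numerical stability are not part of the patch:

    #include <math.h>
    #include <stdint.h>

    // loss = -1/N * sum_i sum_j y[i][j] * log_softmax(x[i])[j]
    static float cross_entropy_loss_ref(const float * logits, const float * labels,
                                        int64_t nr, int64_t nc) {
        double total = 0.0;
        for (int64_t i = 0; i < nr; ++i) {
            const float * x = logits + i * nc;
            const float * y = labels + i * nc;
            float max_x = x[0]; // shift by the row max so exp() cannot overflow
            for (int64_t j = 1; j < nc; ++j) {
                max_x = x[j] > max_x ? x[j] : max_x;
            }
            double sum_exp = 0.0;
            for (int64_t j = 0; j < nc; ++j) {
                sum_exp += exp((double) (x[j] - max_x));
            }
            const double log_z = log(sum_exp) + (double) max_x; // log(sum(exp(x)))
            for (int64_t j = 0; j < nc; ++j) {
                total += (double) y[j] * ((double) x[j] - log_z); // y * log_softmax(x)
            }
        }
        return (float) (-total / (double) nr);
    }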
diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 9de9440ac65..da7aede702a 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -1780,6 +1780,9 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context & ctx, struct gg case GGML_OP_L2_NORM: ggml_cann_l2_norm(ctx, dst); break; + case GGML_OP_CROSS_ENTROPY_LOSS: + ggml_cann_cross_entropy_loss(ctx, dst); + break; case GGML_OP_CONCAT: ggml_cann_concat(ctx, dst); break; @@ -2519,6 +2522,7 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_ten return (p0 <= (k0 / 2)) && (p1 <= (k1 / 2)); } case GGML_OP_L2_NORM: + case GGML_OP_CROSS_ENTROPY_LOSS: case GGML_OP_DUP: case GGML_OP_SUM: case GGML_OP_IM2COL: From 9808706d091bfd12c10692b790ff7d823db788bc Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Thu, 13 Nov 2025 00:59:05 -0800 Subject: [PATCH 24/51] ggml-cpu : use template for argsort (llama/17222) --- ggml/src/ggml-cpu/ops.cpp | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 9f1e5f8d646..09f53b470b2 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7665,6 +7665,18 @@ void ggml_compute_forward_timestep_embedding( // ggml_compute_forward_argsort +template +struct argsort_cmp { + const float * data; + bool operator()(int32_t a, int32_t b) const { + if constexpr (order == GGML_SORT_ORDER_ASC) { + return data[a] < data[b]; + } else { + return data[a] > data[b]; + } + } +}; + static void ggml_compute_forward_argsort_f32( const ggml_compute_params * params, ggml_tensor * dst) { @@ -7691,16 +7703,18 @@ static void ggml_compute_forward_argsort_f32( dst_data[j] = j; } - std::function cmp; - - // note: this might be causing memory allocations? ideally should be avoided if it's the case switch (order) { - case GGML_SORT_ORDER_ASC: cmp = [src_data](int32_t a, int32_t b) { return src_data[a] < src_data[b]; }; break; - case GGML_SORT_ORDER_DESC: cmp = [src_data](int32_t a, int32_t b) { return src_data[a] > src_data[b]; }; break; - default: GGML_ABORT("invalid sort order"); - } + case GGML_SORT_ORDER_ASC: + std::sort(dst_data, dst_data + ne0, argsort_cmp{src_data}); + break; - std::sort(dst_data, dst_data + ne0, cmp); + case GGML_SORT_ORDER_DESC: + std::sort(dst_data, dst_data + ne0, argsort_cmp{src_data}); + break; + + default: + GGML_ABORT("invalid sort order"); + } } } From b6d0ebe27b0510c6234198a17af6f7944266dc6f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 13 Nov 2025 12:59:37 +0200 Subject: [PATCH 25/51] Revert "ggml-cpu: handle 3d tensors in repack mat_mul (llama/17030)" (llama/17233) This reverts commit 1c398dc9eca9c366ce98deb0e6f3538e444ebc8a. 
--- ggml/src/ggml-cpu/repack.cpp | 132 +++++++++++------------------------ 1 file changed, 42 insertions(+), 90 deletions(-) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 274be146dc5..8421c84ce09 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -1600,52 +1600,29 @@ template src[0]; const ggml_tensor * src1 = op->src[1]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS + const void * src1_wdata = params->wdata; const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); - GGML_ASSERT(ne03 == 1 && ne13 == 1); - GGML_ASSERT(ne12 % ne02 == 0); - const int64_t r2 = ne12 / ne02; - - const int64_t i12 = src1_start / ne1; - const int64_t i11 = src1_start - i12 * ne1; - - // Determine batch index - const int64_t i02 = i12 / r2; - - const int64_t i1 = i11; - const int64_t i2 = i12; - - const char * src0_ptr = (const char *) src0->data + i02 * nb02; - const char * src1_ptr = (const char *) params->wdata + (i11 + i12 * ne11) * src1_col_stride; - char * dst_ptr = ((char *) dst->data + (i1 * nb1 + i2 * nb2)); - - const int64_t nrows = src1_end - src1_start; - const int64_t ncols = src0_end - src0_start; - - GGML_ASSERT(src1_ptr + src1_col_stride * nrows <= (const char *) params->wdata + params->wsize); - // If there are more than three rows in src1, use gemm; otherwise, use gemv. - if (nrows > 3) { - gemm(ne00, (float *) (dst_ptr) + src0_start, nb1 / nb0, - src0_ptr + src0_start * nb01, src1_ptr, - nrows - (nrows % 4), ncols); + if (ne11 > 3) { + gemm(ne00, + (float *) ((char *) dst->data) + src0_start, ne01, + (const char *) src0->data + src0_start * nb01, + (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); } - for (int iter = nrows - (nrows % 4); iter < nrows; iter++) { - gemv(ne00, (float *) (dst_ptr + (iter * nb1)) + src0_start, - ne01, src0_ptr + src0_start * nb01, - src1_ptr + (src1_col_stride * iter), 1 /* nrows */, ncols); + for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { + gemv(ne00, + (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, + (const char *) src0->data + src0_start * nb01, + (const char *) src1_wdata + (src1_col_stride * iter), 1, + src0_end - src0_start); } } @@ -1670,12 +1647,6 @@ template type == GGML_TYPE_F32); GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); @@ -1683,60 +1654,47 @@ template (params->wdata); const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); - const size_t nbw2 = nbw1 * ne11; - assert(params->wsize >= nbw2 * ne12); + assert(params->wsize >= nbw1 * ne11); const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - for (int64_t i12 = 0; i12 < ne12; i12++) { - char * data_ptr = (char *) src1->data + i12 * nb12; - char * wdata_ptr = wdata + i12 * nbw2; - - for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { - ggml_quantize_mat_t((float *) (data_ptr + i11 * nb11), - (void *) (wdata_ptr + i11 * nbw1), 4, ne10); - } + int64_t i11_processed = 0; + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + ggml_quantize_mat_t((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); + } - const int64_t i11_processed = ne11 - ne11 % 4; - for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { - from_float((float *) (data_ptr + i11 * nb11), (void *) (wdata_ptr + i11 * nbw1), ne10); - } + i11_processed = ne11 - ne11 % 4; + for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { + from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * 
nbw1), ne10); } // disable for NUMA const bool disable_chunking = ggml_is_numa(); // 4x chunks per thread - const int64_t nr0 = ggml_nrows(op->src[0]); - const int64_t nr1 = ne1 * ne2 * ne3; - - int nth_scaled = nth * 4; - int64_t chunk_size0 = (nr0 + nth_scaled - 1) / nth_scaled; - // avoid too small chunks for narrow src1 - int64_t chunk_size1 = MAX(16, (nr1 + nth - 1) / nth); - int64_t nchunk0 = (nr0 + chunk_size0 - 1) / chunk_size0; - int64_t nchunk1 = (nr1 + chunk_size1 - 1) / chunk_size1; + int64_t nr = ggml_nrows(op->src[0]); + int nth_scaled = nth * 4; + int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; + int64_t nchunk = (nr + chunk_size - 1) / chunk_size; // Ensure minimum chunk size to avoid alignment issues with high thread counts // Minimum chunk size should be at least NB_COLS to prevent overlapping chunks after alignment const int64_t min_chunk_size = NB_COLS; - if (nchunk0 > 0 && (nr0 / nchunk0) < min_chunk_size && nr0 >= min_chunk_size) { - nchunk0 = (nr0 + min_chunk_size - 1) / min_chunk_size; + if (nchunk > 0 && (nr / nchunk) < min_chunk_size && nr >= min_chunk_size) { + nchunk = (nr + min_chunk_size - 1) / min_chunk_size; } - if (nth == 1 || nchunk0 * nchunk1 < nth || disable_chunking) { - nchunk0 = nr0 > nr1 ? nth : 1; - nchunk1 = nr0 > nr1 ? 1 : nth; + if (nth == 1 || nchunk < nth || disable_chunking) { + nchunk = nth; } - const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; - const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; - // Ensure nchunk doesn't exceed the number of rows divided by minimum chunk size // This prevents creating too many tiny chunks that could overlap after alignment - const int64_t max_nchunk = (nr0 + min_chunk_size - 1) / min_chunk_size; - nchunk0 = MIN(nchunk0, max_nchunk); + const int64_t max_nchunk = (nr + min_chunk_size - 1) / min_chunk_size; + if (nchunk > max_nchunk) { + nchunk = max_nchunk; + } if (ith == 0) { // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. 
@@ -1748,29 +1706,23 @@ template ne01) { + src0_end = ne01; + } - // Make sure current plane is the last one before exiting if (src0_start >= src0_end) { - current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); - continue; + break; } - forward_mul_mat_one_chunk(params, dst, src0_start, src0_end, src1_start, src1_end); + forward_mul_mat_one_chunk(params, dst, src0_start, src0_end); current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); } From 5150c23e11b441cfddd04d21cd13a9382aa412f8 Mon Sep 17 00:00:00 2001 From: bagheera <59658056+bghira@users.noreply.github.com> Date: Thu, 13 Nov 2025 05:32:44 -0600 Subject: [PATCH 26/51] metal: accelerated conv2d (llama/17175) * metal: accelerated conv2d * cont : cleanup --------- Co-authored-by: bghira Co-authored-by: Georgi Gerganov --- ggml/src/ggml-metal/ggml-metal-device.cpp | 24 +++++ ggml/src/ggml-metal/ggml-metal-device.h | 1 + ggml/src/ggml-metal/ggml-metal-device.m | 5 + ggml/src/ggml-metal/ggml-metal-impl.h | 30 ++++++ ggml/src/ggml-metal/ggml-metal-ops.cpp | 88 ++++++++++++++++- ggml/src/ggml-metal/ggml-metal-ops.h | 1 + ggml/src/ggml-metal/ggml-metal.metal | 114 ++++++++++++++++++++++ 7 files changed, 258 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal-device.cpp b/ggml/src/ggml-metal/ggml-metal-device.cpp index 5607deaf414..08095dcf060 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.cpp +++ b/ggml/src/ggml-metal/ggml-metal-device.cpp @@ -1438,6 +1438,30 @@ ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_2d(ggml_met return res; } +ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_2d(ggml_metal_library_t lib, const ggml_tensor * op) { + assert(op->op == GGML_OP_CONV_2D); + + GGML_ASSERT(ggml_is_contiguous(op->src[0])); + GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); + GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); + GGML_ASSERT(op->type == GGML_TYPE_F32); + + char base[256]; + char name[256]; + + snprintf(base, 256, "kernel_conv_2d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type)); + snprintf(name, 256, "%s", base); + + ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name); + if (res) { + return res; + } + + res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); + + return res; +} + ggml_metal_pipeline_t ggml_metal_library_get_pipeline_upscale(ggml_metal_library_t lib, const ggml_tensor * op) { assert(op->op == GGML_OP_UPSCALE); diff --git a/ggml/src/ggml-metal/ggml-metal-device.h b/ggml/src/ggml-metal/ggml-metal-device.h index cb27dca9892..5a8bc0c1ccd 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.h +++ b/ggml/src/ggml-metal/ggml-metal-device.h @@ -133,6 +133,7 @@ ggml_metal_pipeline_t ggml_metal_library_get_pipeline_rope (ggml_me ggml_metal_pipeline_t ggml_metal_library_get_pipeline_im2col (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_1d (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_transpose_2d (ggml_metal_library_t lib, const struct ggml_tensor * op); +ggml_metal_pipeline_t ggml_metal_library_get_pipeline_conv_2d (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_upscale (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad (ggml_metal_library_t lib, const struct ggml_tensor * op); 
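The new kernel_conv_2d performs a direct convolution without an im2col staging buffer. Per output element it computes the sum shown in this scalar sketch; the parameter names mirror the ggml_metal_kargs_conv_2d fields added below, but the flat single-batch f32 layout and the helper itself are illustrative assumptions, not the Metal code:

    // dst[oc][oy][ox] = sum over ic, ky, kx of
    //   w[oc][ic][ky][kx] * src[ic][oy*s1 + ky*d1 - p1][ox*s0 + kx*d0 - p0]
    static void conv_2d_ref(const float * w, const float * src, float * dst,
                            int IW, int IH, int KW, int KH, int IC, int OC,
                            int OW, int OH, int s0, int s1, int p0, int p1,
                            int d0, int d1) {
        for (int oc = 0; oc < OC; ++oc) {
            for (int oy = 0; oy < OH; ++oy) {
                for (int ox = 0; ox < OW; ++ox) {
                    float acc = 0.0f;
                    for (int ic = 0; ic < IC; ++ic) {
                        for (int ky = 0; ky < KH; ++ky) {
                            for (int kx = 0; kx < KW; ++kx) {
                                const int iy = oy * s1 + ky * d1 - p1;
                                const int ix = ox * s0 + kx * d0 - p0;
                                if (ix < 0 || ix >= IW || iy < 0 || iy >= IH) {
                                    continue; // out-of-bounds taps read implicit zeros
                                }
                                acc += w[((oc * IC + ic) * KH + ky) * KW + kx]
                                     * src[(ic * IH + iy) * IW + ix];
                            }
                        }
                    }
                    dst[(oc * OH + oy) * OW + ox] = acc;
                }
            }
        }
    }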
ggml_metal_pipeline_t ggml_metal_library_get_pipeline_pad_reflect_1d (ggml_metal_library_t lib, const struct ggml_tensor * op); diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index 3471225ab47..69c8820854a 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -885,6 +885,11 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te return true; case GGML_OP_IM2COL: return ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_F32 && (op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32); + case GGML_OP_CONV_2D: + return ggml_is_contiguous(op->src[0]) && + op->src[1]->type == GGML_TYPE_F32 && + op->type == GGML_TYPE_F32 && + (op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); case GGML_OP_POOL_1D: return false; case GGML_OP_UPSCALE: diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index 7a878a657bc..6d02befa97d 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -528,6 +528,36 @@ typedef struct { uint64_t nb2; } ggml_metal_kargs_conv_transpose_2d; +typedef struct { + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + uint64_t nb0; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; + int32_t IW; + int32_t IH; + int32_t KW; + int32_t KH; + int32_t IC; + int32_t OC; + int32_t OW; + int32_t OH; + int32_t N; + int32_t s0; + int32_t s1; + int32_t p0; + int32_t p1; + int32_t d0; + int32_t d1; +} ggml_metal_kargs_conv_2d; + typedef struct { uint64_t ofs0; uint64_t ofs1; diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp index 5a8f150a71d..d9811e31159 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.cpp +++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp @@ -10,6 +10,7 @@ #include #include +#include static ggml_metal_buffer_id ggml_metal_get_buffer_id(const ggml_tensor * t) { if (!t) { @@ -364,6 +365,10 @@ static int ggml_metal_op_encode_impl(ggml_metal_op_t ctx, int idx) { { n_fuse = ggml_metal_op_im2col(ctx, idx); } break; + case GGML_OP_CONV_2D: + { + n_fuse = ggml_metal_op_conv_2d(ctx, idx); + } break; case GGML_OP_CONV_TRANSPOSE_1D: { n_fuse = ggml_metal_op_conv_transpose_1d(ctx, idx); @@ -1036,11 +1041,6 @@ int ggml_metal_op_set_rows(ggml_metal_op_t ctx, int idx) { nth = std::min(nth, nk0); - if (nth*nrptg > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { - nth = ggml_metal_pipeline_max_theads_per_threadgroup(pipeline); - nrptg = 1; - } - ggml_metal_kargs_set_rows args = { /*.nk0 =*/ nk0, /*.ne01 =*/ ne01, @@ -3082,6 +3082,84 @@ int ggml_metal_op_im2col(ggml_metal_op_t ctx, int idx) { return 1; } +int ggml_metal_op_conv_2d(ggml_metal_op_t ctx, int idx) { + ggml_tensor * op = ctx->node(idx); + + ggml_metal_library_t lib = ctx->lib; + ggml_metal_encoder_t enc = ctx->enc; + + GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); + GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); + GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne); + GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb); + GGML_TENSOR_LOCALS( int32_t, ne, op, ne); + GGML_TENSOR_LOCALS(uint64_t, nb, op, nb); + + GGML_ASSERT(ggml_is_contiguous(op->src[0])); + GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32); + GGML_ASSERT(op->type == GGML_TYPE_F32); + GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32); + + const int32_t s0 = ((const int32_t *) 
op->op_params)[0]; + const int32_t s1 = ((const int32_t *) op->op_params)[1]; + const int32_t p0 = ((const int32_t *) op->op_params)[2]; + const int32_t p1 = ((const int32_t *) op->op_params)[3]; + const int32_t d0 = ((const int32_t *) op->op_params)[4]; + const int32_t d1 = ((const int32_t *) op->op_params)[5]; + + ggml_metal_kargs_conv_2d args = { + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.nb0 =*/ nb0, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + /*.IW =*/ ne10, + /*.IH =*/ ne11, + /*.KW =*/ ne00, + /*.KH =*/ ne01, + /*.IC =*/ ne02, + /*.OC =*/ ne03, + /*.OW =*/ ne0, + /*.OH =*/ ne1, + /*.N =*/ ne3, + /*.s0 =*/ s0, + /*.s1 =*/ s1, + /*.p0 =*/ p0, + /*.p1 =*/ p1, + /*.d0 =*/ d0, + /*.d1 =*/ d1, + }; + + ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_conv_2d(lib, op); + + int nth = ggml_metal_pipeline_max_theads_per_threadgroup(pipeline); + nth = std::min(nth, 256); + nth = std::max(nth, 1); + + const uint64_t n_out = ggml_nelements(op); + + uint64_t tg = (n_out + nth - 1)/nth; + tg = std::max(tg, 1); + tg = std::min(tg, (uint64_t) std::numeric_limits::max()); + + ggml_metal_encoder_set_pipeline(enc, pipeline); + ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2); + ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3); + + ggml_metal_encoder_dispatch_threadgroups(enc, tg, 1, 1, nth, 1, 1); + + return 1; +} + int ggml_metal_op_conv_transpose_1d(ggml_metal_op_t ctx, int idx) { ggml_tensor * op = ctx->node(idx); diff --git a/ggml/src/ggml-metal/ggml-metal-ops.h b/ggml/src/ggml-metal/ggml-metal-ops.h index 0d9cb8af7c1..3cf400dc45c 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.h +++ b/ggml/src/ggml-metal/ggml-metal-ops.h @@ -70,6 +70,7 @@ int ggml_metal_op_group_norm (ggml_metal_op_t ctx, int idx); int ggml_metal_op_norm (ggml_metal_op_t ctx, int idx); int ggml_metal_op_rope (ggml_metal_op_t ctx, int idx); int ggml_metal_op_im2col (ggml_metal_op_t ctx, int idx); +int ggml_metal_op_conv_2d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_conv_transpose_1d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_conv_transpose_2d (ggml_metal_op_t ctx, int idx); int ggml_metal_op_upscale (ggml_metal_op_t ctx, int idx); diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index cea535ade74..7f94419c3ac 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -4146,6 +4146,120 @@ template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col; //template [[host_name("kernel_im2col_ext_f32")]] kernel im2col_ext_t kernel_im2col_ext; //template [[host_name("kernel_im2col_ext_f16")]] kernel im2col_ext_t kernel_im2col_ext; +template +kernel void kernel_conv_2d( + constant ggml_metal_kargs_conv_2d & args, + device const char * weights, + device const char * src, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tgpg[[threadgroups_per_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]) { + + const uint threads_per_tg = ntg.x * ntg.y * ntg.z; + const uint tg_index = (tgpig.z * tgpg.y + tgpig.y) * tgpg.x + tgpig.x; + const uint local_thread = tpitg.z * (ntg.x * ntg.y) + tpitg.y * ntg.x + tpitg.x; + const uint thread_index = 
tg_index * threads_per_tg + local_thread; + const uint64_t total_threads = (uint64_t) threads_per_tg * tgpg.x * tgpg.y * tgpg.z; + const uint64_t total_outputs = (uint64_t) args.N * args.OC * args.OH * args.OW; + + for (uint64_t index = thread_index; index < total_outputs; index += total_threads) { + uint64_t tmp = index; + + const int32_t ow = tmp % args.OW; tmp /= args.OW; + const int32_t oh = tmp % args.OH; tmp /= args.OH; + const int32_t oc = tmp % args.OC; tmp /= args.OC; + const int32_t n = tmp; + + float acc = 0.0f; + + const int32_t base_x = ow*args.s0 - args.p0; + const int32_t base_y = oh*args.s1 - args.p1; + + int32_t ky_start = 0; + if (base_y < 0) { + ky_start = (-base_y + args.d1 - 1)/args.d1; + } + int32_t ky_end = args.KH; + const int32_t y_max = args.IH - 1 - base_y; + if (y_max < 0) { + ky_end = ky_start; + } else if (base_y + (args.KH - 1)*args.d1 >= args.IH) { + ky_end = min(ky_end, y_max/args.d1 + 1); + } + + int32_t kx_start = 0; + if (base_x < 0) { + kx_start = (-base_x + args.d0 - 1)/args.d0; + } + int32_t kx_end = args.KW; + const int32_t x_max = args.IW - 1 - base_x; + if (x_max < 0) { + kx_end = kx_start; + } else if (base_x + (args.KW - 1)*args.d0 >= args.IW) { + kx_end = min(kx_end, x_max/args.d0 + 1); + } + + if (ky_start < ky_end && kx_start < kx_end) { + const uint64_t src_base_n = (uint64_t) n * args.nb13; + const uint64_t w_base_oc = (uint64_t) oc * args.nb03; + + for (int32_t ic = 0; ic < args.IC; ++ic) { + const uint64_t src_base_nc = src_base_n + (uint64_t) ic * args.nb12; + const uint64_t w_base_ocic = w_base_oc + (uint64_t) ic * args.nb02; + + for (int32_t ky = ky_start; ky < ky_end; ++ky) { + const int32_t iy = base_y + ky*args.d1; + const uint64_t src_base_row = src_base_nc + (uint64_t) iy * args.nb11; + const uint64_t w_base_row = w_base_ocic + (uint64_t) ky * args.nb01; + + for (int32_t kx = kx_start; kx < kx_end; ++kx) { + const int32_t ix = base_x + kx*args.d0; + const uint64_t src_offs = src_base_row + (uint64_t) ix * args.nb10; + const uint64_t w_offs = w_base_row + (uint64_t) kx * args.nb00; + + const float x = *(device const float *)(src + src_offs); + const float w = (float) (*(device const TK *)(weights + w_offs)); + + acc += x * w; + } + } + } + } + + const uint64_t dst_offs = + (uint64_t) n * args.nb3 + + (uint64_t) oc * args.nb2 + + (uint64_t) oh * args.nb1 + + (uint64_t) ow * args.nb0; + + *(device float *)(dst + dst_offs) = acc; + } +} + +template [[host_name("kernel_conv_2d_f32_f32")]] +kernel void kernel_conv_2d( + constant ggml_metal_kargs_conv_2d & args, + device const char * weights, + device const char * src, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tgpg[[threadgroups_per_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]); + +template [[host_name("kernel_conv_2d_f16_f32")]] +kernel void kernel_conv_2d( + constant ggml_metal_kargs_conv_2d & args, + device const char * weights, + device const char * src, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tgpg[[threadgroups_per_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + uint3 ntg[[threads_per_threadgroup]]); + typedef void (conv_transpose_1d_t)( constant ggml_metal_kargs_conv_transpose_1d & args, device const float * src0, From 273dd3fef920de4cb85b26daaf32d3b9c1b983f6 Mon Sep 17 00:00:00 2001 From: ixgbe <1113177880@qq.com> Date: Thu, 13 Nov 2025 20:13:32 +0800 Subject: [PATCH 27/51] ggml-cpu : add RISC-V vector intrinsic support for silu and cvar operations 
(llama/17227) Signed-off-by: Wang Yang --- ggml/src/ggml-cpu/vec.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp index 43dc7537c33..ac8633e2128 100644 --- a/ggml/src/ggml-cpu/vec.cpp +++ b/ggml/src/ggml-cpu/vec.cpp @@ -360,6 +360,13 @@ void ggml_vec_silu_f32(const int n, float * y, const float * x) { for (; i + 3 < n; i += 4) { vst1q_f32(y + i, ggml_v_silu(vld1q_f32(x + i))); } +#elif defined(__riscv_v_intrinsic) + for (int vl; i < n; i += vl) { + vl = __riscv_vsetvl_e32m2(n - i); + vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl); + vfloat32m2_t vy = ggml_v_silu_m2(vx, vl); + __riscv_vse32_v_f32m2(&y[i], vy, vl); + } #endif for (; i < n; ++i) { y[i] = ggml_silu_f32(x[i]); @@ -460,6 +467,16 @@ ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const floa val = vec_mul(val, val); sum += (ggml_float)vec_hsum_f32x4(val); } +#elif defined(__riscv_v_intrinsic) + vfloat64m1_t vsum = __riscv_vfmv_v_f_f64m1(0, 1); + for (int vl; i < n; i += vl) { + vl = __riscv_vsetvl_e32m2(n - i); + vfloat32m2_t val = __riscv_vfsub_vf_f32m2(__riscv_vle32_v_f32m2(&x[i], vl), mean, vl); + __riscv_vse32_v_f32m2(&y[i], val, vl); + val = __riscv_vfmul_vv_f32m2(val, val, vl); + vsum = __riscv_vfwredusum_vs_f32m2_f64m1(val, vsum, vl); + } + sum = (ggml_float)__riscv_vfmv_f_s_f64m1_f64(vsum); #endif for (; i < n; ++i) { float val = x[i] - mean; From 312480c9038f87e008be89f615abd0acac4b1cb9 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Thu, 13 Nov 2025 04:14:02 -0800 Subject: [PATCH 28/51] sched : fix reserve ignoring user tensor assignments (llama/17232) --- ggml/src/ggml-backend.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index ff9135fe2d8..eeaf35c169f 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -1698,8 +1698,6 @@ bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * GGML_ASSERT(sched); GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); - ggml_backend_sched_reset(sched); - ggml_backend_sched_synchronize(sched); ggml_backend_sched_split_graph(sched, measure_graph); From 1b4c6ad1dd27ebbbfdf3e44c01773c87d16a00df Mon Sep 17 00:00:00 2001 From: Ruben Ortlam Date: Thu, 13 Nov 2025 14:51:21 +0100 Subject: [PATCH 29/51] vulkan: remove shell call from vulkan-shaders-gen tool, revert file check (llama/17219) * vulkan: remove shell call from vulkan-shaders-gen tool * use string vector for command execution * Fix condition * use string, remove const_cast * Fix dependency file quotation on Windows --------- Co-authored-by: Jeff Bolz --- .../vulkan-shaders/vulkan-shaders-gen.cpp | 53 ++++++++++++------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index c8a6f97ecff..1423f772403 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -76,7 +76,7 @@ enum MatMulIdType { namespace { -void execute_command(const std::string& command, std::string& stdout_str, std::string& stderr_str) { +void execute_command(std::vector& command, std::string& stdout_str, std::string& stderr_str) { #ifdef _WIN32 HANDLE stdout_read, stdout_write; HANDLE stderr_read, stderr_write; @@ -99,8 +99,10 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s 
si.hStdOutput = stdout_write; si.hStdError = stderr_write; - std::vector cmd(command.begin(), command.end()); - cmd.push_back('\0'); + std::string cmd; + for (const auto& part : command) { + cmd += part + " "; + } if (!CreateProcessA(NULL, cmd.data(), NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi)) { throw std::runtime_error("Failed to create process"); @@ -138,6 +140,12 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s throw std::runtime_error("Failed to fork process"); } + std::vector argv; + for (std::string& part : command) { + argv.push_back(part.data()); + } + argv.push_back(nullptr); + if (pid == 0) { close(stdout_pipe[0]); close(stderr_pipe[0]); @@ -145,7 +153,7 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s dup2(stderr_pipe[1], STDERR_FILENO); close(stdout_pipe[1]); close(stderr_pipe[1]); - execl("/bin/sh", "sh", "-c", command.c_str(), (char*) nullptr); + execvp(argv[0], argv.data()); _exit(EXIT_FAILURE); } else { close(stdout_pipe[1]); @@ -316,21 +324,27 @@ compile_count_guard acquire_compile_slot() { void string_to_spv_func(std::string name, std::string in_path, std::string out_path, std::map defines, bool coopmat, bool dep_file, compile_count_guard slot) { std::string target_env = (name.find("_cm2") != std::string::npos) ? "--target-env=vulkan1.3" : "--target-env=vulkan1.2"; - // disable spirv-opt for coopmat shaders for https://github.com/ggerganov/llama.cpp/issues/10734 - // disable spirv-opt for bf16 shaders for https://github.com/ggml-org/llama.cpp/issues/15344 - // disable spirv-opt for rope shaders for https://github.com/ggml-org/llama.cpp/issues/16860 - std::string opt_level = (coopmat || name.find("bf16") != std::string::npos || name.find("rope") != std::string::npos) ? 
"" : "-O"; - #ifdef _WIN32 - std::vector cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, "\"" + in_path + "\"", "-o", "\"" + out_path + "\""}; + std::vector cmd = {GLSLC, "-fshader-stage=compute", target_env, "\"" + in_path + "\"", "-o", "\"" + out_path + "\""}; #else - std::vector cmd = {GLSLC, "-fshader-stage=compute", target_env, opt_level, in_path, "-o", out_path}; + std::vector cmd = {GLSLC, "-fshader-stage=compute", target_env, in_path, "-o", out_path}; #endif + // disable spirv-opt for coopmat shaders for https://github.com/ggerganov/llama.cpp/issues/10734 + // disable spirv-opt for bf16 shaders for https://github.com/ggml-org/llama.cpp/issues/15344 + // disable spirv-opt for rope shaders for https://github.com/ggml-org/llama.cpp/issues/16860 + if (!coopmat && name.find("bf16") == std::string::npos && name.find("rope") == std::string::npos) { + cmd.push_back("-O"); + } + if (dep_file) { cmd.push_back("-MD"); cmd.push_back("-MF"); +#ifdef _WIN32 cmd.push_back("\"" + target_cpp + ".d\""); +#else + cmd.push_back(target_cpp + ".d"); +#endif } #ifdef GGML_VULKAN_SHADER_DEBUG_INFO @@ -354,9 +368,13 @@ void string_to_spv_func(std::string name, std::string in_path, std::string out_p // } // std::cout << std::endl; - execute_command(command, stdout_str, stderr_str); + execute_command(cmd, stdout_str, stderr_str); if (!stderr_str.empty()) { - std::cerr << "cannot compile " << name << "\n\n" << command << "\n\n" << stderr_str << std::endl; + std::cerr << "cannot compile " << name << "\n\n"; + for (const auto& part : cmd) { + std::cerr << part << " "; + } + std::cerr << "\n\n" << stderr_str << std::endl; return; } @@ -430,7 +448,7 @@ void matmul_shaders(bool fp16, MatMulIdType matmul_id_type, bool coopmat, bool c base_dict["ACC_TYPE" ] = f16acc ? "float16_t" : "float"; base_dict["ACC_TYPE_VEC2"] = f16acc ? "f16vec2" : "vec2"; if (f16acc) { - base_dict["ACC_TYPE_MAX"] = "\"float16_t(65504.0)\""; + base_dict["ACC_TYPE_MAX"] = "float16_t(65504.0)"; } if (coopmat) { @@ -610,7 +628,7 @@ void process_shaders() { fa_base_dict["ACC_TYPE"] = f16acc ? "float16_t" : "float"; fa_base_dict["ACC_TYPEV4"] = f16acc ? "f16vec4" : "vec4"; if (f16acc) { - fa_base_dict["ACC_TYPE_MAX"] = "\"float16_t(65504.0)\""; + fa_base_dict["ACC_TYPE_MAX"] = "float16_t(65504.0)"; } for (const auto& tname : type_names) { @@ -1081,11 +1099,6 @@ int main(int argc, char** argv) { if (args.find("--glslc") != args.end()) { GLSLC = args["--glslc"]; // Path to glslc - - if (!std::filesystem::exists(GLSLC) || !std::filesystem::is_regular_file(GLSLC)) { - std::cerr << "Error: glslc not found at " << GLSLC << std::endl; - return EXIT_FAILURE; - } } if (args.find("--source") != args.end()) { input_filepath = args["--source"]; // The shader source file to compile From e9b37f56465cbdea62c7dff87681b8f35083cd3e Mon Sep 17 00:00:00 2001 From: "Piotr Wilkin (ilintar)" Date: Thu, 13 Nov 2025 19:54:47 +0100 Subject: [PATCH 30/51] ggml : add ops SOFTPLUS, EXPM1, TRI, SOLVE_TRI, CUMSUM (llama/17063) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add ops needed for new hybrid models: SOFTPLUS, EXPM1, TRI, SOLVE_TRI, CUMSUM * Update ggml/include/ggml.h Co-authored-by: Georgi Gerganov * Update tests/test-backend-ops.cpp Co-authored-by: Georgi Gerganov * Code review * Whitespace * Update tests/test-backend-ops.cpp Co-authored-by: Diego Devesa * This is actually sigmoid, duh. 
* Add CONST, remove TRI_KEEP, other changes from review * Update tests/test-backend-ops.cpp Co-authored-by: Georgi Gerganov * Update ggml/src/ggml.c Co-authored-by: Georgi Gerganov * Update ggml/src/ggml.c Co-authored-by: Georgi Gerganov * Update ggml/src/ggml-cuda/unary.cu Co-authored-by: Aman Gupta * Remove extra script * Update ggml/src/ggml.c Co-authored-by: Diego Devesa * Update tests/test-backend-ops.cpp Co-authored-by: Diego Devesa * moving changes from laptop [no ci] * pre-rebase * Update tests/test-backend-ops.cpp Co-authored-by: Sigbjørn Skjæret * Update tests/test-backend-ops.cpp Co-authored-by: Sigbjørn Skjæret * Refactor tests * ggml : cleanup * cont : fix ggml_fill srcs * tests : add note * ggml : add ggml_fill_inplace * ggml : add asserts * ggml : fix ggml_fill constant cast * cont : ggml_tri minor * Use TENSOR_LOCALS * Fix regression from #14596, regenerate * Don't make commits at night... --------- Co-authored-by: Georgi Gerganov Co-authored-by: Diego Devesa Co-authored-by: Aman Gupta Co-authored-by: Sigbjørn Skjæret --- ggml/include/ggml.h | 71 +++++++++++ ggml/src/ggml-cpu/ggml-cpu.c | 22 ++++ ggml/src/ggml-cpu/ops.cpp | 210 +++++++++++++++++++++++++++++++- ggml/src/ggml-cpu/ops.h | 4 + ggml/src/ggml-cpu/unary-ops.cpp | 16 +++ ggml/src/ggml-cpu/unary-ops.h | 2 + ggml/src/ggml-cpu/vec.h | 10 ++ ggml/src/ggml-cuda/ggml-cuda.cu | 8 ++ ggml/src/ggml-cuda/unary.cu | 16 +++ ggml/src/ggml-cuda/unary.cuh | 4 + ggml/src/ggml-impl.h | 2 +- ggml/src/ggml.c | 159 +++++++++++++++++++++++- 12 files changed, 516 insertions(+), 8 deletions(-) diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index c1ed1a21c81..605fcfcb9c2 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -475,6 +475,7 @@ extern "C" { GGML_OP_COS, GGML_OP_SUM, GGML_OP_SUM_ROWS, + GGML_OP_CUMSUM, GGML_OP_MEAN, GGML_OP_ARGMAX, GGML_OP_COUNT_EQUAL, @@ -530,6 +531,8 @@ extern "C" { GGML_OP_TIMESTEP_EMBEDDING, GGML_OP_ARGSORT, GGML_OP_LEAKY_RELU, + GGML_OP_TRI, + GGML_OP_FILL, GGML_OP_FLASH_ATTN_EXT, GGML_OP_FLASH_ATTN_BACK, @@ -542,6 +545,7 @@ extern "C" { GGML_OP_RWKV_WKV6, GGML_OP_GATED_LINEAR_ATTN, GGML_OP_RWKV_WKV7, + GGML_OP_SOLVE_TRI, GGML_OP_UNARY, @@ -576,6 +580,8 @@ extern "C" { GGML_UNARY_OP_HARDSWISH, GGML_UNARY_OP_HARDSIGMOID, GGML_UNARY_OP_EXP, + GGML_UNARY_OP_EXPM1, + GGML_UNARY_OP_SOFTPLUS, GGML_UNARY_OP_GELU_ERF, GGML_UNARY_OP_XIELU, GGML_UNARY_OP_FLOOR, @@ -620,6 +626,13 @@ extern "C" { GGML_TENSOR_FLAG_LOSS = 8, // ...defines loss for numerical optimization (multiple loss tensors add up) }; + enum ggml_tri_type { + GGML_TRI_TYPE_UPPER_DIAG = 0, + GGML_TRI_TYPE_UPPER = 1, + GGML_TRI_TYPE_LOWER_DIAG = 2, + GGML_TRI_TYPE_LOWER = 3 + }; + struct ggml_init_params { // memory pool size_t mem_size; // bytes @@ -957,6 +970,22 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_expm1( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_expm1_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_softplus( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_softplus_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_sin( struct ggml_context * ctx, struct ggml_tensor * a); @@ -983,6 +1012,10 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_cumsum( + struct ggml_context * ctx, + struct ggml_tensor * a); + // mean 
along rows GGML_API struct ggml_tensor * ggml_mean( struct ggml_context * ctx, @@ -2187,6 +2220,23 @@ extern "C" { int shift2, int shift3); + // Convert matrix into a triangular one (upper, strict upper, lower or strict lower) by writing + // zeroes everywhere outside the masked area + GGML_API struct ggml_tensor * ggml_tri( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_tri_type type); + + // Fill tensor a with constant c + GGML_API struct ggml_tensor * ggml_fill( + struct ggml_context * ctx, + struct ggml_tensor * a, + float c); + + GGML_API struct ggml_tensor * ggml_fill_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float c); // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151 // timesteps: [N,] @@ -2356,6 +2406,27 @@ extern "C" { struct ggml_tensor * b, struct ggml_tensor * state); + /* Solves a specific equation of the form Ax=B, where A is a triangular matrix + * without zeroes on the diagonal (i.e. invertible). + * B can have any number of columns, but must have the same number of rows as A + * If A is [n, n] and B is [n, m], then the result will be [n, m] as well + * Has O(n^3) complexity (unlike most matrix ops out there), so use on cases + * where n > 100 sparingly, pre-chunk if necessary. + * + * If left = false, solves xA=B instead + * If lower = false, assumes upper triangular instead + * If uni = true, assumes diagonal of A to be all ones (will override actual values) + * + * TODO: currently only lower, right, non-unitriangular variant is implemented + */ + GGML_API struct ggml_tensor * ggml_solve_tri( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool left, + bool lower, + bool uni); + // custom operators typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, int ith, int nth, void * userdata); diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index d8e3c48c609..c7348cc26c1 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -1731,6 +1731,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_sum_rows(params, tensor); } break; + case GGML_OP_CUMSUM: + { + ggml_compute_forward_cumsum(params, tensor); + } break; case GGML_OP_MEAN: { ggml_compute_forward_mean(params, tensor); @@ -1927,6 +1931,14 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_leaky_relu(params, tensor); } break; + case GGML_OP_TRI: + { + ggml_compute_forward_tri(params, tensor); + } break; + case GGML_OP_FILL: + { + ggml_compute_forward_fill(params, tensor); + } break; case GGML_OP_FLASH_ATTN_EXT: { ggml_compute_forward_flash_attn_ext(params, tensor); @@ -1982,6 +1994,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_rwkv_wkv7(params, tensor); } break; + case GGML_OP_SOLVE_TRI: + { + ggml_compute_forward_solve_tri(params, tensor); + } break; case GGML_OP_MAP_CUSTOM1: { ggml_compute_forward_map_custom1(params, tensor); @@ -2140,6 +2156,9 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_ACC: + case GGML_OP_CUMSUM: + case GGML_OP_TRI: + case GGML_OP_FILL: { n_tasks = n_threads; } break; @@ -2157,6 +2176,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { n_tasks = 1; } break; case GGML_OP_COUNT_EQUAL: + case GGML_OP_SOLVE_TRI: { 
n_tasks = n_threads; } break; @@ -2179,6 +2199,8 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_EXP: + case GGML_UNARY_OP_SOFTPLUS: + case GGML_UNARY_OP_EXPM1: case GGML_UNARY_OP_FLOOR: case GGML_UNARY_OP_CEIL: case GGML_UNARY_OP_ROUND: diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 09f53b470b2..b6209588db1 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -9,6 +9,7 @@ #include #include +#include #include // ggml_compute_forward_dup @@ -1395,6 +1396,56 @@ void ggml_compute_forward_sum( } } +// ggml_compute_forward_cumsum + +static void ggml_compute_forward_cumsum_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + const auto [ir0, ir1] = get_thread_range(params, src0); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir/(ne02*ne01); + const int64_t i02 = (ir - i03*ne02*ne01)/ne01; + const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); + + float * src_row = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + float * dst_row = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); + + ggml_vec_cumsum_f32(ne00, dst_row, src_row); + } +} + +void ggml_compute_forward_cumsum( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cumsum_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_sum_rows static void ggml_compute_forward_sum_rows_f32( @@ -2141,6 +2192,83 @@ static void ggml_compute_forward_gelu( } } +// ggml_compute_fill + +static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const float c = ggml_get_op_params_f32(dst, 0); + + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); + GGML_TENSOR_LOCALS(size_t, nb, dst, nb); + + const auto [ir0, ir1] = get_thread_range(params, dst); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir/(ne2*ne1); + const int64_t i02 = (ir - i03*ne2*ne1)/ne1; + const int64_t i01 = (ir - i03*ne2*ne1 - i02*ne1); + + float * dst_ptr = (float *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1); + + ggml_vec_set_f32(ne0, dst_ptr, c); + } +} + +void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_fill_f32(params, dst); +} + +// ggml_compute_tri + +static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(ggml_is_contiguous(src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const auto [ir0, ir1] = get_thread_range(params, src0); + + bool (*bipred)(int, int); + + switch (ttype) { + case GGML_TRI_TYPE_LOWER: bipred = [](int i, int r) { return i < r; }; break; + case GGML_TRI_TYPE_LOWER_DIAG: bipred = [](int i, int r) { return i <= r; }; break; + case GGML_TRI_TYPE_UPPER: bipred = [](int i, int r) { return i > r; }; break; + case GGML_TRI_TYPE_UPPER_DIAG: bipred = [](int i, int r) { return i >= r; }; break; + default: 
GGML_ABORT("invalid tri type"); + } + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir/(ne02*ne01); + const int64_t i02 = (ir - i03*ne02*ne01)/ne01; + const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); + + const float * src_ptr = (const float *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); + float * dst_ptr = ( float *) (( char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1); + + for (int i0 = 0; i0 < ne0; ++i0) { + dst_ptr[i0] = bipred(i0, i01) ? src_ptr[i0] : 0.0f; + } + } +} + +void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_tri_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_gelu_erf static void ggml_compute_forward_gelu_erf_f32( @@ -8536,7 +8664,7 @@ static void ggml_compute_forward_ssm_scan_f32( // n_head for (int h = ih0; h < ih1; ++h) { // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_softplus(dt[h]); + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); const float dA = expf(dt_soft_plus * A[h]); const int g = h / (nh / ng); // repeat_interleave @@ -8633,7 +8761,7 @@ static void ggml_compute_forward_ssm_scan_f32( // n_head for (int h = ih0; h < ih1; ++h) { // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_softplus(dt[h]); + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); const int g = h / (nh / ng); // repeat_interleave // dim @@ -8916,6 +9044,14 @@ void ggml_compute_forward_unary( { ggml_compute_forward_xielu(params, dst); } break; + case GGML_UNARY_OP_EXPM1: + { + ggml_compute_forward_expm1(params, dst); + } break; + case GGML_UNARY_OP_SOFTPLUS: + { + ggml_compute_forward_softplus(params, dst); + } break; default: { GGML_ABORT("fatal error"); @@ -9512,6 +9648,76 @@ void ggml_compute_forward_gla( } } +static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) + const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_ASSERT(ne00 == ne01); // A must be square + GGML_ASSERT(ne0 == ne10); // solution cols == B cols + GGML_ASSERT(ne1 == ne11); // solution rows == B rows + + GGML_ASSERT(ne02 == ne12 && ne12 == ne2); + GGML_ASSERT(ne03 == ne13 && ne13 == ne3); + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t k = ne10; // number of RHS columns + const int64_t n = ne11; // A is n×n + const int64_t nr = ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit + + // chunks per thread + const int64_t dr = (nr + nth - 1)/nth; + + // chunk range for this thread + const int64_t ir0 = dr*ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float * A = (const float *) src0->data; // [n, n, B1, B2] + const float * B = (const float *) src1->data; // [n, k, B1, B2] + float * X = ( float *) dst->data; // [n, k, B1, B2] + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir/(ne02*k); + const int64_t i02 = (ir - 
i03*ne02*k)/k; + const int64_t i01 = (ir - i03*ne02*k - i02*k); + + const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); + const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); + + float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); + + for (int64_t i00 = 0; i00 < n; ++i00) { + float sum = 0.0f; + for (int64_t t = 0; t < i00; ++t) { + sum += A_batch[i00 * n + t] * X_batch[i01 * n + t]; + } + + const float diag = A_batch[i00 * n + i00]; + GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); + + X_batch[i01 * n + i00] = (B_batch[i00 * k + i01] - sum) / diag; + } + } +} + +void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { + ggml_compute_forward_solve_tri_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } +} + // ggml_compute_forward_rwkv_wkv7 static void ggml_compute_forward_rwkv_wkv7_f32( diff --git a/ggml/src/ggml-cpu/ops.h b/ggml/src/ggml-cpu/ops.h index 2b4127c12b1..98a0eec16d9 100644 --- a/ggml/src/ggml-cpu/ops.h +++ b/ggml/src/ggml-cpu/ops.h @@ -34,6 +34,7 @@ void ggml_compute_forward_add1(const struct ggml_compute_params * params, struct void ggml_compute_forward_acc(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sum(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sum_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_cumsum(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_mean(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_argmax(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_count_equal(const struct ggml_compute_params * params, struct ggml_tensor * dst); @@ -81,6 +82,8 @@ void ggml_compute_forward_arange(const struct ggml_compute_params * params, stru void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_leaky_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_fill(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_flash_attn_ext(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_flash_attn_back( const struct ggml_compute_params * params, @@ -96,6 +99,7 @@ void ggml_compute_forward_get_rel_pos(const struct ggml_compute_params * params, void ggml_compute_forward_add_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rwkv_wkv6(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_rwkv_wkv7(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_gla(const struct ggml_compute_params * params, struct ggml_tensor * dst); 
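The solver above is plain forward substitution. Stripped of batching and the multi-column B layout, one lower-triangular solve looks like this (minimal sketch, row-major storage, diagonal assumed non-zero):

    // Solve A x = b for one right-hand side, A being an n x n lower-triangular
    // matrix stored row-major. Each row i subtracts the already-solved
    // components before dividing by the diagonal. O(n^2) per column, so
    // O(n^2 * k) for the k columns handled by the kernel above.
    static void solve_lower_tri_f32(const float * A, const float * b, float * x, int n) {
        for (int i = 0; i < n; ++i) {
            float sum = 0.0f;
            for (int t = 0; t < i; ++t) {
                sum += A[i*n + t] * x[t];
            }
            x[i] = (b[i] - sum) / A[i*n + i]; // diagonal assumed non-zero
        }
    }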
void ggml_compute_forward_map_custom1(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_map_custom2(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/ggml/src/ggml-cpu/unary-ops.cpp b/ggml/src/ggml-cpu/unary-ops.cpp index a047537b34f..1d9873ad0f2 100644 --- a/ggml/src/ggml-cpu/unary-ops.cpp +++ b/ggml/src/ggml-cpu/unary-ops.cpp @@ -73,6 +73,14 @@ static inline float op_log(float x) { return logf(x); } +static inline float op_expm1(float x) { + return expf(x) - 1.0f; +} + +static inline float op_softplus(float x) { + return (x > 20.0f) ? x : logf(1.0f + expf(x)); +} + static inline float op_floor(float x) { return floorf(x); } @@ -290,6 +298,14 @@ void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * unary_op(params, dst); } +void ggml_compute_forward_expm1(const ggml_compute_params * params, ggml_tensor * dst) { + unary_op(params, dst); +} + +void ggml_compute_forward_softplus(const ggml_compute_params * params, ggml_tensor * dst) { + unary_op(params, dst); +} + void ggml_compute_forward_floor(const ggml_compute_params * params, ggml_tensor * dst) { unary_op(params, dst); } diff --git a/ggml/src/ggml-cpu/unary-ops.h b/ggml/src/ggml-cpu/unary-ops.h index fa45d9f0e63..bcad5a3af1a 100644 --- a/ggml/src/ggml-cpu/unary-ops.h +++ b/ggml/src/ggml-cpu/unary-ops.h @@ -22,6 +22,8 @@ void ggml_compute_forward_sqrt(const struct ggml_compute_params * params, struct void ggml_compute_forward_sin(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_cos(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_log(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_expm1(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_softplus(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_floor(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_ceil(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_round(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h index 65c7dfb6b9a..ac59f1fe85b 100644 --- a/ggml/src/ggml-cpu/vec.h +++ b/ggml/src/ggml-cpu/vec.h @@ -1416,6 +1416,16 @@ inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #endif } +inline static void ggml_vec_cumsum_f32(const int n, float * y, const float * x) { + for (int i = 0; i < n; ++i) { + if (i == 0) { + y[i] = x[i]; + } else { + y[i] = y[i - 1] + x[i]; + } + } +} + inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float * x) { ggml_float sum = 0.0; for (int i = 0; i < n; ++i) { diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 41de87c0999..7d792e60cf9 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2527,6 +2527,12 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_UNARY_OP_TRUNC: ggml_cuda_op_trunc(ctx, dst); break; + case GGML_UNARY_OP_EXPM1: + ggml_cuda_op_expm1(ctx, dst); + break; + case GGML_UNARY_OP_SOFTPLUS: + ggml_cuda_op_softplus(ctx, dst); + break; default: return false; } @@ -3829,6 +3835,8 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case 
GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: + case GGML_UNARY_OP_EXPM1: + case GGML_UNARY_OP_SOFTPLUS: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_FLOOR: case GGML_UNARY_OP_CEIL: diff --git a/ggml/src/ggml-cuda/unary.cu b/ggml/src/ggml-cuda/unary.cu index c1dc6ddbf8f..d4866067a4f 100644 --- a/ggml/src/ggml-cuda/unary.cu +++ b/ggml/src/ggml-cuda/unary.cu @@ -81,6 +81,14 @@ static __device__ __forceinline__ float op_log(float x) { return logf(x); } +static __device__ __forceinline__ float op_expm1(float x) { + return expm1f(x); +} + +static __device__ __forceinline__ float op_softplus(float x) { + return (x > 20.0f) ? x : logf(1.0f + expf(x)); +} + static __device__ __forceinline__ float op_elu(float x) { return (x > 0.f) ? x : expm1f(x); } @@ -233,6 +241,14 @@ void ggml_cuda_op_round(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { void ggml_cuda_op_trunc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } + +void ggml_cuda_op_expm1(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary(ctx, dst); +} + +void ggml_cuda_op_softplus(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary(ctx, dst); +} /* gated ops */ template diff --git a/ggml/src/ggml-cuda/unary.cuh b/ggml/src/ggml-cuda/unary.cuh index 2800c75ba3f..609046e5694 100644 --- a/ggml/src/ggml-cuda/unary.cuh +++ b/ggml/src/ggml-cuda/unary.cuh @@ -61,6 +61,10 @@ void ggml_cuda_op_cos(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_expm1(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_softplus(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_floor(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index ec37a25337b..fe57d4c582b 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -102,7 +102,7 @@ static bool ggml_op_is_empty(enum ggml_op op) { } } -static inline float ggml_softplus(float input) { +static inline float ggml_compute_softplus_f32(float input) { return (input > 20.0f) ? 
input : logf(1 + expf(input)); } // diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 9be35c1be84..a5846a23937 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -935,6 +935,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "COS", "SUM", "SUM_ROWS", + "CUMSUM", "MEAN", "ARGMAX", "COUNT_EQUAL", @@ -990,6 +991,8 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "TIMESTEP_EMBEDDING", "ARGSORT", "LEAKY_RELU", + "TRI", + "FILL", "FLASH_ATTN_EXT", "FLASH_ATTN_BACK", @@ -1002,6 +1005,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "RWKV_WKV6", "GATED_LINEAR_ATTN", "RWKV_WKV7", + "SOLVE_TRI", "UNARY", @@ -1019,7 +1023,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "GLU", }; -static_assert(GGML_OP_COUNT == 90, "GGML_OP_COUNT != 90"); +static_assert(GGML_OP_COUNT == 94, "GGML_OP_COUNT != 94"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1039,6 +1043,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "cos(x)", "Σx", "Σx_k", + "cumsum(x)", "Σx/n", "argmax(x)", "count_equal(x)", @@ -1094,6 +1099,8 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "timestep_embedding(timesteps, dim, max_period)", "argsort(x)", "leaky_relu(x)", + "tri(x)", + "fill(x, c)", "flash_attn_ext(x)", "flash_attn_back(x)", @@ -1106,6 +1113,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "rwkv_wkv6(k, v, r, tf, td, s)", "gated_linear_attn(k, v, q, gate, s)", "rwkv_wkv7(r, w, k, v, a, b, s)", + "A X = B, A triangular, solve X", "unary(x)", @@ -1123,7 +1131,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "glu(x)", }; -static_assert(GGML_OP_COUNT == 90, "GGML_OP_COUNT != 90"); +static_assert(GGML_OP_COUNT == 94, "GGML_OP_COUNT != 94"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -1142,6 +1150,8 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { "HARDSWISH", "HARDSIGMOID", "EXP", + "EXPM1", + "SOFTPLUS", "GELU_ERF", "XIELU", "FLOOR", @@ -1150,7 +1160,7 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { "TRUNC", }; -static_assert(GGML_UNARY_OP_COUNT == 20, "GGML_UNARY_OP_COUNT != 20"); +static_assert(GGML_UNARY_OP_COUNT == 22, "GGML_UNARY_OP_COUNT != 22"); static const char * GGML_GLU_OP_NAME[GGML_GLU_OP_COUNT] = { "REGLU", @@ -2258,6 +2268,30 @@ struct ggml_tensor * ggml_log_inplace( return ggml_log_impl(ctx, a, true); } +struct ggml_tensor * ggml_expm1( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_EXPM1); +} + +struct ggml_tensor * ggml_expm1_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_EXPM1); +} + +struct ggml_tensor * ggml_softplus( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_SOFTPLUS); +} + +struct ggml_tensor * ggml_softplus_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SOFTPLUS); +} + // ggml_sin static struct ggml_tensor * ggml_sin_impl( @@ -2341,6 +2375,21 @@ struct ggml_tensor * ggml_sum_rows( return result; } +// ggml_cumsum + +struct ggml_tensor * ggml_cumsum( + struct ggml_context * ctx, + struct ggml_tensor * a) { + GGML_ASSERT(a->type == GGML_TYPE_F32); + + struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_CUMSUM; + result->src[0] = a; + + return result; +} + // ggml_mean struct ggml_tensor * ggml_mean( @@ -2668,8 +2717,8 @@ struct ggml_tensor * ggml_xielu( struct ggml_tensor * result = 
ggml_dup_tensor(ctx, a); ggml_set_op_params_i32(result, 0, (int32_t) GGML_UNARY_OP_XIELU); - ggml_set_op_params_f32(result, 1, beta + ggml_softplus(alpha_n)); - ggml_set_op_params_f32(result, 2, ggml_softplus(alpha_p)); + ggml_set_op_params_f32(result, 1, beta + ggml_compute_softplus_f32(alpha_n)); + ggml_set_op_params_f32(result, 2, ggml_compute_softplus_f32(alpha_p)); ggml_set_op_params_f32(result, 3, beta); ggml_set_op_params_f32(result, 4, eps); @@ -5028,6 +5077,61 @@ struct ggml_tensor * ggml_timestep_embedding( return result; } +// ggml_tri + +struct ggml_tensor * ggml_tri( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_tri_type type) { + GGML_ASSERT(a->type == GGML_TYPE_F32); + + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(a->ne[0] == a->ne[1]); + + struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + + ggml_set_op_params_i32(result, 0, type); + + result->op = GGML_OP_TRI; + result->src[0] = a; + + return result; +} + +// ggml_fill + +static struct ggml_tensor * ggml_fill_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + float c, + bool inplace) { + GGML_ASSERT(a->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(a)); + + struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); + + ggml_set_op_params_f32(result, 0, c); + + result->op = GGML_OP_FILL; + result->src[0] = a; + + return result; +} + +struct ggml_tensor * ggml_fill( + struct ggml_context * ctx, + struct ggml_tensor * a, + float c) { + return ggml_fill_impl(ctx, a, c, false); +} + +struct ggml_tensor * ggml_fill_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float c) { + return ggml_fill_impl(ctx, a, c, true); +} + // ggml_argsort struct ggml_tensor * ggml_argsort( @@ -5882,6 +5986,41 @@ struct ggml_tensor * ggml_opt_step_sgd( return result; } +// solve_tri + +struct ggml_tensor * ggml_solve_tri( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + bool left, + bool lower, + bool uni) { + GGML_ASSERT(a->type == GGML_TYPE_F32); + GGML_ASSERT(b->type == GGML_TYPE_F32); + + // A must be square and lower diagonal + GGML_ASSERT(a->ne[0] == a->ne[1]); + // B must have same outer dimension as A + GGML_ASSERT(a->ne[1] == b->ne[1]); + + // batch dimensions must be equal + GGML_ASSERT(a->ne[2] == b->ne[2]); + GGML_ASSERT(a->ne[3] == b->ne[3]); + + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(ggml_is_contiguous(b)); + + GGML_ASSERT(lower && left && !uni); // TODO: support other variants + + struct ggml_tensor * result = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, b->ne[0], b->ne[1], b->ne[2], b->ne[3]); + + result->op = GGML_OP_SOLVE_TRI; + result->src[0] = a; + result->src[1] = b; + + return result; +} + //////////////////////////////////////////////////////////////////////////////// struct ggml_hash_set ggml_hash_set_new(size_t size) { @@ -6454,6 +6593,16 @@ static void ggml_compute_backward( ggml_add_or_set(ctx, cgraph, isrc0, ggml_mul(ctx, tensor, grad)); } } break; + case GGML_UNARY_OP_EXPM1: { + if (src0_needs_grads) { + ggml_add_or_set(ctx, cgraph, isrc0, ggml_mul(ctx, grad, ggml_exp(ctx, src0))); + } + } break; + case GGML_UNARY_OP_SOFTPLUS: { + if (src0_needs_grads) { + ggml_add_or_set(ctx, cgraph, isrc0, ggml_mul(ctx, grad, ggml_sigmoid(ctx, src0))); + } + } break; default: { fprintf(stderr, "%s: unsupported unary op for backward pass: %s\n", __func__, ggml_unary_op_name(ggml_get_unary_op(tensor))); From 3e4ae291755c99bc7afa62c8625cc54a3558c0ac Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Alberto=20Cabrera=20P=C3=A9rez?= Date: Thu, 13 Nov 2025 20:53:00 +0000 Subject: [PATCH 31/51] ggml-cpu: handle 3d tensors in repack mat_mul (llama/17241) * ggml-cpu: handle 3d tensors in repack mul_mat * Removed unnecessary branch, removed need for * Fixed dst_ptr pointer in chunk + clang_format * GGML_ASSERT to check wdata within bounds * Accidental ggml.h inclusion * Improved GGML_ASSERT on wdata boundaries * Address performance regression in Qwen and llama.cpp due to chunking --- ggml/src/ggml-cpu/repack.cpp | 137 ++++++++++++++++++++++++----------- 1 file changed, 95 insertions(+), 42 deletions(-) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 8421c84ce09..3db26cff74b 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -1600,29 +1600,52 @@ template src[0]; const ggml_tensor * src1 = op->src[1]; ggml_tensor * dst = op; GGML_TENSOR_BINARY_OP_LOCALS - const void * src1_wdata = params->wdata; const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); + GGML_ASSERT(ne03 == 1 && ne13 == 1); + GGML_ASSERT(ne12 % ne02 == 0); + const int64_t r2 = ne12 / ne02; + + const int64_t i12 = src1_start / ne1; + const int64_t i11 = src1_start - i12 * ne1; + + // Determine batch index + const int64_t i02 = i12 / r2; + + const int64_t i1 = i11; + const int64_t i2 = i12; + + const char * src0_ptr = (const char *) src0->data + i02 * nb02; + const char * src1_ptr = (const char *) params->wdata + (i11 + i12 * ne11) * src1_col_stride; + char * dst_ptr = ((char *) dst->data + (i1 * nb1 + i2 * nb2)); + + const int64_t nrows = src1_end - src1_start; + const int64_t ncols = src0_end - src0_start; + + GGML_ASSERT(src1_ptr + src1_col_stride * nrows <= (const char *) params->wdata + params->wsize); + // If there are more than three rows in src1, use gemm; otherwise, use gemv. - if (ne11 > 3) { - gemm(ne00, - (float *) ((char *) dst->data) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); + if (nrows > 3) { + gemm(ne00, (float *) (dst_ptr) + src0_start, nb1 / nb0, + src0_ptr + src0_start * nb01, src1_ptr, + nrows - (nrows % 4), ncols); } - for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { - gemv(ne00, - (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata + (src1_col_stride * iter), 1, - src0_end - src0_start); + for (int iter = nrows - (nrows % 4); iter < nrows; iter++) { + gemv(ne00, (float *) (dst_ptr + (iter * nb1)) + src0_start, + ne01, src0_ptr + src0_start * nb01, + src1_ptr + (src1_col_stride * iter), 1 /* nrows */, ncols); } } @@ -1647,6 +1670,12 @@ template type == GGML_TYPE_F32); GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); @@ -1654,47 +1683,64 @@ template (params->wdata); const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); + const size_t nbw2 = nbw1 * ne11; - assert(params->wsize >= nbw1 * ne11); + assert(params->wsize >= nbw2 * ne12); const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - int64_t i11_processed = 0; - for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { - ggml_quantize_mat_t((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); - } + // INFO: Quantization is done in planes to avoid extra complexity in chunking. + // Flattening dimensions not multiple of INTER_SIZE would require extra handling depending on how + // the planes are broadcast. 
+ for (int64_t i12 = 0; i12 < ne12; i12++) { + char * data_ptr = (char *) src1->data + i12 * nb12; + char * wdata_ptr = wdata + i12 * nbw2; - i11_processed = ne11 - ne11 % 4; - for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { - from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10); + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + ggml_quantize_mat_t((float *) (data_ptr + i11 * nb11), + (void *) (wdata_ptr + i11 * nbw1), 4, ne10); + } + + const int64_t i11_processed = ne11 - ne11 % 4; + for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { + from_float((float *) (data_ptr + i11 * nb11), (void *) (wdata_ptr + i11 * nbw1), ne10); + } } // disable for NUMA const bool disable_chunking = ggml_is_numa(); // 4x chunks per thread - int64_t nr = ggml_nrows(op->src[0]); - int nth_scaled = nth * 4; - int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; - int64_t nchunk = (nr + chunk_size - 1) / chunk_size; + const int64_t nr0 = ggml_nrows(op->src[0]); + + int nth_scaled = nth * 4; + int64_t chunk_size0 = (nr0 + nth_scaled - 1) / nth_scaled; + int64_t nchunk0 = (nr0 + chunk_size0 - 1) / chunk_size0; + + // src1 is chunked only by full planes. + // When we flatten we need to address dimensions not multiple of the q8 INTER_SIZE + // to route them thorugh GEMV. + // nchunk1 = ne12 also avoids messing the chunking for models with no 3d tensors + // to avoid affecting their performance + int64_t nchunk1 = ne12; // Ensure minimum chunk size to avoid alignment issues with high thread counts // Minimum chunk size should be at least NB_COLS to prevent overlapping chunks after alignment const int64_t min_chunk_size = NB_COLS; - if (nchunk > 0 && (nr / nchunk) < min_chunk_size && nr >= min_chunk_size) { - nchunk = (nr + min_chunk_size - 1) / min_chunk_size; + if (nchunk0 > 0 && (nr0 / nchunk0) < min_chunk_size && nr0 >= min_chunk_size) { + nchunk0 = (nr0 + min_chunk_size - 1) / min_chunk_size; } - if (nth == 1 || nchunk < nth || disable_chunking) { - nchunk = nth; + if (nth == 1 || nchunk0 < nth || disable_chunking) { + nchunk0 = nth; } + const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; + // Ensure nchunk doesn't exceed the number of rows divided by minimum chunk size // This prevents creating too many tiny chunks that could overlap after alignment - const int64_t max_nchunk = (nr + min_chunk_size - 1) / min_chunk_size; - if (nchunk > max_nchunk) { - nchunk = max_nchunk; - } + const int64_t max_nchunk = (nr0 + min_chunk_size - 1) / min_chunk_size; + nchunk0 = MIN(nchunk0, max_nchunk); if (ith == 0) { // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. @@ -1706,23 +1752,30 @@ template ne01) { - src0_end = ne01; - } + src0_end = (src0_end % NB_COLS) ? 
src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; + src0_end = MIN(src0_end, ne01); + // Make sure current plane is the last one before exiting if (src0_start >= src0_end) { - break; + current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); + continue; } - forward_mul_mat_one_chunk(params, dst, src0_start, src0_end); + forward_mul_mat_one_chunk(params, dst, src0_start, src0_end, src1_start, src1_end); current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); } From ae08083e2dddeefccf8095ac8ed2510dc3f74699 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 14 Nov 2025 09:13:34 +0200 Subject: [PATCH 32/51] metal : make the FA extra sizes consistent (llama/17143) --- ggml/src/ggml-metal/ggml-metal-ops.cpp | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp index d9811e31159..c48f7cd29fe 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.cpp +++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp @@ -1975,7 +1975,9 @@ size_t ggml_metal_op_flash_attn_ext_extra_pad(const ggml_tensor * op) { const bool has_mask = op->src[3] != nullptr; if (ggml_metal_op_flash_attn_ext_use_vec(op)) { - const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_VEC_NCPSG != 0; + // note: always reserve the padding space to avoid graph reallocations + //const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_VEC_NCPSG != 0; + const bool has_kvpad = true; if (has_kvpad) { res += OP_FLASH_ATTN_EXT_VEC_NCPSG*( @@ -1984,7 +1986,8 @@ size_t ggml_metal_op_flash_attn_ext_extra_pad(const ggml_tensor * op) { (has_mask ? ggml_type_size(GGML_TYPE_F16)*ne31*ne32*ne33 : 0)); } } else { - const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_NCPSG != 0; + //const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_NCPSG != 0; + const bool has_kvpad = true; if (has_kvpad) { res += OP_FLASH_ATTN_EXT_NCPSG*( @@ -2020,9 +2023,10 @@ size_t ggml_metal_op_flash_attn_ext_extra_blk(const ggml_tensor * op) { const bool is_vec = ggml_metal_op_flash_attn_ext_use_vec(op); // this optimization is not useful for the vector kernels - if (is_vec) { - return res; - } + // note: always reserve the blk buffer to avoid graph reallocations + //if (is_vec) { + // return res; + //} const int nqptg = is_vec ? OP_FLASH_ATTN_EXT_VEC_NQPTG : OP_FLASH_ATTN_EXT_NQPTG; const int ncpsg = is_vec ? 
OP_FLASH_ATTN_EXT_VEC_NCPSG : OP_FLASH_ATTN_EXT_NCPSG; @@ -2049,13 +2053,16 @@ size_t ggml_metal_op_flash_attn_ext_extra_tmp(const ggml_tensor * op) { size_t res = 0; - if (ggml_metal_op_flash_attn_ext_use_vec(op)) { + // note: always reserve the temp buffer to avoid graph reallocations + //if (ggml_metal_op_flash_attn_ext_use_vec(op)) { + if (true) { const int64_t nwg = 32; + const int64_t ne01_max = std::min(ne01, 32); // temp buffer for writing the results from each workgroup // - ne20: the size of the Value head // - + 2: the S and M values for each intermediate result - res += ggml_type_size(GGML_TYPE_F32)*(ne01*ne02*ne03*nwg*(ne20 + 2)); + res += ggml_type_size(GGML_TYPE_F32)*(ne01_max*ne02*ne03*nwg*(ne20 + 2)); } return res; From a6f1d807eed3950cd6c7029385ba84d7b2637fea Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 14 Nov 2025 09:36:06 +0200 Subject: [PATCH 33/51] metal : support argsort for ne00 > 1024 (llama/17247) * metal : refactor argsort * cont : sort chunks * cont : merge sorted buckets * cont : cleanup --- ggml/src/ggml-metal/ggml-metal-device.cpp | 28 ++++ ggml/src/ggml-metal/ggml-metal-device.h | 1 + ggml/src/ggml-metal/ggml-metal-device.m | 2 - ggml/src/ggml-metal/ggml-metal-impl.h | 22 ++- ggml/src/ggml-metal/ggml-metal-ops.cpp | 81 +++++++++-- ggml/src/ggml-metal/ggml-metal.cpp | 4 + ggml/src/ggml-metal/ggml-metal.metal | 164 ++++++++++++++++++---- 7 files changed, 259 insertions(+), 43 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal-device.cpp b/ggml/src/ggml-metal/ggml-metal-device.cpp index 08095dcf060..e61b0978339 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.cpp +++ b/ggml/src/ggml-metal/ggml-metal-device.cpp @@ -943,6 +943,34 @@ ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argsort(ggml_metal_library return res; } +ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argsort_merge(ggml_metal_library_t lib, const ggml_tensor * op) { + assert(op->op == GGML_OP_ARGSORT); + + char base[256]; + char name[256]; + + ggml_sort_order order = (ggml_sort_order) op->op_params[0]; + + const char * order_str = "undefined"; + switch (order) { + case GGML_SORT_ORDER_ASC: order_str = "asc"; break; + case GGML_SORT_ORDER_DESC: order_str = "desc"; break; + default: GGML_ABORT("fatal error"); + }; + + snprintf(base, 256, "kernel_argsort_merge_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str); + snprintf(name, 256, "%s", base); + + ggml_metal_pipeline_t res = ggml_metal_library_get_pipeline(lib, name); + if (res) { + return res; + } + + res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr); + + return res; +} + ggml_metal_pipeline_t ggml_metal_library_get_pipeline_flash_attn_ext_pad( ggml_metal_library_t lib, const struct ggml_tensor * op, diff --git a/ggml/src/ggml-metal/ggml-metal-device.h b/ggml/src/ggml-metal/ggml-metal-device.h index 5a8bc0c1ccd..5539abda336 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.h +++ b/ggml/src/ggml-metal/ggml-metal-device.h @@ -125,6 +125,7 @@ ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mm_id (ggml_me ggml_metal_pipeline_t ggml_metal_library_get_pipeline_mul_mv_id (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argmax (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argsort (ggml_metal_library_t lib, const struct ggml_tensor * op); +ggml_metal_pipeline_t ggml_metal_library_get_pipeline_argsort_merge (ggml_metal_library_t lib, 
const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_bin (ggml_metal_library_t lib, enum ggml_op op, int32_t n_fuse, bool row); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_l2_norm (ggml_metal_library_t lib, const struct ggml_tensor * op); ggml_metal_pipeline_t ggml_metal_library_get_pipeline_group_norm (ggml_metal_library_t lib, const struct ggml_tensor * op); diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index 69c8820854a..741b1a44db3 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -904,8 +904,6 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te case GGML_OP_LEAKY_RELU: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_ARGSORT: - // TODO: Support arbitrary column width - return op->src[0]->ne[0] <= 1024; case GGML_OP_ARANGE: return true; case GGML_OP_FLASH_ATTN_EXT: diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index 6d02befa97d..dd889cd90d5 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -793,10 +793,28 @@ typedef struct { } ggml_metal_kargs_leaky_relu; typedef struct { - int64_t ncols; - int64_t ncols_pad; + int64_t ne00; + int64_t ne01; + int64_t ne02; + int64_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; } ggml_metal_kargs_argsort; +typedef struct { + int64_t ne00; + int64_t ne01; + int64_t ne02; + int64_t ne03; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t len; +} ggml_metal_kargs_argsort_merge; + typedef struct { int64_t ne0; float start; diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp index c48f7cd29fe..ae098d371f1 100644 --- a/ggml/src/ggml-metal/ggml-metal-ops.cpp +++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp @@ -3530,38 +3530,95 @@ int ggml_metal_op_argsort(ggml_metal_op_t ctx, int idx) { ggml_metal_library_t lib = ctx->lib; ggml_metal_encoder_t enc = ctx->enc; + GGML_ASSERT(ggml_is_contiguous_rows(op->src[0])); + GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne); GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb); GGML_TENSOR_LOCALS( int32_t, ne, op, ne); GGML_TENSOR_LOCALS(uint32_t, nb, op, nb); + ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_argsort(lib, op); + // bitonic sort requires the number of elements to be power of 2 - int64_t ne00_padded = 1; - while (ne00_padded < ne00) { - ne00_padded *= 2; + int nth = 1; + while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) { + nth *= 2; } - ggml_metal_pipeline_t pipeline = ggml_metal_library_get_pipeline_argsort(lib, op); - - const int64_t nrows = ggml_nrows(op->src[0]); + const int nptg = (ne00 + nth - 1)/nth; // Metal kernels require the buffer size to be multiple of 16 bytes // https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength - const size_t smem = GGML_PAD(ne00_padded*sizeof(int32_t), 16); + const size_t smem = GGML_PAD(nth*sizeof(int32_t), 16); + + ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]); + ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op); + + ggml_metal_buffer_id bid_tmp = bid_dst; + bid_tmp.offs += ggml_nbytes(op); + + if ((int) ceil(std::log(nptg) / std::log(2)) % 2 == 1) { + std::swap(bid_dst, bid_tmp); + } ggml_metal_kargs_argsort args = { - /*.ncols =*/ ne00, - /*.ncols_pad =*/ ne00_padded + 
/*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.ne03 =*/ ne03, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, }; ggml_metal_encoder_set_pipeline(enc, pipeline); ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0); - ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1); - ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2); + ggml_metal_encoder_set_buffer (enc, bid_src0, 1); + ggml_metal_encoder_set_buffer (enc, bid_dst, 2); ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0); - ggml_metal_encoder_dispatch_threadgroups(enc, 1, nrows, 1, ne00_padded, 1, 1); + ggml_metal_encoder_dispatch_threadgroups(enc, nptg*ne01, ne02, ne03, nth, 1, 1); + + ggml_metal_pipeline_t pipeline_merge = ggml_metal_library_get_pipeline_argsort_merge(lib, op); + + int len = nth; + + while (len < ne00) { + ggml_metal_op_concurrency_reset(ctx); + + ggml_metal_kargs_argsort_merge args_merge = { + .ne00 = ne00, + .ne01 = ne01, + .ne02 = ne02, + .ne03 = ne03, + .nb00 = nb00, + .nb01 = nb01, + .nb02 = nb02, + .nb03 = nb03, + .len = len, + }; + + // merges per row + const int nm = (ne00 + 2*len - 1) / (2*len); + + const int nth = std::min(512, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_merge)); + + ggml_metal_encoder_set_pipeline(enc, pipeline_merge); + ggml_metal_encoder_set_bytes (enc, &args_merge, sizeof(args_merge), 0); + ggml_metal_encoder_set_buffer (enc, bid_src0, 1); + ggml_metal_encoder_set_buffer (enc, bid_dst, 2); + ggml_metal_encoder_set_buffer (enc, bid_tmp, 3); + + ggml_metal_encoder_set_threadgroup_memory_size(enc, 0, 0); + + ggml_metal_encoder_dispatch_threadgroups(enc, nm*ne01, ne02, ne03, nth, 1, 1); + + std::swap(bid_dst, bid_tmp); + + len <<= 1; + } return 1; } diff --git a/ggml/src/ggml-metal/ggml-metal.cpp b/ggml/src/ggml-metal/ggml-metal.cpp index 7afc881fa70..35f07f3e713 100644 --- a/ggml/src/ggml-metal/ggml-metal.cpp +++ b/ggml/src/ggml-metal/ggml-metal.cpp @@ -197,6 +197,10 @@ static size_t ggml_backend_metal_buffer_type_get_alloc_size(ggml_backend_buffer_ res += ggml_metal_op_flash_attn_ext_extra_blk(tensor); res += ggml_metal_op_flash_attn_ext_extra_tmp(tensor); } break; + case GGML_OP_ARGSORT: + { + res *= 2; + } break; default: break; } diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 7f94419c3ac..8afc7318f68 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -4541,69 +4541,179 @@ kernel void kernel_timestep_embedding_f32( // bitonic sort implementation following the CUDA kernels as reference typedef void (argsort_t)( constant ggml_metal_kargs_argsort & args, - device const float * x, + device const char * src0, device int32_t * dst, - threadgroup int32_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]]); + threadgroup int32_t * smem_i32 [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort3 ntg[[threads_per_threadgroup]]); template kernel void kernel_argsort_f32_i32( constant ggml_metal_kargs_argsort & args, - device const float * x, + device const char * src0, device int32_t * dst, - threadgroup int32_t * shared_values [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]]) { + threadgroup int32_t * smem_i32 [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + 
ushort3 tpitg[[thread_position_in_threadgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { // bitonic sort - int col = tpitg[0]; - int row = tgpig[1]; + const int col = tpitg[0]; - if (col >= args.ncols_pad) return; + const int i00 = (tgpig[0]/args.ne01)*ntg.x; + const int i01 = tgpig[0]%args.ne01; + const int i02 = tgpig[1]; + const int i03 = tgpig[2]; - device const float * x_row = x + row * args.ncols; - threadgroup int32_t * dst_row = shared_values; + device const float * x_row = (device const float *) (src0 + args.nb01*i01 + args.nb02*i02 + args.nb03*i03); // initialize indices - dst_row[col] = col; + smem_i32[col] = i00 + col; threadgroup_barrier(mem_flags::mem_threadgroup); - for (int k = 2; k <= args.ncols_pad; k *= 2) { + for (int k = 2; k <= ntg.x; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { - if (dst_row[col] >= args.ncols || - (dst_row[ixj] < args.ncols && (order == GGML_SORT_ORDER_ASC ? - x_row[dst_row[col]] > x_row[dst_row[ixj]] : - x_row[dst_row[col]] < x_row[dst_row[ixj]])) + if (smem_i32[col] >= args.ne00 || + (smem_i32[ixj] < args.ne00 && (order == GGML_SORT_ORDER_ASC ? + x_row[smem_i32[col]] > x_row[smem_i32[ixj]] : + x_row[smem_i32[col]] < x_row[smem_i32[ixj]])) ) { - SWAP(dst_row[col], dst_row[ixj]); + SWAP(smem_i32[col], smem_i32[ixj]); } } else { - if (dst_row[ixj] >= args.ncols || - (dst_row[col] < args.ncols && (order == GGML_SORT_ORDER_ASC ? - x_row[dst_row[col]] < x_row[dst_row[ixj]] : - x_row[dst_row[col]] > x_row[dst_row[ixj]])) + if (smem_i32[ixj] >= args.ne00 || + (smem_i32[col] < args.ne00 && (order == GGML_SORT_ORDER_ASC ? + x_row[smem_i32[col]] < x_row[smem_i32[ixj]] : + x_row[smem_i32[col]] > x_row[smem_i32[ixj]])) ) { - SWAP(dst_row[col], dst_row[ixj]); + SWAP(smem_i32[col], smem_i32[ixj]); } } } + threadgroup_barrier(mem_flags::mem_threadgroup); } } // copy the result to dst without the padding - if (col < args.ncols) { - dst[row * args.ncols + col] = dst_row[col]; + if (i00 + col < args.ne00) { + dst += i00 + args.ne00*i01 + args.ne00*args.ne01*i02 + args.ne00*args.ne01*args.ne02*i03; + + dst[col] = smem_i32[col]; } } template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32; template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32; +typedef void (argsort_merge_t)( + constant ggml_metal_kargs_argsort_merge & args, + device const char * src0, + device const int32_t * tmp, + device int32_t * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort3 ntg[[threads_per_threadgroup]]); + +template +kernel void kernel_argsort_merge_f32_i32( + constant ggml_metal_kargs_argsort_merge & args, + device const char * src0, + device const int32_t * tmp, + device int32_t * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { + int im = tgpig[0] / args.ne01; + int i01 = tgpig[0] % args.ne01; + int i02 = tgpig[1]; + int i03 = tgpig[2]; + + const int start = im * (2*args.len); + + const int len0 = MIN(args.len, MAX(0, args.ne00 - (int)(start))); + const int len1 = MIN(args.len, MAX(0, args.ne00 - (int)(start + args.len))); + + const int total = len0 + len1; + + device const int32_t * tmp0 = tmp + start + + i01*args.ne00 + + i02*args.ne00*args.ne01 + + i03*args.ne00*args.ne01*args.ne02; + + device const int32_t * tmp1 = tmp0 + args.len; + + dst += start + + i01*args.ne00 + + 
i02*args.ne00*args.ne01 + + i03*args.ne00*args.ne01*args.ne02; + + device const float * src0_row = (device const float *)(src0 + + args.nb01*i01 + + args.nb02*i02 + + args.nb03*i03); + + for (int k = tpitg.x; k < (int) total; k += ntg.x) { + // find partition (i,j) such that i+j = k + int low = k > len1 ? k - len1 : 0; + int high = MIN(k, len0); + + while (low < high) { + const int mid = (low + high) >> 1; + + const int32_t idx0 = tmp0[mid]; + const int32_t idx1 = tmp1[k - mid - 1]; + + const float val0 = src0_row[idx0]; + const float val1 = src0_row[idx1]; + + if (order == GGML_SORT_ORDER_ASC) { + if (val0 <= val1) { + low = mid + 1; + } else { + high = mid; + } + } else { + if (val0 >= val1) { + low = mid + 1; + } else { + high = mid; + } + } + } + + const int i = low; + const int j = k - i; + + int32_t out_idx; + + if (i >= len0) { + out_idx = tmp1[j]; + } else if (j >= len1) { + out_idx = tmp0[i]; + } else { + const int32_t idx0 = tmp0[i]; + const int32_t idx1 = tmp1[j]; + + const float val0 = src0_row[idx0]; + const float val1 = src0_row[idx1]; + + out_idx = (order == GGML_SORT_ORDER_ASC) + ? (val0 <= val1 ? idx0 : idx1) + : (val0 >= val1 ? idx0 : idx1); + } + + dst[k] = out_idx; + } +} + +template [[host_name("kernel_argsort_merge_f32_i32_asc")]] kernel argsort_merge_t kernel_argsort_merge_f32_i32; +template [[host_name("kernel_argsort_merge_f32_i32_desc")]] kernel argsort_merge_t kernel_argsort_merge_f32_i32; + kernel void kernel_leaky_relu_f32( constant ggml_metal_kargs_leaky_relu & args, device const float * src0, From 786e0056cfcd2d4eee1fffeba81cf2fe45a31fe8 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 15 Nov 2025 02:06:41 -0600 Subject: [PATCH 34/51] vulkan: change graph_compute to be async and enable get_tensor_async (llama/17158) * vulkan: change graph_compute to be async and enable get_tensor_async This allows some additional CPU/GPU overlap for large pp workloads. Also seems to help a bit for token gen, maybe getting rid of a small bubble between graph_compute and get_tensor. Async set and copy functions seem to be very rarely used, so I didn't enable them because I didn't have a good way to test them. The async commands need to be ordered against each other, so put them all on the compute queue. The non-async commands still use the transfer queue. The fence for graph_compute/get_tensor_async is submitted and waited on in ggml_vk_synchronize. 
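In outline, the new scheme is a deferred fence: every submission goes to the compute queue without a fence, a submit_pending flag is set, and only ggml_vk_synchronize submits an empty batch that carries the fence and waits on it. A minimal self-contained sketch of that idea, using plain vulkan.hpp types and a stand-in context struct rather than the real ggml_backend_vk_context:

    // Sketch only - stand-in context struct, not the actual ggml-vulkan code.
    #include <vulkan/vulkan.hpp>

    struct sketch_ctx {
        vk::Device device;
        vk::Queue  queue;
        vk::Fence  fence;
        bool       submit_pending = false;
    };

    // Hot path: hand work to the GPU without a fence and without waiting.
    static void sketch_submit(sketch_ctx & ctx, vk::CommandBuffer cmd) {
        vk::SubmitInfo info(0, nullptr, nullptr, 1, &cmd);
        ctx.queue.submit({ info }, {});    // no fence attached
        ctx.submit_pending = true;
    }

    // Cold path: called only when the CPU actually needs the results.
    static void sketch_synchronize(sketch_ctx & ctx) {
        if (!ctx.submit_pending) {
            return;
        }
        ctx.queue.submit({}, ctx.fence);   // empty submit just signals the fence
        (void) ctx.device.waitForFences(ctx.fence, VK_TRUE, UINT64_MAX);
        ctx.device.resetFences(ctx.fence);
        ctx.submit_pending = false;
    }

Because the fence is signaled by a later submission on the same queue, waiting on it also implies that all earlier submissions to that queue have completed, which is what makes the single deferred wait sufficient.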
* fix thread safety errors

* teardown context cleanly

* Handle async read to non-pinned dst
---
 ggml/src/ggml-vulkan/ggml-vulkan.cpp | 141 ++++++++++++++++++---------
 1 file changed, 96 insertions(+), 45 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index c6503f03260..55b5a037d50 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -234,6 +234,7 @@ class vk_memory_logger;
 #endif
 class vk_perf_logger;
 static void ggml_vk_destroy_buffer(vk_buffer& buf);
+static void ggml_vk_synchronize(ggml_backend_vk_context * ctx);
 
 static constexpr uint32_t mul_mat_vec_max_cols = 8;
 static constexpr uint32_t p021_max_gqa_ratio = 8;
@@ -1581,8 +1582,9 @@ struct ggml_backend_vk_context {
     size_t semaphore_idx, event_idx;
     ggml_vk_garbage_collector gc;
     size_t prealloc_size_x, prealloc_size_y, prealloc_size_split_k, prealloc_size_add_rms_partials, prealloc_size_add_rms_partials_offset;
-    vk_buffer prealloc_x, prealloc_y, prealloc_split_k, prealloc_add_rms_partials;
+    vk_buffer prealloc_x, prealloc_y, prealloc_split_k, prealloc_add_rms_partials, sync_staging;
     vk::Fence fence, almost_ready_fence;
+    bool submit_pending {};
     bool almost_ready_fence_pending {};
     // Set before op_add and unset after op_rms_norm to indicate that the add should
     // write partial sums to accumulate the square of the vector components
@@ -5602,6 +5604,16 @@ static void ggml_vk_ensure_sync_staging_buffer(vk_device& device, size_t size) {
     }
 }
 
+static void ggml_vk_ensure_sync_staging_buffer(ggml_backend_vk_context * ctx, size_t size) {
+    if (ctx->sync_staging == nullptr || ctx->sync_staging->size < size) {
+        VK_LOG_MEMORY("ggml_vk_ensure_sync_staging_buffer(" << size << ")");
+        ggml_vk_destroy_buffer(ctx->sync_staging);
+        ctx->sync_staging = ggml_vk_create_buffer_check(ctx->device, size,
+            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostCached,
+            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent);
+    }
+}
+
 static void ggml_vk_buffer_write_nc_async(ggml_backend_vk_context * ctx, vk_context& subctx, vk_buffer& dst, size_t offset, const ggml_tensor * tensor, bool sync_staging = false) {
     VK_LOG_DEBUG("ggml_vk_buffer_write_nc_async(" << tensor << ")");
     GGML_ASSERT(!ggml_is_contiguous(tensor));
@@ -5803,7 +5815,7 @@ static void ggml_vk_buffer_write(vk_buffer& dst, size_t offset, const void * src
     ggml_vk_buffer_write_2d(dst, offset, src, 0, size, 1);
 }
 
-static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
+static bool ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t spitch, size_t dpitch, size_t width, size_t height, bool sync_staging = false) {
     VK_LOG_DEBUG("ggml_vk_buffer_read_2d_async(offset=" << offset << ", width=" << width << ", height=" << height << ")");
     GGML_ASSERT(width > 0);
     GGML_ASSERT(height > 0);
@@ -5836,12 +5848,13 @@ static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size
 
         ggml_vk_sync_buffers(nullptr, subctx);
         subctx->s->buffer.copyBuffer(src->buffer, buf->buffer, slices);
-        return;
+        return true;
     }
     VK_LOG_DEBUG("STAGING");
 
     if (!sync_staging) {
-        GGML_ABORT("Asynchronous read from non-pinned memory not supported");
+        // copy was not handled, caller needs to fall back
+        return false;
     }
 
     // Fall back to staging buffer
@@ -5854,9 +5867,10 @@ static void ggml_vk_buffer_read_2d_async(vk_context subctx, vk_buffer& src, size
     subctx->s->buffer.copyBuffer(src->buffer, staging_buffer->buffer, slices);
 
     deferred_memcpy(dst, staging_buffer->ptr, copy_size, &subctx->out_memcpys);
+    return true;
 }
 
-static void ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
+static bool ggml_vk_buffer_read_async(vk_context subctx, vk_buffer& src, size_t offset, void * dst, size_t size, bool sync_staging = false) {
     return ggml_vk_buffer_read_2d_async(subctx, src, offset, dst, size, size, size, 1, sync_staging);
 }
 
@@ -5875,7 +5889,8 @@ static void ggml_vk_buffer_read(vk_buffer& src, size_t offset, void * dst, size_
     vk_context subctx = ggml_vk_create_temporary_context(src->device->transfer_queue.cmd_pool);
     ggml_vk_ctx_begin(src->device, subctx);
 
-    ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true);
+    bool ret = ggml_vk_buffer_read_async(subctx, src, offset, dst, size, true);
+    GGML_ASSERT(ret);
     ggml_vk_ctx_end(subctx);
 
     ggml_vk_submit(subctx, src->device->fence);
@@ -11204,8 +11219,9 @@ static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx, vk_contex
     if (subctx) {
         // Submit and wait for any pending work before reallocating the buffers
         ggml_vk_ctx_end(subctx);
-        ggml_vk_submit(subctx, ctx->fence);
-        ggml_vk_wait_for_fence(ctx);
+        ggml_vk_submit(subctx, {});
+        ctx->submit_pending = true;
+        ggml_vk_synchronize(ctx);
         ggml_vk_ctx_begin(ctx->device, subctx);
     }
 
@@ -11243,7 +11259,7 @@ static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx, vk_contex
     }
 }
 
-static bool ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_cgraph * cgraph, ggml_tensor* tensor, int tensor_idx, bool use_fence, bool almost_ready);
+static bool ggml_vk_compute_forward(ggml_backend_vk_context* ctx, ggml_cgraph * cgraph, ggml_tensor* tensor, int tensor_idx, bool almost_ready);
 
 // Returns true if node has enqueued work into the queue, false otherwise
 // If submit is true, all operations queued so far are submitted to Vulkan to overlap cmdlist creation and GPU execution.
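That comment is the crux of the overlap: while the GPU executes one submitted batch, the CPU keeps recording the next. Schematically, the loop looks like the sketch below; record_node and submit_batch are hypothetical stand-ins for the graph-build and submit code, not real ggml-vulkan functions:

    #include <cstddef>
    #include <vector>

    struct Node {};                        // stand-in for a graph node

    static void record_node(const Node &)  { /* CPU: encode the node's commands */ }
    static void submit_batch()             { /* hand the recorded batch to the GPU */ }

    // Submitting every `batch` nodes lets the GPU execute earlier batches
    // while the CPU is still recording later ones.
    void build_and_run(const std::vector<Node> & nodes) {
        const std::size_t batch = 100;
        std::size_t pending = 0;
        for (std::size_t i = 0; i < nodes.size(); ++i) {
            record_node(nodes[i]);
            if (++pending == batch || i + 1 == nodes.size()) {
                submit_batch();            // no fence, no wait
                pending = 0;
            }
        }
        // a fence is only waited on in the final synchronize, not per batch
    }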
@@ -11787,7 +11803,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr ctx->compute_ctx.reset(); - bool ok = ggml_vk_compute_forward(ctx, cgraph, node_begin, node_idx_begin, false, almost_ready); + bool ok = ggml_vk_compute_forward(ctx, cgraph, node_begin, node_idx_begin, almost_ready); if (!ok) { if (node->op == GGML_OP_UNARY) { std::cerr << __func__ << ": error: op not supported UNARY " << node->name << " (" << ggml_unary_op_name(static_cast(node->op_params[0])) << ")" << std::endl; @@ -11802,7 +11818,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr return true; } -static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, ggml_tensor * tensor, int tensor_idx, bool use_fence = true, bool almost_ready = false) { +static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * cgraph, ggml_tensor * tensor, int tensor_idx, bool almost_ready = false) { GGML_UNUSED(cgraph); ggml_backend_buffer * buf = nullptr; @@ -11919,16 +11935,10 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * vk_context subctx = ctx->tensor_ctxs[tensor_idx].lock(); - // always wait for the GPU work to be done for the last submit - if (tensor_idx == subctx->exit_tensor_idx) { - use_fence = true; - } - // Only run if ctx hasn't been submitted yet if (!subctx->seqs.empty()) { #ifdef GGML_VULKAN_CHECK_RESULTS ggml_vk_check_results_0(ctx, cgraph, tensor_idx); - use_fence = true; #endif // Do staging buffer copies @@ -11940,17 +11950,16 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * memset(mset.dst, mset.val, mset.n); } - if (almost_ready && !ctx->almost_ready_fence_pending && !use_fence) { + if (almost_ready && !ctx->almost_ready_fence_pending) { ggml_vk_submit(subctx, ctx->almost_ready_fence); ctx->almost_ready_fence_pending = true; } else { - ggml_vk_submit(subctx, use_fence ? 
ctx->fence : vk::Fence{}); + ggml_vk_submit(subctx, {}); } + ctx->submit_pending = true; - if (use_fence) { - ggml_vk_wait_for_fence(ctx); - } #ifdef GGML_VULKAN_CHECK_RESULTS + ggml_vk_synchronize(ctx); ggml_vk_check_results_1(ctx, cgraph, tensor_idx); #endif } @@ -12006,11 +12015,19 @@ static void ggml_vk_graph_cleanup(ggml_backend_vk_context * ctx) { // Clean up on backend free static void ggml_vk_cleanup(ggml_backend_vk_context * ctx) { VK_LOG_DEBUG("ggml_vk_cleanup(" << ctx->name << ")"); + // discard any unsubmitted command buffers + ctx->transfer_ctx.reset(); + // wait for any pending command buffers to finish + ggml_vk_synchronize(ctx); + ggml_vk_graph_cleanup(ctx); ggml_vk_destroy_buffer(ctx->prealloc_x); ggml_vk_destroy_buffer(ctx->prealloc_y); ggml_vk_destroy_buffer(ctx->prealloc_split_k); + ggml_vk_destroy_buffer(ctx->prealloc_add_rms_partials); + ggml_vk_destroy_buffer(ctx->sync_staging); + ctx->prealloc_y_last_pipeline_used = nullptr; ctx->prealloc_size_x = 0; @@ -12305,7 +12322,7 @@ static void ggml_backend_vk_set_tensor_async(ggml_backend_t backend, ggml_tensor if (ctx->transfer_ctx.expired()) { // Initialize new transfer context - transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool); + transfer_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ctx->transfer_ctx = transfer_ctx; ggml_vk_ctx_begin(ctx->device, transfer_ctx); } else { @@ -12328,7 +12345,7 @@ static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_ if (ctx->transfer_ctx.expired()) { // Initialize new transfer context - transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool); + transfer_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ctx->transfer_ctx = transfer_ctx; ggml_vk_ctx_begin(ctx->device, transfer_ctx); } else { @@ -12337,7 +12354,23 @@ static void ggml_backend_vk_get_tensor_async(ggml_backend_t backend, const ggml_ vk_buffer buf = buf_ctx->dev_buffer; - ggml_vk_buffer_read_async(transfer_ctx, buf, vk_tensor_offset(tensor) + tensor->view_offs + offset, data, size); + auto src_offset = vk_tensor_offset(tensor) + tensor->view_offs + offset; + bool ret = ggml_vk_buffer_read_async(transfer_ctx, buf, src_offset, data, size); + + // If that failed, copy synchronously through a staging buffer + if (!ret) { + ggml_vk_ensure_sync_staging_buffer(ctx, size); + ggml_vk_sync_buffers(nullptr, transfer_ctx); + + vk::BufferCopy buffer_cpy; + buffer_cpy.srcOffset = src_offset; + buffer_cpy.dstOffset = 0; + buffer_cpy.size = size; + + transfer_ctx->s->buffer.copyBuffer(buf->buffer, ctx->sync_staging->buffer, { buffer_cpy }); + deferred_memcpy(data, ctx->sync_staging->ptr, size, &transfer_ctx->out_memcpys); + ggml_vk_synchronize(ctx); + } } static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) { @@ -12351,7 +12384,7 @@ static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_ if (ctx->transfer_ctx.expired()) { // Initialize new transfer context - transfer_ctx = ggml_vk_create_context(ctx, ctx->transfer_cmd_pool); + transfer_ctx = ggml_vk_create_context(ctx, ctx->compute_cmd_pool); ctx->transfer_ctx = transfer_ctx; ggml_vk_ctx_begin(ctx->device, transfer_ctx); } else { @@ -12368,29 +12401,49 @@ static bool ggml_backend_vk_cpy_tensor_async(ggml_backend_t backend, const ggml_ return false; } -static void ggml_backend_vk_synchronize(ggml_backend_t backend) { - VK_LOG_DEBUG("ggml_backend_vk_synchronize()"); - ggml_backend_vk_context * ctx = (ggml_backend_vk_context 
*)backend->context; - if(ctx->transfer_ctx.expired()) { - return; - } +static void ggml_vk_synchronize(ggml_backend_vk_context * ctx) { + VK_LOG_DEBUG("ggml_vk_synchronize()"); - vk_context transfer_ctx = ctx->transfer_ctx.lock(); + bool do_transfer = !ctx->transfer_ctx.expired(); - ggml_vk_ctx_end(transfer_ctx); + vk_context transfer_ctx; + if (do_transfer) { + transfer_ctx = ctx->transfer_ctx.lock(); - for (auto& cpy : transfer_ctx->in_memcpys) { - memcpy(cpy.dst, cpy.src, cpy.n); + ggml_vk_ctx_end(transfer_ctx); + + for (auto& cpy : transfer_ctx->in_memcpys) { + memcpy(cpy.dst, cpy.src, cpy.n); + } + + ggml_vk_submit(transfer_ctx, {}); + ctx->submit_pending = true; } - ggml_vk_submit(transfer_ctx, ctx->fence); - ggml_vk_wait_for_fence(ctx); + if (ctx->submit_pending) { + { + std::lock_guard guard(queue_mutex); + ctx->device->compute_queue.queue.submit({}, ctx->fence); + } + ggml_vk_wait_for_fence(ctx); + ctx->submit_pending = false; + } - for (auto& cpy : transfer_ctx->out_memcpys) { - memcpy(cpy.dst, cpy.src, cpy.n); + if (do_transfer) { + for (auto& cpy : transfer_ctx->out_memcpys) { + memcpy(cpy.dst, cpy.src, cpy.n); + } + ctx->transfer_ctx.reset(); } +} - ctx->transfer_ctx.reset(); +static void ggml_backend_vk_synchronize(ggml_backend_t backend) { + VK_LOG_DEBUG("ggml_backend_vk_synchronize()"); + ggml_backend_vk_context * ctx = (ggml_backend_vk_context *)backend->context; + + ggml_vk_synchronize(ctx); + + ggml_vk_graph_cleanup(ctx); } static bool ggml_vk_is_empty(ggml_tensor * node) { @@ -12938,8 +12991,6 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg ctx->device->perf_logger->print_timings(); } - ggml_vk_graph_cleanup(ctx); - return GGML_STATUS_SUCCESS; UNUSED(backend); @@ -13168,9 +13219,9 @@ static ggml_backend_i ggml_backend_vk_interface = { /* .get_name = */ ggml_backend_vk_name, /* .free = */ ggml_backend_vk_free, /* .set_tensor_async = */ NULL, // ggml_backend_vk_set_tensor_async, - /* .get_tensor_async = */ NULL, // ggml_backend_vk_get_tensor_async, + /* .get_tensor_async = */ ggml_backend_vk_get_tensor_async, /* .cpy_tensor_async = */ NULL, // ggml_backend_vk_cpy_tensor_async, - /* .synchronize = */ NULL, // ggml_backend_vk_synchronize, + /* .synchronize = */ ggml_backend_vk_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, /* .graph_plan_update = */ NULL, From 9d3fa94c0f1c33cf8250c43d7edf387a85f422e6 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 15 Nov 2025 03:37:25 -0600 Subject: [PATCH 35/51] vulkan: skip all-negative-inf blocks in FA (llama/17186) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 8 ++- .../vulkan-shaders/flash_attn.comp | 48 +++++++++++----- .../vulkan-shaders/flash_attn_cm1.comp | 35 +++++++++++- .../vulkan-shaders/flash_attn_cm2.comp | 56 ++++++++++++------- 4 files changed, 110 insertions(+), 37 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 55b5a037d50..f8b1211c515 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -521,6 +521,7 @@ struct vk_device_struct { bool subgroup_shuffle; bool subgroup_ballot; bool subgroup_clustered; + bool subgroup_vote; bool multi_add; bool shader_int64; bool buffer_device_address; @@ -4188,6 +4189,9 @@ static vk_device ggml_vk_get_device(size_t idx) { device->subgroup_ballot = (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) && (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eBallot); + device->subgroup_vote 
= (vk11_props.subgroupSupportedStages & vk::ShaderStageFlagBits::eCompute) && + (vk11_props.subgroupSupportedOperations & vk::SubgroupFeatureFlagBits::eVote); + const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr; device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute; @@ -13572,8 +13576,8 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm default: return false; } - if (!coopmat2 && !device->subgroup_shuffle) { - // scalar FA uses subgroupShuffle + if (!coopmat2 && !(device->subgroup_shuffle && device->subgroup_vote)) { + // scalar/coopmat1 FA uses subgroupShuffle/subgroupAll return false; } return true; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp index 2255f9c168e..4bef48b006c 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn.comp @@ -7,6 +7,7 @@ #extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #extension GL_KHR_shader_subgroup_shuffle : enable +#extension GL_KHR_shader_subgroup_vote : enable #include "types.glsl" #include "flash_attn_base.glsl" @@ -108,6 +109,38 @@ void main() { [[dont_unroll]] for (uint32_t j = start_j; j < end_j; ++j) { + if ((p.mask_n_head_log2 & MASK_ENABLE_BIT) != 0) { + bool nem1_bounds_check = !(p.gqa_ratio > 1) && (p.nem1 % Br) != 0; + + float max_mask = NEG_FLT_MAX_OVER_2; + [[unroll]] for (uint32_t idx = 0; idx < Bc * Br; idx += gl_WorkGroupSize.x) { + uint32_t c = (idx + tid) % Bc; + uint32_t r = (idx + tid) / Bc; + if (idx + tid < Bc * Br) { + if ((!KV_bounds_check || j * Bc + c < KV) && (!nem1_bounds_check || i * Br + r < p.nem1)) { + float m = float(data_m[m_offset + (i * Br + r) * m_stride + (j * Bc + c)]); + masksh[c][r] = m; + max_mask = max(max_mask, m); + } else { + masksh[c][r] = float(0); + } + } + } + // skip the block if the mask is entirely -inf + bool all_less = subgroupAll(max_mask <= NEG_FLT_MAX_OVER_2); + barrier(); + if (gl_SubgroupInvocationID == 0) { + tmpsh[gl_SubgroupID] = all_less ? 
NEG_FLT_MAX_OVER_2 : 0.0f; + } + barrier(); + [[unroll]] for (uint s = 0; s < gl_NumSubgroups; ++s) { + max_mask = max(max_mask, tmpsh[s]); + } + if (max_mask <= NEG_FLT_MAX_OVER_2) { + continue; + } + } + float Sf[Br][cols_per_thread]; [[unroll]] for (uint32_t r = 0; r < Br; ++r) { [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) { @@ -153,21 +186,6 @@ void main() { } if ((p.mask_n_head_log2 & MASK_ENABLE_BIT) != 0) { - bool nem1_bounds_check = !(p.gqa_ratio > 1) && (p.nem1 % Br) != 0; - - [[unroll]] for (uint32_t idx = 0; idx < Bc * Br; idx += gl_WorkGroupSize.x) { - uint32_t c = (idx + tid) % Bc; - uint32_t r = (idx + tid) / Bc; - if (idx + tid < Bc * Br) { - if ((!KV_bounds_check || j * Bc + c < KV) && (!nem1_bounds_check || i * Br + r < p.nem1)) { - masksh[c][r] = float(data_m[m_offset + (i * Br + r) * m_stride + (j * Bc + c)]); - } else { - masksh[c][r] = float(0); - } - } - } - barrier(); - [[unroll]] for (uint32_t c = 0; c < cols_per_thread; ++c) { [[unroll]] for (uint32_t r = 0; r < Br; ++r) { float mvf = masksh[c * cols_per_iter + col_tid][r]; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp index 8699fa6c9cb..cd82e4abfab 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm1.comp @@ -7,6 +7,7 @@ #extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #extension GL_KHR_shader_subgroup_basic : enable +#extension GL_KHR_shader_subgroup_vote : enable #extension GL_KHR_memory_scope_semantics : enable #extension GL_KHR_cooperative_matrix : enable @@ -148,6 +149,37 @@ void main() { [[dont_unroll]] for (uint32_t j = start_j; j < end_j; ++j) { + float mask_cache[Bc * Br / WorkGroupSize]; + if ((p.mask_n_head_log2 & MASK_ENABLE_BIT) != 0) { + bool nem1_bounds_check = !(p.gqa_ratio > 1) && (p.nem1 % Br) != 0; + + float max_mask = NEG_FLT_MAX_OVER_2; + [[unroll]] for (uint32_t idx = 0; idx < Bc * Br; idx += gl_WorkGroupSize.x) { + uint32_t c = (idx + tid) % Bc; + uint32_t r = (idx + tid) / Bc; + if (idx + tid < Bc * Br || idx + gl_WorkGroupSize.x <= Bc * Br) { + if ((!KV_bounds_check || j * Bc + c < KV) && (!nem1_bounds_check || i * Br + r < p.nem1)) { + float m = float(data_m[m_offset + (i * Br + r) * m_stride + (j * Bc + c)]); + mask_cache[idx / WorkGroupSize] = m; + max_mask = max(max_mask, m); + } + } + } + // skip the block if the mask is entirely -inf + bool all_less = subgroupAll(max_mask <= NEG_FLT_MAX_OVER_2); + barrier(); + if (gl_SubgroupInvocationID == 0) { + tmpsh[gl_SubgroupID] = all_less ? 
NEG_FLT_MAX_OVER_2 : 0.0f; + } + barrier(); + [[unroll]] for (uint s = 0; s < gl_NumSubgroups; ++s) { + max_mask = max(max_mask, tmpsh[s]); + } + if (max_mask <= NEG_FLT_MAX_OVER_2) { + continue; + } + } + [[unroll]] for (uint32_t idx = 0; idx < Bc * HSK / 4; idx += gl_WorkGroupSize.x) { uint32_t d = (idx + tid) % (HSK / 4); uint32_t c = (idx + tid) / (HSK / 4); @@ -208,7 +240,8 @@ void main() { uint32_t r = (idx + tid) / Bc; if (idx + tid < Bc * Br || idx + gl_WorkGroupSize.x <= Bc * Br) { if ((!KV_bounds_check || j * Bc + c < KV) && (!nem1_bounds_check || i * Br + r < p.nem1)) { - sfsh[c * sfshstride + r] += ACC_TYPE(slope[r] * float(data_m[m_offset + (i * Br + r) * m_stride + (j * Bc + c)])); + float f = mask_cache[idx / WorkGroupSize]; + sfsh[c * sfshstride + r] += ACC_TYPE(slope[r] * f); } } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index fcfc60a8785..617d8510869 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -29,6 +29,10 @@ ACC_TYPE maxReduce(const in ACC_TYPE x, const in ACC_TYPE y) { return max(x, y); } +float16_t maxReduceFp16(const in float16_t x, const in float16_t y) { + return max(x, y); +} + ACC_TYPE smearReduce(const in ACC_TYPE x, const in ACC_TYPE y) { return x; } @@ -142,21 +146,7 @@ void main() { [[dont_unroll]] for (uint32_t j = start_j; j < end_j; ++j) { - coopmat S = coopmat(0); - - coopmat K_T; - - uint32_t k_offset = ik2*p.nb12 + ik3*p.nb13; - coopMatLoadTensorNV(K_T, data_k, k_offset, sliceTensorLayoutNV(tensorLayoutK, j * Bc, Bc, 0, HSK_pad), tensorViewTranspose DECODEFUNC); - S = coopMatMulAdd(Qf16, K_T, S); - - if (p.logit_softcap != 0.0f) { - [[unroll]] - for (int k = 0; k < S.length(); ++k) { - S[k] = ACC_TYPE(p.logit_softcap)*tanh(S[k]); - } - } - + coopmat mv; if ((p.mask_n_head_log2 & MASK_ENABLE_BIT) != 0) { bool nem1_bounds_check = !(p.gqa_ratio > 1) && (p.nem1 % Br) != 0; @@ -164,12 +154,17 @@ void main() { tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutM = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV); tensorLayoutM = setTensorLayoutDimensionNV(tensorLayoutM, p.nem1, KV); tensorLayoutM = setTensorLayoutStrideNV(tensorLayoutM, m_stride, 1); + tensorLayoutM = setTensorLayoutClampValueNV(tensorLayoutM, 0xfc00); // -inf in float16_t - coopmat mv; + coopmat mv, mvmax; coopMatLoadTensorNV(mv, data_m, m_offset, sliceTensorLayoutNV(tensorLayoutM, i * Br, Br, j * Bc, Bc)); - S += slopeMat*coopmat(mv); + // skip the block if the mask is entirely -inf + coopMatReduceNV(mvmax, mv, gl_CooperativeMatrixReduceRowAndColumnNV, maxReduceFp16); + if (mvmax[0] <= NEG_FLT_MAX_OVER_2) { + continue; + } } else { tensorLayoutNV<2, Clamp> tensorLayoutM = createTensorLayoutNV(2, Clamp); // Don't clamp against nem1 when GQA is enabled @@ -177,14 +172,37 @@ void main() { tensorLayoutM = setTensorLayoutDimensionNV(tensorLayoutM, m_height, KV); tensorLayoutM = setTensorLayoutStrideNV(tensorLayoutM, m_stride, 1); - coopmat mv; + coopmat mvmax; coopMatLoadTensorNV(mv, data_m, m_offset, sliceTensorLayoutNV(tensorLayoutM, i * Br, Br, j * Bc, Bc)); - S += slopeMat*coopmat(mv); + // skip the block if the mask is entirely -inf + coopMatReduceNV(mvmax, mv, gl_CooperativeMatrixReduceRowAndColumnNV, maxReduceFp16); + if (mvmax[0] <= NEG_FLT_MAX_OVER_2) { + continue; + } } } + coopmat S = coopmat(0); + + coopmat K_T; + + uint32_t k_offset = ik2*p.nb12 + ik3*p.nb13; + 
coopMatLoadTensorNV(K_T, data_k, k_offset, sliceTensorLayoutNV(tensorLayoutK, j * Bc, Bc, 0, HSK_pad), tensorViewTranspose DECODEFUNC);
+        S = coopMatMulAdd(Qf16, K_T, S);
+
+        if (p.logit_softcap != 0.0f) {
+            [[unroll]]
+            for (int k = 0; k < S.length(); ++k) {
+                S[k] = ACC_TYPE(p.logit_softcap)*tanh(S[k]);
+            }
+        }
+
+        if ((p.mask_n_head_log2 & MASK_ENABLE_BIT) != 0) {
+            S += slopeMat*coopmat(mv);
+        }
+
         // Clear padding elements to -inf, so they don't contribute to rowmax
         if (Clamp != 0 &&
             ((j + 1) * Bc > KV ||

From a175f8579f62e05855f477aaf96c45b54dc80767 Mon Sep 17 00:00:00 2001
From: Jeff Bolz
Date: Sat, 15 Nov 2025 04:56:15 -0600
Subject: [PATCH 36/51] vulkan: Use ggml_vk_tensor_subbuffer in mul_mat_vec(id)
 paths (llama/17244)

* vulkan: Use ggml_vk_tensor_subbuffer in mul_mat_vec(id) paths

* set allow_misalign
---
 ggml/src/ggml-vulkan/ggml-vulkan.cpp | 631 ++++++++++-----------------
 1 file changed, 220 insertions(+), 411 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index f8b1211c515..a5a6ad9cbfb 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -838,6 +838,32 @@ struct vk_mat_vec_push_constants {
     uint32_t broadcast3;
 };
 
+struct vk_mat_vec_p021_push_constants {
+    uint32_t ncols_x;
+    uint32_t nrows_x;
+    uint32_t nchannels_x;
+    uint32_t nchannels_y;
+    uint32_t b_offset;
+    uint32_t d_offset;
+    uint32_t enable_bias;
+};
+
+struct vk_mat_vec_nc_push_constants {
+    uint32_t ncols_x;
+    uint32_t nrows_x;
+    uint32_t row_stride_x;
+    uint32_t channel_stride_x;
+    uint32_t channel_stride_y;
+    uint32_t channel_x_divisor;
+    uint32_t ne12;
+    uint32_t b_offset;
+    uint32_t d_offset;
+    uint32_t nb03;
+    uint32_t nb13;
+    uint32_t nb23;
+    uint32_t enable_bias;
+};
+
 struct vk_mat_mat_id_push_constants {
     uint32_t M; uint32_t N; uint32_t K;
     uint32_t stride_a; uint32_t stride_b; uint32_t stride_d;
@@ -1637,6 +1663,50 @@ static uint64_t vk_tensor_offset(const ggml_tensor * tensor) {
     return (uint8_t *) tensor->data - (uint8_t *) vk_ptr_base;
 }
 
+static uint32_t get_misalign_bytes(const ggml_backend_vk_context * ctx, const ggml_tensor * t)
+{
+    return ((vk_tensor_offset(t) + t->view_offs) & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1));
+}
+
+template <typename T> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, T &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
+    GGML_UNUSED(p);
+    GGML_UNUSED(src0);
+    GGML_UNUSED(src1);
+    GGML_UNUSED(src2);
+    GGML_UNUSED(src3);
+    GGML_UNUSED(dst);
+    static_assert(!std::is_const<T>::value, "unexpected type");
+    GGML_ASSERT(!src0 || get_misalign_bytes(ctx, src0) == 0);
+    GGML_ASSERT(!src1 || get_misalign_bytes(ctx, src1) == 0);
+    GGML_ASSERT(!src2 || get_misalign_bytes(ctx, src2) == 0);
+    GGML_ASSERT(!src3 || get_misalign_bytes(ctx, src3) == 0);
+    GGML_ASSERT(!dst || get_misalign_bytes(ctx, dst) == 0);
+}
+
+template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_mat_vec_p021_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
+    const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type);
+    const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type);
+
+    p.b_offset = b_offset;
+    p.d_offset = d_offset;
+
+    GGML_UNUSED(src0);
+    GGML_UNUSED(src2);
+    GGML_UNUSED(src3);
+}
+
+template <> void
init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_mat_vec_nc_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) { + const uint32_t b_offset = get_misalign_bytes(ctx, src1) / ggml_type_size(src1->type); + const uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type); + + p.b_offset = b_offset; + p.d_offset = d_offset; + + GGML_UNUSED(src0); + GGML_UNUSED(src2); + GGML_UNUSED(src3); +} + struct ggml_backend_vk_buffer_context { vk_device_ref device; vk_buffer dev_buffer; @@ -3393,6 +3463,8 @@ static void ggml_vk_load_shaders(vk_device& device) { const uint32_t force_subgroup_size = use_subgroups ? subgroup_size : 0; const uint32_t force_subgroup_size16 = use_subgroups16 ? subgroup_size16 : 0; + static constexpr uint32_t mul_mat_vec_num_bindings = 4; + static constexpr uint32_t mul_mat_vec_id_num_bindings = 5; for (uint32_t w = 0; w < DMMV_WG_SIZE_COUNT; ++w) { const uint32_t wg_size_subgroup = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size : (subgroup_size * 4); @@ -3407,92 +3479,92 @@ static void ggml_vk_load_shaders(vk_device& device) { SHADER_REDUCTION_MODE_SHMEM; for (uint32_t i = 0; i < mul_mat_vec_max_cols; ++i) { - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32", arr_dmmv_f32_f32_f32_len[reduc], arr_dmmv_f32_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32", arr_dmmv_f16_f32_f32_len[reduc], arr_dmmv_f16_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f32_f32", arr_dmmv_bf16_f32_f32_len[reduc], arr_dmmv_bf16_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32", arr_dmmv_q4_0_f32_f32_len[reduc], arr_dmmv_q4_0_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32", arr_dmmv_q4_1_f32_f32_len[reduc], arr_dmmv_q4_1_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32", arr_dmmv_q5_0_f32_f32_len[reduc], arr_dmmv_q5_0_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32", arr_dmmv_q5_1_f32_f32_len[reduc], arr_dmmv_q5_1_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, 
{wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32", arr_dmmv_q8_0_f32_f32_len[reduc], arr_dmmv_q8_0_f32_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32", arr_dmmv_q2_k_f32_f32_len[reduc16], arr_dmmv_q2_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32", arr_dmmv_q3_k_f32_f32_len[reduc16], arr_dmmv_q3_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32", arr_dmmv_q4_k_f32_f32_len[reduc16], arr_dmmv_q4_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32", arr_dmmv_q5_k_f32_f32_len[reduc16], arr_dmmv_q5_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32", arr_dmmv_q6_k_f32_f32_len[reduc16], arr_dmmv_q6_k_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f32_f32", arr_dmmv_iq1_s_f32_f32_len[reduc16], arr_dmmv_iq1_s_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f32_f32", arr_dmmv_iq1_m_f32_f32_len[reduc16], arr_dmmv_iq1_m_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f32_f32", arr_dmmv_iq2_xxs_f32_f32_len[reduc16], arr_dmmv_iq2_xxs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f32_f32", arr_dmmv_iq2_xs_f32_f32_len[reduc16], arr_dmmv_iq2_xs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, 
use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f32_f32", arr_dmmv_iq2_s_f32_f32_len[reduc16], arr_dmmv_iq2_s_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f32_f32", arr_dmmv_iq3_xxs_f32_f32_len[reduc16], arr_dmmv_iq3_xxs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f32_f32", arr_dmmv_iq3_s_f32_f32_len[reduc16], arr_dmmv_iq3_s_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f32_f32", arr_dmmv_iq4_xs_f32_f32_len[reduc16], arr_dmmv_iq4_xs_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32", arr_dmmv_iq4_nl_f32_f32_len[reduc16], arr_dmmv_iq4_nl_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f32_f32", arr_dmmv_mxfp4_f32_f32_len[reduc16], arr_dmmv_mxfp4_f32_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
-
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32", arr_dmmv_f32_f16_f32_len[reduc], arr_dmmv_f32_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32", arr_dmmv_f16_f16_f32_len[reduc], arr_dmmv_f16_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f16_f32", arr_dmmv_bf16_f16_f32_len[reduc], arr_dmmv_bf16_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32", arr_dmmv_q4_0_f16_f32_len[reduc], arr_dmmv_q4_0_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32", arr_dmmv_q4_1_f16_f32_len[reduc], arr_dmmv_q4_1_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32", arr_dmmv_q5_0_f16_f32_len[reduc], arr_dmmv_q5_0_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32", arr_dmmv_q5_1_f16_f32_len[reduc], arr_dmmv_q5_1_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32", arr_dmmv_q8_0_f16_f32_len[reduc], arr_dmmv_q8_0_f16_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32", arr_dmmv_q2_k_f16_f32_len[reduc16], arr_dmmv_q2_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32", arr_dmmv_q3_k_f16_f32_len[reduc16], arr_dmmv_q3_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32", arr_dmmv_q4_k_f16_f32_len[reduc16], arr_dmmv_q4_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32", arr_dmmv_q5_k_f16_f32_len[reduc16], arr_dmmv_q5_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32", arr_dmmv_q6_k_f16_f32_len[reduc16], arr_dmmv_q6_k_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f16_f32", arr_dmmv_iq1_s_f16_f32_len[reduc16], arr_dmmv_iq1_s_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f16_f32", arr_dmmv_iq1_m_f16_f32_len[reduc16], arr_dmmv_iq1_m_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f16_f32", arr_dmmv_iq2_xxs_f16_f32_len[reduc16], arr_dmmv_iq2_xxs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f16_f32", arr_dmmv_iq2_xs_f16_f32_len[reduc16], arr_dmmv_iq2_xs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f16_f32", arr_dmmv_iq2_s_f16_f32_len[reduc16], arr_dmmv_iq2_s_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f16_f32", arr_dmmv_iq3_xxs_f16_f32_len[reduc16], arr_dmmv_iq3_xxs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f16_f32", arr_dmmv_iq3_s_f16_f32_len[reduc16], arr_dmmv_iq3_s_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f16_f32", arr_dmmv_iq4_xs_f16_f32_len[reduc16], arr_dmmv_iq4_xs_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32", arr_dmmv_iq4_nl_f16_f32_len[reduc16], arr_dmmv_iq4_nl_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f16_f32", arr_dmmv_mxfp4_f16_f32_len[reduc16], arr_dmmv_mxfp4_f16_f32_data[reduc16], "main", 4, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32", arr_dmmv_f32_f32_f32_len[reduc], arr_dmmv_f32_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32", arr_dmmv_f16_f32_f32_len[reduc], arr_dmmv_f16_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size);
"mul_mat_vec_f16_f32_f32", arr_dmmv_f16_f32_f32_len[reduc], arr_dmmv_f16_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f32_f32", arr_dmmv_bf16_f32_f32_len[reduc], arr_dmmv_bf16_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32", arr_dmmv_q4_0_f32_f32_len[reduc], arr_dmmv_q4_0_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32", arr_dmmv_q4_1_f32_f32_len[reduc], arr_dmmv_q4_1_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32", arr_dmmv_q5_0_f32_f32_len[reduc], arr_dmmv_q5_0_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32", arr_dmmv_q5_1_f32_f32_len[reduc], arr_dmmv_q5_1_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32", arr_dmmv_q8_0_f32_f32_len[reduc], arr_dmmv_q8_0_f32_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32", arr_dmmv_q2_k_f32_f32_len[reduc16], arr_dmmv_q2_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32", arr_dmmv_q3_k_f32_f32_len[reduc16], arr_dmmv_q3_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32", arr_dmmv_q4_k_f32_f32_len[reduc16], arr_dmmv_q4_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, 
force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32", arr_dmmv_q5_k_f32_f32_len[reduc16], arr_dmmv_q5_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32", arr_dmmv_q6_k_f32_f32_len[reduc16], arr_dmmv_q6_k_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f32_f32", arr_dmmv_iq1_s_f32_f32_len[reduc16], arr_dmmv_iq1_s_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f32_f32", arr_dmmv_iq1_m_f32_f32_len[reduc16], arr_dmmv_iq1_m_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f32_f32", arr_dmmv_iq2_xxs_f32_f32_len[reduc16], arr_dmmv_iq2_xxs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f32_f32", arr_dmmv_iq2_xs_f32_f32_len[reduc16], arr_dmmv_iq2_xs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f32_f32", arr_dmmv_iq2_s_f32_f32_len[reduc16], arr_dmmv_iq2_s_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f32_f32", arr_dmmv_iq3_xxs_f32_f32_len[reduc16], arr_dmmv_iq3_xxs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f32_f32", arr_dmmv_iq3_s_f32_f32_len[reduc16], arr_dmmv_iq3_s_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f32_f32", 
arr_dmmv_iq4_xs_f32_f32_len[reduc16], arr_dmmv_iq4_xs_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32", arr_dmmv_iq4_nl_f32_f32_len[reduc16], arr_dmmv_iq4_nl_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f32_f32", arr_dmmv_mxfp4_f32_f32_len[reduc16], arr_dmmv_mxfp4_f32_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16); + + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32", arr_dmmv_f32_f16_f32_len[reduc], arr_dmmv_f32_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32", arr_dmmv_f16_f16_f32_len[reduc], arr_dmmv_f16_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_BF16][i], "mul_mat_vec_bf16_f16_f32", arr_dmmv_bf16_f16_f32_len[reduc], arr_dmmv_bf16_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {wg_size_subgroup, 2, i+1}, 1, false, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32", arr_dmmv_q4_0_f16_f32_len[reduc], arr_dmmv_q4_0_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32", arr_dmmv_q4_1_f16_f32_len[reduc], arr_dmmv_q4_1_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32", arr_dmmv_q5_0_f16_f32_len[reduc], arr_dmmv_q5_0_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32", arr_dmmv_q5_1_f16_f32_len[reduc], arr_dmmv_q5_1_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup, 2*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size); + 
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32", arr_dmmv_q8_0_f16_f32_len[reduc], arr_dmmv_q8_0_f16_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup, 1*rm_stdq, i+1}, 1, true, use_subgroups, force_subgroup_size);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32", arr_dmmv_q2_k_f16_f32_len[reduc16], arr_dmmv_q2_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32", arr_dmmv_q3_k_f16_f32_len[reduc16], arr_dmmv_q3_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32", arr_dmmv_q4_k_f16_f32_len[reduc16], arr_dmmv_q4_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32", arr_dmmv_q5_k_f16_f32_len[reduc16], arr_dmmv_q5_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32", arr_dmmv_q6_k_f16_f32_len[reduc16], arr_dmmv_q6_k_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {wg_size_subgroup16, rm_kq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_S][i], "mul_mat_vec_iq1_s_f16_f32", arr_dmmv_iq1_s_f16_f32_len[reduc16], arr_dmmv_iq1_s_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ1_M][i], "mul_mat_vec_iq1_m_f16_f32", arr_dmmv_iq1_m_f16_f32_len[reduc16], arr_dmmv_iq1_m_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f16_f32", arr_dmmv_iq2_xxs_f16_f32_len[reduc16], arr_dmmv_iq2_xxs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f16_f32", arr_dmmv_iq2_xs_f16_f32_len[reduc16], arr_dmmv_iq2_xs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f16_f32", arr_dmmv_iq2_s_f16_f32_len[reduc16], arr_dmmv_iq2_s_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f16_f32", arr_dmmv_iq3_xxs_f16_f32_len[reduc16], arr_dmmv_iq3_xxs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f16_f32", arr_dmmv_iq3_s_f16_f32_len[reduc16], arr_dmmv_iq3_s_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f16_f32", arr_dmmv_iq4_xs_f16_f32_len[reduc16], arr_dmmv_iq4_xs_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32", arr_dmmv_iq4_nl_f16_f32_len[reduc16], arr_dmmv_iq4_nl_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[w][GGML_TYPE_MXFP4][i], "mul_mat_vec_mxfp4_f16_f32", arr_dmmv_mxfp4_f16_f32_len[reduc16], arr_dmmv_mxfp4_f16_f32_data[reduc16], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {rm_iq, 1, 1}, {wg_size_subgroup16, rm_iq, i+1}, 1, true, use_subgroups16, force_subgroup_size16);
 #if defined(GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT)
 if (device->integer_dot_product) {
 const uint32_t subgroup_size_int = (device->vendor_id == VK_VENDOR_ID_INTEL && device->subgroup_size_control) ? device->subgroup_min_size : device->subgroup_size;
 const uint32_t wg_size_subgroup_int = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size_int : (subgroup_size_int * 4);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_q8_1_f32", arr_dmmv_q4_0_q8_1_f32_len[reduc], arr_dmmv_q4_0_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_q8_1_f32", arr_dmmv_q4_1_q8_1_f32_len[reduc], arr_dmmv_q4_1_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_q8_1_f32", arr_dmmv_q5_0_q8_1_f32_len[reduc], arr_dmmv_q5_0_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_q8_1_f32", arr_dmmv_q5_1_q8_1_f32_len[reduc], arr_dmmv_q5_1_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_q8_1_f32", arr_dmmv_q8_0_q8_1_f32_len[reduc], arr_dmmv_q8_0_q8_1_f32_data[reduc], "main", 4, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup_int, 1*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_q8_1_f32", arr_dmmv_q4_0_q8_1_f32_len[reduc], arr_dmmv_q4_0_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_q8_1_f32", arr_dmmv_q4_1_q8_1_f32_len[reduc], arr_dmmv_q4_1_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_q8_1_f32", arr_dmmv_q5_0_q8_1_f32_len[reduc], arr_dmmv_q5_0_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_q8_1_f32", arr_dmmv_q5_1_q8_1_f32_len[reduc], arr_dmmv_q5_1_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {wg_size_subgroup_int, 2*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_q8_1_f32[w][GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_q8_1_f32", arr_dmmv_q8_0_q8_1_f32_len[reduc], arr_dmmv_q8_0_q8_1_f32_data[reduc], "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {wg_size_subgroup_int, 1*rm_stdq, i+1}, 1, true, use_subgroups, subgroup_size_int);
 }
 #endif // GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT
 }
 }
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_BF16], "mul_mat_vec_id_bf16_f32", mul_mat_vec_id_bf16_f32_len, mul_mat_vec_id_bf16_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_S], "mul_mat_vec_id_iq1_s_f32", mul_mat_vec_id_iq1_s_f32_len, mul_mat_vec_id_iq1_s_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_M], "mul_mat_vec_id_iq1_m_f32", mul_mat_vec_id_iq1_m_f32_len, mul_mat_vec_id_iq1_m_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XXS], "mul_mat_vec_id_iq2_xxs_f32", mul_mat_vec_id_iq2_xxs_f32_len, mul_mat_vec_id_iq2_xxs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XS], "mul_mat_vec_id_iq2_xs_f32", mul_mat_vec_id_iq2_xs_f32_len, mul_mat_vec_id_iq2_xs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_S], "mul_mat_vec_id_iq2_s_f32", mul_mat_vec_id_iq2_s_f32_len, mul_mat_vec_id_iq2_s_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_XXS], "mul_mat_vec_id_iq3_xxs_f32", mul_mat_vec_id_iq3_xxs_f32_len, mul_mat_vec_id_iq3_xxs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_S], "mul_mat_vec_id_iq3_s_f32", mul_mat_vec_id_iq3_s_f32_len, mul_mat_vec_id_iq3_s_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_XS], "mul_mat_vec_id_iq4_xs_f32", mul_mat_vec_id_iq4_xs_f32_len, mul_mat_vec_id_iq4_xs_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
- ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_MXFP4], "mul_mat_vec_id_mxfp4_f32", mul_mat_vec_id_mxfp4_f32_len, mul_mat_vec_id_mxfp4_f32_data, "main", 5, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_BF16], "mul_mat_vec_id_bf16_f32", mul_mat_vec_id_bf16_f32_len, mul_mat_vec_id_bf16_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_S], "mul_mat_vec_id_iq1_s_f32", mul_mat_vec_id_iq1_s_f32_len, mul_mat_vec_id_iq1_s_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ1_M], "mul_mat_vec_id_iq1_m_f32", mul_mat_vec_id_iq1_m_f32_len, mul_mat_vec_id_iq1_m_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XXS], "mul_mat_vec_id_iq2_xxs_f32", mul_mat_vec_id_iq2_xxs_f32_len, mul_mat_vec_id_iq2_xxs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XS], "mul_mat_vec_id_iq2_xs_f32", mul_mat_vec_id_iq2_xs_f32_len, mul_mat_vec_id_iq2_xs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_S], "mul_mat_vec_id_iq2_s_f32", mul_mat_vec_id_iq2_s_f32_len, mul_mat_vec_id_iq2_s_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_XXS], "mul_mat_vec_id_iq3_xxs_f32", mul_mat_vec_id_iq3_xxs_f32_len, mul_mat_vec_id_iq3_xxs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_S], "mul_mat_vec_id_iq3_s_f32", mul_mat_vec_id_iq3_s_f32_len, mul_mat_vec_id_iq3_s_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_XS], "mul_mat_vec_id_iq4_xs_f32", mul_mat_vec_id_iq4_xs_f32_len, mul_mat_vec_id_iq4_xs_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
+ ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_MXFP4], "mul_mat_vec_id_mxfp4_f32", mul_mat_vec_id_mxfp4_f32_len, mul_mat_vec_id_mxfp4_f32_data, "main", mul_mat_vec_id_num_bindings, sizeof(vk_mat_vec_id_push_constants), {rm_iq, 1, 1}, {subgroup_size_16, rm_iq}, 1, true);
 // dequant shaders
 ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1);
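Note on the hunk above: every mul_mat_vec pipeline used to hardcode its descriptor-binding count ("main", 4, ..., or "main", 5, ... for the _id variants); the patch replaces the literals with the named constants mul_mat_vec_num_bindings and mul_mat_vec_id_num_bindings. Their definitions fall outside this excerpt; a minimal sketch of what they are assumed to look like, with the values read off the subbuffer lists passed to ggml_vk_dispatch_pipeline later in this patch:

    // Sketch only (assumed definitions): one binding per subbuffer bound at dispatch.
    // mul_mat_vec shaders bind { d_X, d_Y, d_D, d_B }; the _id variants additionally
    // bind the expert-id buffer d_ids.
    static constexpr uint32_t mul_mat_vec_num_bindings    = 4;
    static constexpr uint32_t mul_mat_vec_id_num_bindings = 5;

Naming the count means a future extra binding only has to change in one place instead of in every ggml_vk_create_pipeline call.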
@@ -3577,12 +3649,12 @@ static void ggml_vk_load_shaders(vk_device& device) {
 for (uint32_t i = 0; i < p021_max_gqa_ratio; ++i) {
 if (device->subgroup_arithmetic && device->subgroup_require_full_support) {
- ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_subgroup_add_len, mul_mat_vec_p021_f16_f32_subgroup_add_data, "main", 4, 7 * sizeof(uint32_t), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true, true);
+ ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_subgroup_add_len, mul_mat_vec_p021_f16_f32_subgroup_add_data, "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_p021_push_constants), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true, true);
 } else {
- ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 4, 7 * sizeof(uint32_t), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true);
+ ggml_vk_create_pipeline2(device, device->pipeline_mul_mat_vec_p021_f16_f32[i], "mul_mat_vec_p021_f16_f32"+std::to_string(i+1), mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_p021_push_constants), {1, 1, 1}, {device->subgroup_size, i + 1}, 1, true);
 }
 }
- ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 4, 13 * sizeof(uint32_t), {1, 1, 1}, {}, 1);
+ ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", mul_mat_vec_num_bindings, sizeof(vk_mat_vec_nc_push_constants), {1, 1, 1}, {}, 1);
 ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
 ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1);
@@ -6259,7 +6331,7 @@ static vk_pipeline ggml_vk_get_cpy_pipeline(ggml_backend_vk_context * ctx, const
 GGML_ABORT("fatal error");
 }
-static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, vk_subbuffer&& in, vk_subbuffer&& out) {
+static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline pipeline, const ggml_tensor * tensor, const vk_subbuffer & in, const vk_subbuffer & out) {
 VK_LOG_DEBUG("ggml_vk_cpy_to_contiguous((" << tensor << ", type=" << tensor->type << ", ne0=" << tensor->ne[0] << ", ne1=" << tensor->ne[1] << ", ne2=" << tensor->ne[2] << ", ne3=" << tensor->ne[3] << ", nb0=" << tensor->nb[0] << ", nb1=" << tensor->nb[1] << ", nb2=" << tensor->nb[2] << ", nb3=" << tensor->nb[3] << "), ";
 std::cerr << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ")");
 const int tensor_type_size = ggml_type_size(tensor->type);
@@ -6298,7 +6370,7 @@ static vk_pipeline ggml_vk_get_quantize_pipeline(ggml_backend_vk_context * ctx,
 }
 }
-static void ggml_vk_quantize_q8_1(ggml_backend_vk_context * ctx, vk_context& subctx, vk_subbuffer&& in, vk_subbuffer&& out, uint32_t ne) {
+static void ggml_vk_quantize_q8_1(ggml_backend_vk_context * ctx, vk_context& subctx, const vk_subbuffer & in, const vk_subbuffer & out, uint32_t ne) {
 VK_LOG_DEBUG("ggml_vk_quantize_q8_1(" << "buffer in size=" << in.buffer->size << ", buffer out size=" << out.buffer->size << ", " << ne << ")");
 vk_pipeline pipeline = ggml_vk_get_quantize_pipeline(ctx, GGML_TYPE_Q8_1);
@@ -6646,24 +6718,6 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 GGML_ASSERT(ne11 == 1 || ne12 * ne13 == 1);
 bool batch_n = ne11 > 1;
- ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
- ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
-
- vk_buffer d_Qx = nullptr;
- size_t qx_buf_offset = 0;
- vk_buffer d_Qy = nullptr;
- size_t qy_buf_offset = 0;
-
- bool src0_uma = false;
- bool src1_uma = false;
-
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
- ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
- src0_uma = d_Qx != nullptr;
- src1_uma = d_Qy != nullptr;
- }
-
 const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
 const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
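The hunk above drops the per-function UMA boilerplate in favor of ggml_vk_tensor_subbuffer, whose definition is not part of this excerpt. A sketch of what the helper is assumed to do, reconstructed from the deleted lines (the real helper also accepts an extra bool at some call sites, e.g. ggml_vk_tensor_subbuffer(ctx, src1, true), which this sketch omits):

    // Sketch (assumptions marked): resolve a tensor to a { buffer, offset, size }
    // vk_subbuffer, preferring a host-visible UMA mapping when available.
    static vk_subbuffer ggml_vk_tensor_subbuffer_sketch(ggml_backend_vk_context * ctx, const ggml_tensor * t) {
        vk_buffer buf = nullptr;
        size_t offset = 0;
        if (ctx->device->uma) {
            ggml_vk_host_get(ctx->device, t->data, buf, offset); // UMA path, as in the deleted code
        }
        if (buf == nullptr) {
            auto * buf_ctx = (ggml_backend_vk_buffer_context *)t->buffer->context;
            buf    = buf_ctx->dev_buffer;                        // device-buffer path
            offset = vk_tensor_offset(t) + t->view_offs;
        }
        return vk_subbuffer{ buf, offset, ggml_nbytes(t) };      // size assumed to be the tensor's byte size
    }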
@@ -6707,14 +6761,11 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 const uint64_t x_ne = ggml_nelements(src0);
 const uint64_t y_ne = ggml_nelements(src1);
- const uint64_t d_ne = ggml_nelements(dst);
 const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
- const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
 const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
 const uint64_t y_sz = quantize_y ? (ggml_vk_align_size(y_ne, 128) * ggml_type_size(GGML_TYPE_Q8_1) / ggml_blck_size(GGML_TYPE_Q8_1)) : (f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne);
- const uint64_t d_sz = sizeof(float) * d_ne;
 {
 if (
@@ -6744,51 +6795,21 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1);
 }
- vk_buffer d_D;
- uint64_t d_buf_offset = 0;
+ vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
+ vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
+ vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1);
+ vk_subbuffer d_X, d_Y;
- if (ctx->num_additional_fused_ops > 0) {
- const ggml_tensor * add = cgraph->nodes[node_idx + 1];
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(add) + add->view_offs;
- } else {
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
- }
-
- GGML_ASSERT(d_D != nullptr);
- vk_buffer d_X;
- uint64_t x_buf_offset = 0;
- vk_buffer d_Y;
- uint64_t y_buf_offset = 0;
- if(!src0_uma) {
- d_Qx = src0_buf_ctx->dev_buffer;
- qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
- GGML_ASSERT(d_Qx != nullptr);
- }
- if(!src1_uma) {
- d_Qy = src1_buf_ctx->dev_buffer;
- qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
- GGML_ASSERT(d_Qy != nullptr);
- }
 if (qx_needs_dequant) {
- d_X = ctx->prealloc_x;
+ d_X = { ctx->prealloc_x, 0, ctx->prealloc_x->size };
 } else {
 d_X = d_Qx;
- x_buf_offset = qx_buf_offset;
 GGML_ASSERT(qx_sz == x_sz);
 }
- if (qy_needs_dequant) {
- d_Y = ctx->prealloc_y;
- } else if (quantize_y) {
- d_Y = ctx->prealloc_y;
- GGML_ASSERT(d_Y->size >= CEIL_DIV(y_sz, 144) * 144);
+ if (qy_needs_dequant || quantize_y) {
+ d_Y = { ctx->prealloc_y, 0, ctx->prealloc_y->size };
 } else {
 d_Y = d_Qy;
- y_buf_offset = qy_buf_offset;
- GGML_ASSERT(qy_sz == y_sz);
 }
 if (x_non_contig) {
@@ -6797,7 +6818,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 }
 GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
- ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, d_Qx, d_X);
 }
 if (y_non_contig) {
 GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
@@ -6806,7 +6827,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 if (ctx->prealloc_y_need_sync) {
 ggml_vk_sync_buffers(ctx, subctx);
 }
- ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, d_Qy, d_Y);
 ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
 ctx->prealloc_y_last_tensor_used = src1;
 }
@@ -6817,7 +6838,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 if (ctx->prealloc_y_need_sync) {
 ggml_vk_sync_buffers(ctx, subctx);
 }
- ggml_vk_quantize_q8_1(ctx, subctx, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0), y_ne);
+ ggml_vk_quantize_q8_1(ctx, subctx, d_Qy, d_Y, y_ne);
 ctx->prealloc_y_last_pipeline_used = to_q8_1.get();
 ctx->prealloc_y_last_tensor_used = src1;
 }
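Aside on the signature change to ggml_vk_cpy_to_contiguous and ggml_vk_quantize_q8_1 earlier in this patch: moving the parameters from vk_subbuffer&& to const vk_subbuffer & is what lets the call sites above pass the named subbuffers d_Qx, d_Qy, d_X and d_Y directly instead of constructing temporaries with ggml_vk_subbuffer(...). A minimal illustration with hypothetical function names:

    void takes_rvalue(vk_subbuffer && in);         // old style: binds only to temporaries
    void takes_const_ref(const vk_subbuffer & in); // new style: binds to lvalues and temporaries

    void caller(const vk_subbuffer & d_Qy) {
        // takes_rvalue(d_Qy);                     // would not compile: d_Qy is an lvalue
        takes_const_ref(d_Qy);                     // fine
        takes_const_ref(vk_subbuffer{ d_Qy.buffer, 0, d_Qy.size }); // temporaries still bind
    }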
@@ -6848,26 +6869,13 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 uint32_t enable_bias = ctx->num_additional_fused_ops > 0;
- vk_buffer d_B = d_D;
- size_t b_buf_offset = 0;
- uint64_t b_sz = 1;
+ vk_subbuffer d_B = d_D;
 if (enable_bias) {
 const ggml_tensor * add = cgraph->nodes[node_idx + 1];
 const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
- bool b_uma = false;
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
- b_uma = d_B != nullptr;
- }
- if(!b_uma) {
- ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
- d_B = bias_buf_ctx->dev_buffer;
- b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
- GGML_ASSERT(d_B != nullptr);
- b_sz = ggml_nbytes(bias);
- }
+ d_B = ggml_vk_tensor_subbuffer(ctx, bias);
 }
 // compute
@@ -6878,10 +6886,10 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 };
 ggml_vk_dispatch_pipeline(ctx, subctx, dmmv, {
- vk_subbuffer{ d_X, x_buf_offset, x_sz },
- vk_subbuffer{ d_Y, y_buf_offset, y_sz },
- vk_subbuffer{ d_D, d_buf_offset, d_sz },
- vk_subbuffer{ d_B, b_buf_offset, b_sz },
+ d_X,
+ d_Y,
+ d_D,
+ d_B,
 }, pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z });
@@ -6912,34 +6920,13 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c
 const uint64_t ne02 = src0->ne[2];
 // const uint64_t ne03 = src0->ne[3];
- const uint64_t ne10 = src1->ne[0];
+ //const uint64_t ne10 = src1->ne[0];
 const uint64_t ne11 = src1->ne[1];
 const uint64_t ne12 = src1->ne[2];
 // const uint64_t ne13 = src1->ne[3];
 GGML_ASSERT(ne11 == 1);
- ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
- ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
-
- vk_buffer d_Qy = nullptr;
- size_t qy_buf_offset = 0;
-
- bool src1_uma = false;
-
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
- src1_uma = d_Qy != nullptr;
- }
-
- const uint64_t x_ne = ne00 * ne01 * ne02;
- const uint64_t y_ne = ne10 * ne11 * ne12;
- const uint64_t d_ne = ne01 * ne11 * ne12;
-
- const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
- const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
- const uint64_t d_sz = sizeof(float) * d_ne;
-
 // With grouped query attention there are > 1 Q matrices per K, V matrix.
 uint32_t gqa_ratio = (uint32_t)ne12 / (uint32_t)ne02;
 if (gqa_ratio > 8 || gqa_ratio == 0 || ne12 != ne02 * gqa_ratio) {
@@ -6951,61 +6938,29 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c
 ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], 1);
 }
- vk_buffer d_D;
- uint64_t d_buf_offset = 0;
+ vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true);
+ vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
+ vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1, true);
- if (ctx->num_additional_fused_ops > 0) {
- const ggml_tensor * add = cgraph->nodes[node_idx + 1];
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(add) + add->view_offs;
- } else {
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
- }
- GGML_ASSERT(d_D != nullptr);
- vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
- const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
- GGML_ASSERT(d_Qx != nullptr);
- if (!src1_uma) {
- d_Qy = src1_buf_ctx->dev_buffer;
- qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
- GGML_ASSERT(d_Qx != nullptr);
- }
-
- const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
- const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;
-
- const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
- const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
+ vk_subbuffer d_B = d_D;
 uint32_t enable_bias = ctx->num_additional_fused_ops > 0;
- vk_buffer d_B = d_D;
- size_t b_buf_offset = 0;
- uint64_t b_sz = 1;
-
 if (enable_bias) {
 const ggml_tensor * add = cgraph->nodes[node_idx + 1];
 const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
- bool b_uma = false;
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
- b_uma = d_B != nullptr;
- }
- if(!b_uma) {
- ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
- d_B = bias_buf_ctx->dev_buffer;
- b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
- GGML_ASSERT(d_B != nullptr);
- b_sz = ggml_nbytes(bias);
- }
+ d_B = ggml_vk_tensor_subbuffer(ctx, bias);
 }
 // compute
- const std::array<uint32_t, 7> pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)), enable_bias };
+
+ vk_mat_vec_p021_push_constants pc = {
+ (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12,
+ 0, 0, enable_bias
+ };
+
+ init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
 uint32_t workgroups_z = (uint32_t)ne12;
 // When gqa_ratio > 1, each invocation does multiple rows and we can launch fewer workgroups
@@ -7015,10 +6970,10 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c
 ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_p021_f16_f32[gqa_ratio - 1], {
- vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz },
- vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset },
- vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset },
- vk_subbuffer{ d_B, b_buf_offset, b_sz },
+ d_Qx,
+ d_Qy,
+ d_D,
+ d_B,
 }, pc, { 1, (uint32_t)ne01, workgroups_z });
 }
@@ -7058,96 +7013,46 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con
 GGML_ASSERT(ne11 == 1);
 GGML_ASSERT(src0->ne[3] == src1->ne[3]); // checked in supports_op
- ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
- ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
-
- vk_buffer d_Qy = nullptr;
- size_t qy_buf_offset = 0;
-
- bool src1_uma = false;
-
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
- src1_uma = d_Qy != nullptr;
- }
-
- const uint64_t d_ne = ne01 * ne11 * ne12 * ne03;
-
 const uint32_t row_stride_x = nb01 / sizeof(ggml_fp16_t);
 const uint32_t channel_stride_x = nb02 / sizeof(ggml_fp16_t);
 const uint32_t channel_stride_y = nb12 / sizeof(float);
- const uint64_t qx_sz = ggml_nbytes(src0);
- const uint64_t qy_sz = ggml_nbytes(src1);
- const uint64_t d_sz = sizeof(float) * d_ne;
-
 {
 // Request descriptor sets
 ggml_pipeline_request_descriptor_sets(ctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, 1);
 }
- vk_buffer d_D;
- uint64_t d_buf_offset = 0;
-
- if (ctx->num_additional_fused_ops > 0) {
- const ggml_tensor * add = cgraph->nodes[node_idx + 1];
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(add) + add->view_offs;
- } else {
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
- }
-
- GGML_ASSERT(d_D != nullptr);
- vk_buffer d_Qx = src0_buf_ctx->dev_buffer;
- const uint64_t qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
- GGML_ASSERT(d_Qx != nullptr);
- if (!src1_uma) {
- d_Qy = src1_buf_ctx->dev_buffer;
- qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
- GGML_ASSERT(d_Qx != nullptr);
- }
-
- const uint64_t qy_buffer_offset = (qy_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
- const uint64_t qy_shader_offset = qy_buf_offset - qy_buffer_offset;
-
- const uint64_t d_buffer_offset = (d_buf_offset / ctx->device->properties.limits.minStorageBufferOffsetAlignment) * ctx->device->properties.limits.minStorageBufferOffsetAlignment;
- const uint64_t d_shader_offset = d_buf_offset - d_buffer_offset;
+ vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true);
+ vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
+ vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1, true);
+ vk_subbuffer d_B = d_D;
 uint32_t enable_bias = ctx->num_additional_fused_ops > 0;
- vk_buffer d_B = d_D;
- size_t b_buf_offset = 0;
- uint64_t b_sz = 1;
-
 if (enable_bias) {
 const ggml_tensor * add = cgraph->nodes[node_idx + 1];
 const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0];
- bool b_uma = false;
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
- b_uma = d_B != nullptr;
- }
- if(!b_uma) {
- ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
- d_B = bias_buf_ctx->dev_buffer;
- b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
- GGML_ASSERT(d_B != nullptr);
- b_sz = ggml_nbytes(bias);
- }
+ d_B = ggml_vk_tensor_subbuffer(ctx, bias);
 }
 // compute
- const std::array<uint32_t, 13> pc = { (uint32_t)ne00, (uint32_t)ne01, row_stride_x, channel_stride_x, channel_stride_y, (uint32_t)(ne12 / ne02), (uint32_t)ne12, (uint32_t)(qy_shader_offset / ggml_type_size(src1->type)), (uint32_t)(d_shader_offset / ggml_type_size(dst->type)), nb03, nb13, nb23, enable_bias };
+ vk_mat_vec_nc_push_constants pc = {
+ (uint32_t)ne00, (uint32_t)ne01,
+ row_stride_x, channel_stride_x, channel_stride_y,
+ (uint32_t)(ne12 / ne02), (uint32_t)ne12,
+ 0, 0,
+ nb03, nb13, nb23, enable_bias
+ };
+
+ init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
+
 ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_mul_mat_vec_nc_f16_f32, {
- vk_subbuffer{ d_Qx, qx_buf_offset, qx_sz },
- vk_subbuffer{ d_Qy, qy_buffer_offset, qy_sz + qy_shader_offset },
- vk_subbuffer{ d_D, d_buffer_offset, d_sz + d_shader_offset },
- vk_subbuffer{ d_B, b_buf_offset, b_sz },
+ d_Qx,
+ d_Qy,
+ d_D,
+ d_B,
 }, pc, { (uint32_t)ne03, (uint32_t)ne01, (uint32_t)ne12 });
 }
@@ -7499,8 +7404,6 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 const uint64_t nei0 = ids->ne[0];
 const uint64_t nei1 = ids->ne[1];
- const uint64_t nbi2 = ids->nb[2];
-
 GGML_ASSERT(nei1 == 1);
 const uint64_t ne20 = dst->ne[0];
@@ -7508,30 +7411,6 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 // const uint64_t ne22 = dst->ne[2];
 // const uint64_t ne23 = dst->ne[3];
- ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context;
- ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context;
- ggml_backend_vk_buffer_context * ids_buf_ctx = (ggml_backend_vk_buffer_context *)ids->buffer->context;
-
- vk_buffer d_Qx = nullptr;
- size_t qx_buf_offset = 0;
- vk_buffer d_Qy = nullptr;
- size_t qy_buf_offset = 0;
- vk_buffer d_ids = nullptr;
- size_t ids_buf_offset = 0;
-
- bool src0_uma = false;
- bool src1_uma = false;
- bool ids_uma = false;
-
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, src0->data, d_Qx, qx_buf_offset);
- ggml_vk_host_get(ctx->device, src1->data, d_Qy, qy_buf_offset);
- ggml_vk_host_get(ctx->device, ids->data, d_ids, ids_buf_offset);
- src0_uma = d_Qx != nullptr;
- src1_uma = d_Qy != nullptr;
- ids_uma = d_ids != nullptr;
- }
-
 const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
 const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
@@ -7545,14 +7424,10 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 const uint64_t x_ne = ggml_nelements(src0);
 const uint64_t y_ne = ggml_nelements(src1);
- const uint64_t d_ne = ggml_nelements(dst);
 const uint64_t qx_sz = ggml_vk_align_size(ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type), ctx->device->properties.limits.minStorageBufferOffsetAlignment);
- const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
 const uint64_t x_sz = x_non_contig ? ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment) : qx_sz;
 const uint64_t y_sz = f16_f32_kernel ? sizeof(float) * y_ne : sizeof(ggml_fp16_t) * y_ne;
- const uint64_t ids_sz = nbi2;
- const uint64_t d_sz = sizeof(float) * d_ne;
 vk_pipeline to_fp16_vk_0 = nullptr;
 vk_pipeline to_fp16_vk_1 = nullptr;
@@ -7594,53 +7469,22 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 ggml_pipeline_request_descriptor_sets(ctx, dmmv, 1);
 }
- vk_buffer d_D;
- uint64_t d_buf_offset = 0;
-
- if (ctx->num_additional_fused_ops > 0) {
- const ggml_tensor * add = cgraph->nodes[node_idx + 1];
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)add->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(add) + add->view_offs;
- } else {
- ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
- d_D = dst_buf_ctx->dev_buffer;
- d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
- }
+ vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]);
+ vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0);
+ vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1);
+ vk_subbuffer d_ids = ggml_vk_tensor_subbuffer(ctx, ids);
+ vk_subbuffer d_B = d_D;
+ vk_subbuffer d_X, d_Y;
- GGML_ASSERT(d_D != nullptr);
- vk_buffer d_X;
- uint64_t x_buf_offset = 0;
- vk_buffer d_Y;
- uint64_t y_buf_offset = 0;
- if(!src0_uma) {
- d_Qx = src0_buf_ctx->dev_buffer;
- qx_buf_offset = vk_tensor_offset(src0) + src0->view_offs;
- GGML_ASSERT(d_Qx != nullptr);
- }
- if(!src1_uma) {
- d_Qy = src1_buf_ctx->dev_buffer;
- qy_buf_offset = vk_tensor_offset(src1) + src1->view_offs;
- GGML_ASSERT(d_Qy != nullptr);
- }
- if(!ids_uma) {
- d_ids = ids_buf_ctx->dev_buffer;
- ids_buf_offset = vk_tensor_offset(ids) + ids->view_offs;
- GGML_ASSERT(d_ids != nullptr);
- }
 if (qx_needs_dequant) {
- d_X = ctx->prealloc_x;
+ d_X = { ctx->prealloc_x, 0, ctx->prealloc_x->size };
 } else {
 d_X = d_Qx;
- x_buf_offset = qx_buf_offset;
- GGML_ASSERT(qx_sz == x_sz);
 }
 if (qy_needs_dequant) {
- d_Y = ctx->prealloc_y;
+ d_Y = { ctx->prealloc_y, 0, ctx->prealloc_y->size };
 } else {
 d_Y = d_Qy;
- y_buf_offset = qy_buf_offset;
- GGML_ASSERT(qy_sz == y_sz);
 }
 if (x_non_contig) {
@@ -7651,7 +7495,7 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte
 if (x_non_contig) {
 GGML_ASSERT(x_sz == ggml_vk_align_size(ggml_type_size(src0->type) * x_ne, ctx->device->properties.limits.minStorageBufferOffsetAlignment));
- ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, ggml_vk_subbuffer(ctx, d_Qx, qx_buf_offset), ggml_vk_subbuffer(ctx, d_X, 0));
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_0, src0, d_Qx, d_X);
 }
 if (y_non_contig) {
 GGML_ASSERT(y_sz == ggml_type_size(src1->type) * y_ne);
@@ -7660,7 +7504,7 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 if (ctx->prealloc_y_need_sync) {
 ggml_vk_sync_buffers(ctx, subctx);
 }
- ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, ggml_vk_subbuffer(ctx, d_Qy, qy_buf_offset), ggml_vk_subbuffer(ctx, d_Y, 0));
+ ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, d_Qy, d_Y);
 ctx->prealloc_y_last_pipeline_used = to_fp16_vk_1.get();
 ctx->prealloc_y_last_tensor_used = src1;
 }
@@ -7693,25 +7537,10 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 }
 }
- vk_buffer d_B = d_D;
- size_t b_buf_offset = 0;
- uint64_t b_sz = 1;
-
 if (enable_bias || enable_scale) {
 const ggml_tensor * bias = cgraph->nodes[node_idx + 1]->src[1];
- bool b_uma = false;
- if (ctx->device->uma) {
- ggml_vk_host_get(ctx->device, bias->data, d_B, b_buf_offset);
- b_uma = d_B != nullptr;
- }
- if(!b_uma) {
- ggml_backend_vk_buffer_context * bias_buf_ctx = (ggml_backend_vk_buffer_context *)bias->buffer->context;
- d_B = bias_buf_ctx->dev_buffer;
- b_buf_offset = vk_tensor_offset(bias) + bias->view_offs;
- GGML_ASSERT(d_B != nullptr);
- b_sz = ggml_nbytes(bias);
- }
+ d_B = ggml_vk_tensor_subbuffer(ctx, bias);
 }
 // compute
@@ -7725,11 +7554,11 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_conte
 };
 ggml_vk_dispatch_pipeline(ctx, subctx, dmmv, {
- vk_subbuffer{ d_X, x_buf_offset, x_sz },
- vk_subbuffer{ d_Y, y_buf_offset, y_sz },
- vk_subbuffer{ d_D, d_buf_offset, d_sz },
- vk_subbuffer{ d_B, b_buf_offset, b_sz },
- vk_subbuffer{ d_ids, ids_buf_offset, ids_sz },
+ d_X,
+ d_Y,
+ d_D,
+ d_B,
+ d_ids,
 }, pc, { groups_x, (uint32_t)nei0, groups_z });
@@ -8675,26 +8504,6 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
 }
 }
-static uint32_t get_misalign_bytes(const ggml_backend_vk_context * ctx, const ggml_tensor * t)
-{
- return ((vk_tensor_offset(t) + t->view_offs) & (ctx->device->properties.limits.minStorageBufferOffsetAlignment - 1));;
-}
-
-template <typename T> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, T &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
- GGML_UNUSED(p);
- GGML_UNUSED(src0);
- GGML_UNUSED(src1);
- GGML_UNUSED(src2);
- GGML_UNUSED(src3);
- GGML_UNUSED(dst);
- static_assert(!std::is_const<T>::value, "unexpected type");
- GGML_ASSERT(!src0 || get_misalign_bytes(ctx, src0) == 0);
- GGML_ASSERT(!src1 || get_misalign_bytes(ctx, src1) == 0);
- GGML_ASSERT(!src2 || get_misalign_bytes(ctx, src2) == 0);
- GGML_ASSERT(!src3 || get_misalign_bytes(ctx, src3) == 0);
- GGML_ASSERT(!dst || get_misalign_bytes(ctx, dst) == 0);
-}
-
 template <> void init_pushconst_tensor_offsets(ggml_backend_vk_context * ctx, vk_op_unary_push_constants &p, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, const ggml_tensor * src3, ggml_tensor * dst) {
 const uint32_t a_offset = get_misalign_bytes(ctx, src0) / ggml_type_size(src0->type);
 const
uint32_t d_offset = get_misalign_bytes(ctx, dst) / ggml_type_size(dst->type); From 89f82bfea408ffeaab1073805fb648ebd2a35839 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Sat, 15 Nov 2025 12:00:29 +0100 Subject: [PATCH 37/51] vulkan: implement ABS and NEG (llama/17245) * docs: update Vulkan ops * vulkan: add NEG op * vulkan: add ABS op --------- Signed-off-by: Giuseppe Scrivano --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 22 +++++++++++++++++++ ggml/src/ggml-vulkan/vulkan-shaders/abs.comp | 21 ++++++++++++++++++ ggml/src/ggml-vulkan/vulkan-shaders/neg.comp | 20 +++++++++++++++++ .../vulkan-shaders/vulkan-shaders-gen.cpp | 4 ++++ 4 files changed, 67 insertions(+) create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/abs.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/neg.comp diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index a5a6ad9cbfb..f5812dc4694 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -656,10 +656,12 @@ struct vk_device_struct { vk_pipeline pipeline_gelu_quick[2]; vk_pipeline pipeline_silu[2]; vk_pipeline pipeline_relu[2]; + vk_pipeline pipeline_neg[2]; vk_pipeline pipeline_tanh[2]; vk_pipeline pipeline_sigmoid[2]; vk_pipeline pipeline_hardsigmoid[2]; vk_pipeline pipeline_hardswish[2]; + vk_pipeline pipeline_abs[2]; vk_pipeline pipeline_geglu[2]; vk_pipeline pipeline_reglu[2]; @@ -3804,10 +3806,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_UNARY(gelu_quick) CREATE_UNARY(silu) CREATE_UNARY(relu) + CREATE_UNARY(neg) CREATE_UNARY(tanh) CREATE_UNARY(sigmoid) CREATE_UNARY(hardsigmoid) CREATE_UNARY(hardswish) + CREATE_UNARY(abs) #undef CREATE_UNARY #define CREATE_UNARY_RTE(name) \ @@ -8170,6 +8174,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_gelu_quick[dst->type == GGML_TYPE_F16]; case GGML_UNARY_OP_RELU: return ctx->device->pipeline_relu[dst->type == GGML_TYPE_F16]; + case GGML_UNARY_OP_NEG: + return ctx->device->pipeline_neg[dst->type == GGML_TYPE_F16]; case GGML_UNARY_OP_TANH: return ctx->device->pipeline_tanh[dst->type == GGML_TYPE_F16]; case GGML_UNARY_OP_SIGMOID: @@ -8178,6 +8184,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_hardsigmoid[dst->type == GGML_TYPE_F16]; case GGML_UNARY_OP_HARDSWISH: return ctx->device->pipeline_hardswish[dst->type == GGML_TYPE_F16]; + case GGML_UNARY_OP_ABS: + return ctx->device->pipeline_abs[dst->type == GGML_TYPE_F16]; default: break; } @@ -11106,10 +11114,12 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: + case GGML_UNARY_OP_ABS: break; default: return false; @@ -11436,10 +11446,12 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: + case GGML_UNARY_OP_ABS: ggml_vk_unary(ctx, compute_ctx, src0, node); break; default: @@ -11706,10 +11718,12 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * case GGML_UNARY_OP_GELU_ERF: case 
GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: + case GGML_UNARY_OP_ABS: buf = tensor->buffer; break; default: @@ -13235,10 +13249,12 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: + case GGML_UNARY_OP_ABS: return ggml_is_contiguous(op->src[0]) && (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) && (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) && @@ -14116,6 +14132,9 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * case GGML_UNARY_OP_RELU: tensor_clone = ggml_relu(ggml_ctx, src_clone[0]); break; + case GGML_UNARY_OP_NEG: + tensor_clone = ggml_neg(ggml_ctx, src_clone[0]); + break; case GGML_UNARY_OP_TANH: tensor_clone = ggml_tanh(ggml_ctx, src_clone[0]); break; @@ -14128,6 +14147,9 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * case GGML_UNARY_OP_HARDSWISH: tensor_clone = ggml_hardswish(ggml_ctx, src_clone[0]); break; + case GGML_UNARY_OP_ABS: + tensor_clone = ggml_abs(ggml_ctx, src_clone[0]); + break; default: std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl; GGML_ABORT("fatal error"); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/abs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/abs.comp new file mode 100644 index 00000000000..07bd1c18dad --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/abs.comp @@ -0,0 +1,21 @@ +#version 450 + +#include "generic_head.glsl" +#include "types.glsl" + +#extension GL_EXT_control_flow_attributes : enable + +layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer X {A_TYPE data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; + +void main() { + const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; + + if (i >= p.KX) { + return; + } + + data_d[i] = D_TYPE(abs(float(data_a[i]))); +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/neg.comp b/ggml/src/ggml-vulkan/vulkan-shaders/neg.comp new file mode 100644 index 00000000000..7f9b1bce99a --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/neg.comp @@ -0,0 +1,20 @@ +#version 450 + +#include "generic_head.glsl" +#include "types.glsl" + +#extension GL_EXT_control_flow_attributes : enable + +layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer X {A_TYPE data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; + +void main() { + const uint i = gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; + + if (i >= p.KX) { + return; + } + data_d[i] = D_TYPE(-float(data_a[i])); +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 1423f772403..7623a362087 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -827,6 +827,8 @@ void process_shaders() { string_to_spv("silu_f32", "silu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("relu_f16", "relu.comp", 
{{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); string_to_spv("relu_f32", "relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("neg_f16", "neg.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("neg_f32", "neg.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("tanh_f16", "tanh.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); string_to_spv("tanh_f32", "tanh.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("sigmoid_f16", "sigmoid.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); @@ -835,6 +837,8 @@ void process_shaders() { string_to_spv("hardsigmoid_f32","hardsigmoid.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("hardswish_f16", "hardswish.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); string_to_spv("hardswish_f32", "hardswish.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); + string_to_spv("abs_f16", "abs.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("abs_f32", "abs.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); for (auto rte : {false, true}) { std::string suffix = rte ? "_rte" : ""; From 5ae417388b4ba8c93b52cf7f35e20a20edee119f Mon Sep 17 00:00:00 2001 From: Ruben Ortlam Date: Sat, 15 Nov 2025 15:18:58 +0100 Subject: [PATCH 38/51] vulkan: Replace 16-bit unpack8 calls to work around legacy Windows AMD driver bug (llama/17285) --- .../vulkan-shaders/mul_mmq_funcs.glsl | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl index 51b5bb11e7b..4e3a5611423 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mmq_funcs.glsl @@ -300,7 +300,7 @@ void block_a_to_shmem(const uint buf_ib, const uint ib, const uint iqs) { if (iqs == 0) { buf_a[buf_ib].dm = FLOAT_TYPE_VEC2(data_a_packed32[ib_k].dm); - buf_a[buf_ib].scales = unpack8(data_a_packed16[ib_k].scales[iqs_k / 8]); + buf_a[buf_ib].scales = unpack8(uint32_t(data_a_packed16[ib_k].scales[iqs_k / 8])).xy; // vec4 used due to #12147 } } @@ -345,21 +345,22 @@ void block_a_to_shmem(const uint buf_ib, const uint ib, const uint iqs) { // Repack 2x4 quants into one int // Add the 3rd bit instead of subtracting it to allow packing the quants - const i8vec2 vals00 = unpack8(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 ] >> qs_shift) & uint16_t(0x0303))) | - unpack8(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 ] >> hm_shift) & uint16_t(0x0101)) << 2)); - const i8vec2 vals01 = unpack8(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 + 1 ] >> qs_shift) & uint16_t(0x0303))) | - unpack8(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 + 1] >> hm_shift) & uint16_t(0x0101)) << 2)); - const i8vec2 vals10 = unpack8(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 + 2 ] >> qs_shift) & uint16_t(0x0303))) | - unpack8(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 + 2] >> hm_shift) & uint16_t(0x0101)) << 2)); - const i8vec2 vals11 = unpack8(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 + 3 ] >> qs_shift) & uint16_t(0x0303))) | - unpack8(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 + 3] >> hm_shift) & uint16_t(0x0101)) << 2)); + // vec4 for unpack8 used due to #12147 + const i8vec2 vals00 = unpack8(int32_t(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 ] >> qs_shift) & uint16_t(0x0303)))).xy | + unpack8(int32_t(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 ] >> hm_shift) & uint16_t(0x0101))) << 
2)).xy; + const i8vec2 vals01 = unpack8(int32_t(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 + 1 ] >> qs_shift) & uint16_t(0x0303)))).xy | + unpack8(int32_t(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 + 1] >> hm_shift) & uint16_t(0x0101))) << 2)).xy; + const i8vec2 vals10 = unpack8(int32_t(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 + 2 ] >> qs_shift) & uint16_t(0x0303)))).xy | + unpack8(int32_t(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 + 2] >> hm_shift) & uint16_t(0x0101))) << 2)).xy; + const i8vec2 vals11 = unpack8(int32_t(int16_t((data_a_packed16[ib_k].qs[qs_idx * 2 + 3 ] >> qs_shift) & uint16_t(0x0303)))).xy | + unpack8(int32_t(int16_t(((data_a_packed16[ib_k].hmask[hm_idx * 2 + 3] >> hm_shift) & uint16_t(0x0101))) << 2)).xy; buf_a[buf_ib].qs[iqs] = pack32(u8vec4(vals00.x, vals00.y, vals01.x, vals01.y)) | (pack32(u8vec4(vals10.x, vals10.y, vals11.x, vals11.y)) << 4); if (iqs == 0) { const uint is = iqs_k / 4; - const i8vec2 scales = i8vec2(unpack8(((data_a_packed16[ib_k].scales[(is % 8 ) / 2] >> (4 * (is / 8))) & 0x0F0F) | - (((data_a_packed16[ib_k].scales[(8 + (is % 4)) / 2] >> (2 * (is / 4))) & 0x0303) << 4))); + const i8vec2 scales = i8vec2(unpack8(uint32_t(((data_a_packed16[ib_k].scales[(is % 8 ) / 2] >> (4 * (is / 8))) & 0x0F0F) | + (((data_a_packed16[ib_k].scales[(8 + (is % 4)) / 2] >> (2 * (is / 4))) & 0x0303) << 4))).xy); // vec4 used due to #12147 buf_a[buf_ib].d_scales = FLOAT_TYPE(data_a_packed16[ib_k].d) * FLOAT_TYPE_VEC2(scales - 32); } @@ -516,15 +517,15 @@ void block_a_to_shmem(const uint buf_ib, const uint ib, const uint iqs) { const uint qh_idx = (iqs_k / 32) * 8 + iqs; const uint qh_shift = ((iqs_k % 32) / 8) * 2; - const i8vec2 vals00 = (unpack8(int16_t((data_a_packed16[ib_k].ql[ql_idx * 2 ] >> ql_shift) & uint16_t(0x0F0F))) | - unpack8(int16_t(((data_a_packed16[ib_k].qh[qh_idx * 2 ] >> qh_shift) & uint16_t(0x0303)) << 4))) - int8_t(32); - const i8vec2 vals01 = (unpack8(int16_t((data_a_packed16[ib_k].ql[ql_idx * 2 + 1] >> ql_shift) & uint16_t(0x0F0F))) | - unpack8(int16_t(((data_a_packed16[ib_k].qh[qh_idx * 2 + 1] >> qh_shift) & uint16_t(0x0303)) << 4))) - int8_t(32); + const i8vec2 vals00 = (unpack8(int32_t((data_a_packed16[ib_k].ql[ql_idx * 2 ] >> ql_shift) & uint16_t(0x0F0F))).xy | + unpack8(int32_t(((data_a_packed16[ib_k].qh[qh_idx * 2 ] >> qh_shift) & uint16_t(0x0303)) << 4)).xy) - int8_t(32); + const i8vec2 vals01 = (unpack8(int32_t((data_a_packed16[ib_k].ql[ql_idx * 2 + 1] >> ql_shift) & uint16_t(0x0F0F))).xy | + unpack8(int32_t(((data_a_packed16[ib_k].qh[qh_idx * 2 + 1] >> qh_shift) & uint16_t(0x0303)) << 4)).xy) - int8_t(32); buf_a[buf_ib].qs[iqs] = pack32(i8vec4(vals00.x, vals00.y, vals01.x, vals01.y)); if (iqs == 0) { const uint is = iqs_k / 4; - const i8vec2 scales = unpack8(data_a_packed16[ib_k].scales[is / 2]); + const i8vec2 scales = unpack8(int32_t(data_a_packed16[ib_k].scales[is / 2])).xy; buf_a[buf_ib].d_scales = FLOAT_TYPE(data_a_packed16[ib_k].d) * FLOAT_TYPE_VEC2(scales); } From 5d9fba0a9d28fe62404381cdc2988b2d854299ac Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 15 Nov 2025 12:54:23 -0600 Subject: [PATCH 39/51] vulkan: Fuse mul_mat_id+add_id+mul and mul_mat+add+add. (llama/17287) These both show up in gpt-oss. Also, cleanup the mul_mat_vec fusion code a bit. 
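For reference, the fused epilogues amount to the host-side sketch below (illustrative only: mat_vec_fused and its flat operand layout are invented for this explanation; the flag values mirror the MAT_VEC_FUSION_FLAGS_* defines added by this patch, and in the real shaders the SCALE operands are indexed by expert id rather than read as a single scalar):

    #include <cstdint>
    #include <vector>

    // Flag values match the MAT_VEC_FUSION_FLAGS_* defines introduced below.
    constexpr uint32_t FUSE_BIAS0  = 0x1;
    constexpr uint32_t FUSE_BIAS1  = 0x2;
    constexpr uint32_t FUSE_SCALE0 = 0x4;
    constexpr uint32_t FUSE_SCALE1 = 0x8;

    // y = A*x followed by up to two fused operands F0/F1:
    //   MUL_MAT    + ADD    + ADD -> BIAS0 | BIAS1  (two bias adds)
    //   MUL_MAT_ID + ADD_ID + MUL -> BIAS0 | SCALE1 (bias add, then scale)
    // Assumes y.size() == A.size() and A[r].size() == x.size().
    static void mat_vec_fused(const std::vector<std::vector<float>> & A,
                              const std::vector<float> & x,
                              const std::vector<float> & F0,
                              const std::vector<float> & F1,
                              uint32_t flags,
                              std::vector<float> & y) {
        for (std::size_t r = 0; r < A.size(); ++r) {
            float acc = 0.0f;
            for (std::size_t c = 0; c < x.size(); ++c) {
                acc += A[r][c] * x[c];
            }
            if (flags & FUSE_BIAS0)  { acc += F0[r]; } // first fused add
            if (flags & FUSE_BIAS1)  { acc += F1[r]; } // second fused add
            if (flags & FUSE_SCALE0) { acc *= F0[0]; } // per-expert scale (scalar here)
            if (flags & FUSE_SCALE1) { acc *= F1[0]; }
            y[r] = acc;
        }
    }

The shaders do the same thing at the end of their reduction: the single data_bias binding is split into data_fuse0/data_fuse1 so one dispatch can apply both trailing ops instead of launching separate ADD/MUL kernels.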
--- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 246 +++++++++++++----- .../vulkan-shaders/mul_mat_vec_base.glsl | 96 ++++--- .../vulkan-shaders/mul_mat_vec_iface.glsl | 33 +++ .../vulkan-shaders/mul_mat_vec_nc.comp | 20 +- .../vulkan-shaders/mul_mat_vec_p021.comp | 20 +- 5 files changed, 275 insertions(+), 140 deletions(-) create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index f5812dc4694..ef99c3c1eba 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -32,6 +32,7 @@ DispatchLoaderDynamic & ggml_vk_default_dispatcher(); #include #include #include +#include #include #include #include @@ -824,6 +825,12 @@ struct vk_mat_mat_push_constants { uint32_t ne02; uint32_t ne12; uint32_t broadcast2; uint32_t broadcast3; uint32_t padded_N; }; + +#define MAT_VEC_FUSION_FLAGS_BIAS0 0x1 +#define MAT_VEC_FUSION_FLAGS_BIAS1 0x2 +#define MAT_VEC_FUSION_FLAGS_SCALE0 0x4 +#define MAT_VEC_FUSION_FLAGS_SCALE1 0x8 + struct vk_mat_vec_push_constants { uint32_t ncols; uint32_t stride_a; @@ -832,8 +839,7 @@ struct vk_mat_vec_push_constants { uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d; - uint32_t enable_bias; - uint32_t enable_scale; + uint32_t fusion_flags; uint32_t ne02; uint32_t ne12; uint32_t broadcast2; @@ -847,7 +853,7 @@ struct vk_mat_vec_p021_push_constants { uint32_t nchannels_y; uint32_t b_offset; uint32_t d_offset; - uint32_t enable_bias; + uint32_t fusion_flags; }; struct vk_mat_vec_nc_push_constants { @@ -863,7 +869,7 @@ struct vk_mat_vec_nc_push_constants { uint32_t nb03; uint32_t nb13; uint32_t nb23; - uint32_t enable_bias; + uint32_t fusion_flags; }; struct vk_mat_mat_id_push_constants { @@ -881,8 +887,7 @@ struct vk_mat_vec_id_push_constants { uint32_t batch_stride_a; uint32_t batch_stride_b; uint32_t batch_stride_d; - uint32_t enable_bias; - uint32_t enable_scale; + uint32_t fusion_flags; uint32_t nei0; uint32_t ne11; }; @@ -3465,8 +3470,8 @@ static void ggml_vk_load_shaders(vk_device& device) { const uint32_t force_subgroup_size = use_subgroups ? subgroup_size : 0; const uint32_t force_subgroup_size16 = use_subgroups16 ? subgroup_size16 : 0; - static constexpr uint32_t mul_mat_vec_num_bindings = 4; - static constexpr uint32_t mul_mat_vec_id_num_bindings = 5; + static constexpr uint32_t mul_mat_vec_num_bindings = 5; + static constexpr uint32_t mul_mat_vec_id_num_bindings = 6; for (uint32_t w = 0; w < DMMV_WG_SIZE_COUNT; ++w) { const uint32_t wg_size_subgroup = (w == DMMV_WG_SIZE_SUBGROUP) ? subgroup_size : (subgroup_size * 4); @@ -6871,21 +6876,31 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& groups_x = CEIL_DIV(groups_x, groups_z); } - uint32_t enable_bias = ctx->num_additional_fused_ops > 0; + uint32_t fusion_flags = 0; - vk_subbuffer d_B = d_D; - - if (enable_bias) { + vk_subbuffer d_F0 = d_D; + if (ctx->num_additional_fused_ops > 0) { const ggml_tensor * add = cgraph->nodes[node_idx + 1]; const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0]; - d_B = ggml_vk_tensor_subbuffer(ctx, bias); + d_F0 = ggml_vk_tensor_subbuffer(ctx, bias); + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0; + } + + vk_subbuffer d_F1 = d_D; + if (ctx->num_additional_fused_ops == 2) { + const ggml_tensor * add = cgraph->nodes[node_idx + 2]; + const ggml_tensor * bias = add->src[0] == cgraph->nodes[node_idx + 1] ? 
add->src[1] : add->src[0]; + + d_F1 = ggml_vk_tensor_subbuffer(ctx, bias); + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS1; } // compute const vk_mat_vec_push_constants pc = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01, - stride_batch_x, stride_batch_y, stride_batch_d, enable_bias, 0, + stride_batch_x, stride_batch_y, stride_batch_d, + fusion_flags, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3, }; ggml_vk_dispatch_pipeline(ctx, subctx, dmmv, @@ -6893,7 +6908,8 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& d_X, d_Y, d_D, - d_B, + d_F0, + d_F1, }, pc, { groups_x, (uint32_t)(ne12 * ne13), groups_z }); @@ -6946,22 +6962,31 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0); vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1, true); - vk_subbuffer d_B = d_D; + vk_subbuffer d_F0 = d_D; - uint32_t enable_bias = ctx->num_additional_fused_ops > 0; + uint32_t fusion_flags = 0; - if (enable_bias) { + if (ctx->num_additional_fused_ops > 0) { const ggml_tensor * add = cgraph->nodes[node_idx + 1]; const ggml_tensor * bias = add->src[0] == dst ? add->src[1] : add->src[0]; - d_B = ggml_vk_tensor_subbuffer(ctx, bias); + d_F0 = ggml_vk_tensor_subbuffer(ctx, bias); + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0; + } + + vk_subbuffer d_F1 = d_D; + if (ctx->num_additional_fused_ops > 1) { + const ggml_tensor * bias = cgraph->nodes[node_idx + 2]->src[1]; + + d_F1 = ggml_vk_tensor_subbuffer(ctx, bias); + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS1; } // compute vk_mat_vec_p021_push_constants pc = { (uint32_t)ne00, (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne12, - 0, 0, enable_bias + 0, 0, fusion_flags }; init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]); @@ -6977,7 +7002,8 @@ static void ggml_vk_mul_mat_vec_p021_f16_f32(ggml_backend_vk_context * ctx, vk_c d_Qx, d_Qy, d_D, - d_B, + d_F0, + d_F1, }, pc, { 1, (uint32_t)ne01, workgroups_z }); } @@ -7029,15 +7055,24 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con vk_subbuffer d_D = ggml_vk_tensor_subbuffer(ctx, cgraph->nodes[node_idx + ctx->num_additional_fused_ops], true); vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0); vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1, true); - vk_subbuffer d_B = d_D; + vk_subbuffer d_F0 = d_D; - uint32_t enable_bias = ctx->num_additional_fused_ops > 0; + uint32_t fusion_flags = 0; - if (enable_bias) { + if (ctx->num_additional_fused_ops > 0) { const ggml_tensor * add = cgraph->nodes[node_idx + 1]; const ggml_tensor * bias = add->src[0] == dst ? 
add->src[1] : add->src[0]; - d_B = ggml_vk_tensor_subbuffer(ctx, bias); + d_F0 = ggml_vk_tensor_subbuffer(ctx, bias); + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0; + } + + vk_subbuffer d_F1 = d_D; + if (ctx->num_additional_fused_ops > 1) { + const ggml_tensor * bias = cgraph->nodes[node_idx + 2]->src[1]; + + d_F1 = ggml_vk_tensor_subbuffer(ctx, bias); + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS1; } // compute @@ -7046,7 +7081,7 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con row_stride_x, channel_stride_x, channel_stride_y, (uint32_t)(ne12 / ne02), (uint32_t)ne12, 0, 0, - nb03, nb13, nb23, enable_bias + nb03, nb13, nb23, fusion_flags }; init_pushconst_tensor_offsets(ctx, pc, src0, src1, nullptr, nullptr, cgraph->nodes[node_idx + ctx->num_additional_fused_ops]); @@ -7056,7 +7091,8 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(ggml_backend_vk_context * ctx, vk_con d_Qx, d_Qy, d_D, - d_B, + d_F0, + d_F1, }, pc, { (uint32_t)ne03, (uint32_t)ne01, (uint32_t)ne12 }); } @@ -7477,7 +7513,7 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte vk_subbuffer d_Qx = ggml_vk_tensor_subbuffer(ctx, src0); vk_subbuffer d_Qy = ggml_vk_tensor_subbuffer(ctx, src1); vk_subbuffer d_ids = ggml_vk_tensor_subbuffer(ctx, ids); - vk_subbuffer d_B = d_D; + vk_subbuffer d_F0 = d_D; vk_subbuffer d_X, d_Y; if (qx_needs_dequant) { @@ -7530,30 +7566,34 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte groups_x = CEIL_DIV(groups_x, groups_z); } - uint32_t enable_bias = 0; - uint32_t enable_scale = 0; + uint32_t fusion_flags = 0; + if (ctx->num_additional_fused_ops > 0) { + const ggml_tensor * bias = cgraph->nodes[node_idx + 1]->src[1]; + + d_F0 = ggml_vk_tensor_subbuffer(ctx, bias); + if (cgraph->nodes[node_idx + 1]->op == GGML_OP_MUL) { - enable_scale = 1; + fusion_flags |= MAT_VEC_FUSION_FLAGS_SCALE0; } else { GGML_ASSERT(cgraph->nodes[node_idx + 1]->op == GGML_OP_ADD_ID); - enable_bias = 1; + fusion_flags |= MAT_VEC_FUSION_FLAGS_BIAS0; } } - if (enable_bias || enable_scale) { - const ggml_tensor * bias = cgraph->nodes[node_idx + 1]->src[1]; + vk_subbuffer d_F1 = d_D; + if (ctx->num_additional_fused_ops > 1) { + const ggml_tensor * scale = cgraph->nodes[node_idx + 2]->src[1]; - d_B = ggml_vk_tensor_subbuffer(ctx, bias); + d_F1 = ggml_vk_tensor_subbuffer(ctx, scale); + fusion_flags |= MAT_VEC_FUSION_FLAGS_SCALE1; } // compute const vk_mat_vec_id_push_constants pc = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01, (uint32_t)(ne00 * ne01), stride_batch_y, (uint32_t)(ne20 * ne21), - - enable_bias, enable_scale, - + fusion_flags, (uint32_t)nei0, (uint32_t)ne11, }; ggml_vk_dispatch_pipeline(ctx, subctx, dmmv, @@ -7561,7 +7601,8 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte d_X, d_Y, d_D, - d_B, + d_F0, + d_F1, d_ids, }, pc, { groups_x, (uint32_t)nei0, groups_z }); @@ -12305,10 +12346,7 @@ static bool ggml_vk_can_fuse(const ggml_backend_vk_context * ctx, const struct g return false; } } - if (ops.size() == 2 && ops.begin()[0] == GGML_OP_MUL_MAT && ops.begin()[1] == GGML_OP_ADD) { - // additional constraints specific to this fusion - const ggml_tensor *mul = cgraph->nodes[node_idx]; - const ggml_tensor *add = cgraph->nodes[node_idx + 1]; + auto const &mm_add_ok = [&](const ggml_tensor *mul, const ggml_tensor *add) { const ggml_tensor *bias = add->src[0] == mul ? 
add->src[1] : add->src[0]; // mat-vec only @@ -12328,14 +12366,31 @@ static bool ggml_vk_can_fuse(const ggml_backend_vk_context * ctx, const struct g if (get_misalign_bytes(ctx, bias) != 0) { return false; } - } - if (ops.size() == 2 && ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_ADD_ID) { + return true; + }; + + if ((ops.size() == 2 || ops.size() == 3) && ops.begin()[0] == GGML_OP_MUL_MAT && ops.begin()[1] == GGML_OP_ADD) { // additional constraints specific to this fusion const ggml_tensor *mul = cgraph->nodes[node_idx]; const ggml_tensor *add = cgraph->nodes[node_idx + 1]; - const ggml_tensor *bias = add->src[1]; - if (mul != add->src[0]) { + if (!mm_add_ok(mul, add)) { + return false; + } + if (ops.size() == 3) { + if (ops.begin()[2] != GGML_OP_ADD) { + return false; + } + if (!mm_add_ok(add, cgraph->nodes[node_idx + 2])) { + return false; + } + } + } + + auto const &mmid_mul_ok = [&](const ggml_tensor *mmid, const ggml_tensor *mul) { + const ggml_tensor *scale = mul->src[1]; + + if (mmid != mul->src[0]) { return false; } // mat-vec only @@ -12343,30 +12398,34 @@ static bool ggml_vk_can_fuse(const ggml_backend_vk_context * ctx, const struct g return false; } // shaders assume the types match - if (mul->type != bias->type) { + if (mmid->type != scale->type) { return false; } // shaders assume the bias is contiguous - if (!ggml_is_contiguous(bias)) { + if (!ggml_is_contiguous(scale)) { return false; } - // the ID tensor must be the same for mul_mat_id and add_id - if (mul->src[2] != add->src[2]) { + // unaligned bias isn't handled + if (get_misalign_bytes(ctx, scale) != 0) { return false; } - // unaligned bias isn't handled - if (get_misalign_bytes(ctx, bias) != 0) { + // shader only indexes by expert index + if (scale->ne[0] != 1 || + scale->ne[1] != mul->ne[1] || + scale->ne[2] != 1 || + scale->ne[3] != 1) { return false; } - } + return true; + }; - if (ops.size() == 2 && ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_MUL) { + if ((ops.size() == 2 || ops.size() == 3) && ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_ADD_ID) { // additional constraints specific to this fusion - const ggml_tensor *mmid = cgraph->nodes[node_idx]; - const ggml_tensor *mul = cgraph->nodes[node_idx + 1]; - const ggml_tensor *scale = mul->src[1]; + const ggml_tensor *mul = cgraph->nodes[node_idx]; + const ggml_tensor *add = cgraph->nodes[node_idx + 1]; + const ggml_tensor *bias = add->src[1]; - if (mmid != mul->src[0]) { + if (mul != add->src[0]) { return false; } // mat-vec only @@ -12374,22 +12433,37 @@ static bool ggml_vk_can_fuse(const ggml_backend_vk_context * ctx, const struct g return false; } // shaders assume the types match - if (mmid->type != scale->type) { + if (mul->type != bias->type) { return false; } // shaders assume the bias is contiguous - if (!ggml_is_contiguous(scale)) { + if (!ggml_is_contiguous(bias)) { + return false; + } + // the ID tensor must be the same for mul_mat_id and add_id + if (mul->src[2] != add->src[2]) { return false; } // unaligned bias isn't handled - if (get_misalign_bytes(ctx, scale) != 0) { + if (get_misalign_bytes(ctx, bias) != 0) { return false; } - // shader only indexes by expert index - if (scale->ne[0] != 1 || - scale->ne[1] != mul->ne[1] || - scale->ne[2] != 1 || - scale->ne[3] != 1) { + + if (ops.size() == 3) { + if (ops.begin()[2] != GGML_OP_MUL) { + return false; + } + const ggml_tensor *mul = cgraph->nodes[node_idx + 2]; + return mmid_mul_ok(add, mul); + } + } + + if (ops.size() == 2 && 
ops.begin()[0] == GGML_OP_MUL_MAT_ID && ops.begin()[1] == GGML_OP_MUL) { + // additional constraints specific to this fusion + const ggml_tensor *mmid = cgraph->nodes[node_idx]; + const ggml_tensor *mul = cgraph->nodes[node_idx + 1]; + + if (!mmid_mul_ok(mmid, mul)) { return false; } } @@ -12704,8 +12778,12 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg uint32_t num_adds = ggml_vk_fuse_multi_add(ctx, cgraph, i); if (num_adds) { ctx->num_additional_fused_ops = num_adds - 1; + } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT, GGML_OP_ADD, GGML_OP_ADD })) { + ctx->num_additional_fused_ops = 2; } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT, GGML_OP_ADD })) { ctx->num_additional_fused_ops = 1; + } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID, GGML_OP_MUL })) { + ctx->num_additional_fused_ops = 2; } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_ADD_ID })) { ctx->num_additional_fused_ops = 1; } else if (ggml_vk_can_fuse(ctx, cgraph, i, { GGML_OP_MUL_MAT_ID, GGML_OP_MUL })) { @@ -12872,6 +12950,8 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * std::vector new_order; std::vector used(graph->n_nodes, false); + std::set used_node_set; + int first_unused = 0; while (first_unused < graph->n_nodes) { std::vector current_set; @@ -12894,6 +12974,7 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * if (match_pattern(pattern, first_unused)) { for (size_t j = 0; j < pattern.size(); ++j) { new_order.push_back(graph->nodes[first_unused + j]); + used_node_set.insert(graph->nodes[first_unused + j]); used[first_unused + j] = true; } while (first_unused < graph->n_nodes && used[first_unused]) { @@ -12997,6 +13078,36 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * used[set_rows_idx] = true; } } + // Look for MUL_MAT_ID + ADD_ID + MUL + if (j > 0 && + graph->nodes[j]->op == GGML_OP_ADD_ID && + graph->nodes[j-1]->op == GGML_OP_MUL_MAT_ID) { + for (int k = j + 1; k < std::min(j + 15, graph->n_nodes); ++k) { + if (graph->nodes[k]->op == GGML_OP_MUL && + graph->nodes[k]->src[0] == graph->nodes[j] && + // src1 must either be weights or already processed + (graph->nodes[k]->src[1]->op == GGML_OP_NONE || used_node_set.find(graph->nodes[k]->src[1]) != used_node_set.end())) { + current_set.push_back(k); + used[k] = true; + break; + } + } + } + // Look for MUL_MAT + ADD + ADD + if (j > 0 && + graph->nodes[j]->op == GGML_OP_ADD && + graph->nodes[j-1]->op == GGML_OP_MUL_MAT) { + for (int k = j + 1; k < std::min(j + 15, graph->n_nodes); ++k) { + if (graph->nodes[k]->op == GGML_OP_ADD && + graph->nodes[k]->src[0] == graph->nodes[j] && + // src1 must either be weights or already processed + (graph->nodes[k]->src[1]->op == GGML_OP_NONE || used_node_set.find(graph->nodes[k]->src[1]) != used_node_set.end())) { + current_set.push_back(k); + used[k] = true; + break; + } + } + } } } // Second pass grabs view nodes. 
@@ -13029,6 +13140,7 @@ static void ggml_vk_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * // Push the current set into new_order for (auto c : current_set) { new_order.push_back(graph->nodes[c]); + used_node_set.insert(graph->nodes[c]); used[c] = true; } while (first_unused < graph->n_nodes && used[first_unused]) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl index eb8fa6dc09f..e4651a683bf 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.glsl @@ -11,29 +11,7 @@ #define EXPERT_COUNT 8 #endif -#include "types.glsl" - -#ifndef MMQ -layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; -#else -layout (binding = 0) readonly buffer A {A_TYPE_PACKED16 data_a[];}; -#endif - -layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; -#ifdef B_TYPE_VEC2 -layout (binding = 1) readonly buffer BV2 {B_TYPE_VEC2 data_b_v2[];}; -#endif -#ifdef B_TYPE_VEC4 -layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; -#endif - -layout (binding = 2) writeonly buffer D {D_TYPE data_d[];}; - -layout (binding = 3) readonly buffer Bias {D_TYPE data_bias[];}; - -#ifdef MUL_MAT_ID -layout (binding = 4) readonly buffer IDS {int data_ids[];}; -#endif +#include "mul_mat_vec_iface.glsl" #include "dequant_funcs.glsl" @@ -48,8 +26,7 @@ layout (push_constant) uniform parameter uint batch_stride_b; uint batch_stride_d; - uint enable_bias; - uint enable_scale; + uint fusion_flags; #ifdef MUL_MAT_ID uint nei0; @@ -123,17 +100,24 @@ void reduce_result(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t if (tid == 0) { [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { [[unroll]] for (uint n = 0; n < num_rows; ++n) { - if (p.enable_bias != 0) { #ifdef MUL_MAT_ID - temp[j][n] += FLOAT_TYPE(data_bias[expert_id*p.stride_d + first_row + n]); -#else - temp[j][n] += FLOAT_TYPE(data_bias[j*p.batch_stride_d + d_offset + first_row + n]); -#endif + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + temp[j][n] += FLOAT_TYPE(data_fuse0[expert_id*p.stride_d + first_row + n]); } -#ifdef MUL_MAT_ID - if (p.enable_scale != 0) { + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_SCALE0) != 0) { const uint expert_idx = gl_GlobalInvocationID.y; - temp[j][n] *= FLOAT_TYPE(data_bias[expert_idx]); + temp[j][n] *= FLOAT_TYPE(data_fuse0[expert_idx]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_SCALE1) != 0) { + const uint expert_idx = gl_GlobalInvocationID.y; + temp[j][n] *= FLOAT_TYPE(data_fuse1[expert_idx]); + } +#else + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + temp[j][n] += FLOAT_TYPE(data_fuse0[j*p.batch_stride_d + d_offset + first_row + n]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS1) != 0) { + temp[j][n] += FLOAT_TYPE(data_fuse1[j*p.batch_stride_d + d_offset + first_row + n]); } #endif data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(temp[j][n]); @@ -171,17 +155,24 @@ void reduce_result(FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offs [[unroll]] for (uint s = 0; s < gl_NumSubgroups; ++s) { temp[j][n] += tmpsh[j][n][s]; } - if (p.enable_bias != 0) { #ifdef MUL_MAT_ID - temp[j][n] += FLOAT_TYPE(data_bias[expert_id*p.stride_d + first_row + n]); -#else - temp[j][n] += FLOAT_TYPE(data_bias[j*p.batch_stride_d + d_offset + first_row + n]); -#endif + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + temp[j][n] += FLOAT_TYPE(data_fuse0[expert_id*p.stride_d + first_row + n]); } 
-#ifdef MUL_MAT_ID - if (p.enable_scale != 0) { + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_SCALE0) != 0) { const uint expert_idx = gl_GlobalInvocationID.y; - temp[j][n] *= FLOAT_TYPE(data_bias[expert_idx]); + temp[j][n] *= FLOAT_TYPE(data_fuse0[expert_idx]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_SCALE1) != 0) { + const uint expert_idx = gl_GlobalInvocationID.y; + temp[j][n] *= FLOAT_TYPE(data_fuse1[expert_idx]); + } +#else + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + temp[j][n] += FLOAT_TYPE(data_fuse0[j*p.batch_stride_d + d_offset + first_row + n]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS1) != 0) { + temp[j][n] += FLOAT_TYPE(data_fuse1[j*p.batch_stride_d + d_offset + first_row + n]); } #endif data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(temp[j][n]); @@ -209,17 +200,24 @@ void reduce_result(FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offs if (tid == 0) { [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { [[unroll]] for (uint n = 0; n < num_rows; ++n) { - if (p.enable_bias != 0) { #ifdef MUL_MAT_ID - tmpsh[j][n][0] += FLOAT_TYPE(data_bias[expert_id*p.stride_d + first_row + n]); -#else - tmpsh[j][n][0] += FLOAT_TYPE(data_bias[j*p.batch_stride_d + d_offset + first_row + n]); -#endif + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + tmpsh[j][n][0] += FLOAT_TYPE(data_fuse0[expert_id*p.stride_d + first_row + n]); } -#ifdef MUL_MAT_ID - if (p.enable_scale != 0) { + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_SCALE0) != 0) { + const uint expert_idx = gl_GlobalInvocationID.y; + tmpsh[j][n][0] *= FLOAT_TYPE(data_fuse0[expert_idx]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_SCALE1) != 0) { const uint expert_idx = gl_GlobalInvocationID.y; - tmpsh[j][n][0] *= FLOAT_TYPE(data_bias[expert_idx]); + tmpsh[j][n][0] *= FLOAT_TYPE(data_fuse1[expert_idx]); + } +#else + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + tmpsh[j][n][0] += FLOAT_TYPE(data_fuse0[j*p.batch_stride_d + d_offset + first_row + n]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS1) != 0) { + tmpsh[j][n][0] += FLOAT_TYPE(data_fuse1[j*p.batch_stride_d + d_offset + first_row + n]); } #endif data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(tmpsh[j][n][0]); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl new file mode 100644 index 00000000000..14ab1fd74c0 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_iface.glsl @@ -0,0 +1,33 @@ +#include "types.glsl" + +#define MAT_VEC_FUSION_FLAGS_BIAS0 0x1 +#define MAT_VEC_FUSION_FLAGS_BIAS1 0x2 +#define MAT_VEC_FUSION_FLAGS_SCALE0 0x4 +#define MAT_VEC_FUSION_FLAGS_SCALE1 0x8 + +#ifndef MMQ +layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; +#if defined(A_TYPE_VEC4) +layout (binding = 0) readonly buffer AV4 {A_TYPE_VEC4 data_a_v4[];}; +#endif +#else +layout (binding = 0) readonly buffer A {A_TYPE_PACKED16 data_a[];}; +#endif + +layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; +#ifdef B_TYPE_VEC2 +layout (binding = 1) readonly buffer BV2 {B_TYPE_VEC2 data_b_v2[];}; +#endif +#ifdef B_TYPE_VEC4 +layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; +#endif + +layout (binding = 2) writeonly buffer D {D_TYPE data_d[];}; + +layout (binding = 3) readonly buffer Fuse0 {D_TYPE data_fuse0[];}; +layout (binding = 4) readonly buffer Fuse1 {D_TYPE data_fuse1[];}; + +#ifdef MUL_MAT_ID +layout (binding = 5) readonly buffer IDS {int data_ids[];}; +#endif + diff 
--git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp index 3f4584c984c..beea5296225 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_nc.comp @@ -8,14 +8,7 @@ layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; -layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; -layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; -layout (binding = 2) writeonly buffer D {D_TYPE dst[];}; - -layout (binding = 0) readonly buffer AV4 {A_TYPE_VEC4 data_a_v4[];}; -layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; - -layout (binding = 3) readonly buffer Bias {D_TYPE data_bias[];}; +#include "mul_mat_vec_iface.glsl" layout (push_constant) uniform parameter { @@ -31,7 +24,7 @@ layout (push_constant) uniform parameter uint nb03; uint nb13; uint nb23; - uint enable_bias; + uint fusion_flags; } p; shared FLOAT_TYPE tmp[BLOCK_SIZE]; @@ -120,9 +113,12 @@ void main() { } if (tid == 0) { - if (p.enable_bias != 0) { - tmp[0] += FLOAT_TYPE(data_bias[idst]); + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + tmp[0] += FLOAT_TYPE(data_fuse0[idst]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS1) != 0) { + tmp[0] += FLOAT_TYPE(data_fuse1[idst]); } - dst[idst] = tmp[0]; + data_d[idst] = tmp[0]; } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp index d51424d4175..32628c6e97d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_p021.comp @@ -10,14 +10,7 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; -layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; -layout (binding = 2) writeonly buffer D {D_TYPE dst[];}; - -layout (binding = 0) readonly buffer AV4 {A_TYPE_VEC4 data_a_v4[];}; -layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; - -layout (binding = 3) readonly buffer Bias {D_TYPE data_bias[];}; +#include "mul_mat_vec_iface.glsl" layout(constant_id = 0) const int BLOCK_SIZE = 32; // gqa_ratio is in the range [1,8] @@ -31,7 +24,7 @@ layout (push_constant) uniform parameter uint nchannels_y; uint b_offset; uint d_offset; - uint enable_bias; + uint fusion_flags; } p; #if !USE_SUBGROUP_ADD @@ -151,10 +144,13 @@ void main() { [[unroll]] for (uint c = 0; c < gqa_ratio; ++c) { // dst is not transposed and not permuted const uint idst = (channel + c)*nrows_dst + row_dst; - if (p.enable_bias != 0) { - temp[c] += FLOAT_TYPE(data_bias[idst]); + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS0) != 0) { + temp[c] += FLOAT_TYPE(data_fuse0[idst]); + } + if ((p.fusion_flags & MAT_VEC_FUSION_FLAGS_BIAS1) != 0) { + temp[c] += FLOAT_TYPE(data_fuse1[idst]); } - dst[idst] = temp[c]; + data_d[idst] = temp[c]; } } } From d7356143b287a30e53cada0159aeee7769a1bc5e Mon Sep 17 00:00:00 2001 From: shani-f Date: Sun, 16 Nov 2025 01:52:42 +0200 Subject: [PATCH 40/51] sycl : unify unary kernels with a generic implementation and enable wide operator support (llama/17213) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * SYCL: add generic unary op implementation for multiple ops (ABS/SGN/…); unify non-contiguous access * SYCL: update documentation and sycl.csv to reflect new unary op support * update ops.md after syncing SYCL.csv changes * Fix 
SYCL.csv merge conflict * Update ops.md after fixing SYCL.csv conflicts * Fix SYCL.csv tail after merge conflict and regenerate ops.md * Fix line endings and final newline in SYCL.csv * Remove TOPK_MOE entries from SYCL.csv as requested * Update ops.md after removing TOPK_MOE from SYCL.csv * Regenerated SYCL.csv and synced ops.md with upstream * Update ops.md using create_ops_docs.py --- ggml/src/ggml-sycl/element_wise.cpp | 362 +++++++++------------------- ggml/src/ggml-sycl/ggml-sycl.cpp | 11 +- 2 files changed, 118 insertions(+), 255 deletions(-) diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp index 810995d0cbf..7d54ce600ee 100644 --- a/ggml/src/ggml-sycl/element_wise.cpp +++ b/ggml/src/ggml-sycl/element_wise.cpp @@ -170,73 +170,31 @@ static __dpct_inline__ T op_trunc(T x) { return sycl::trunc(x); } -template -static void unary_op_sgn_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_sgn(x[i]); - } -} - -template -static void unary_op_abs_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_abs(x[i]); - } -} - -template -static void unary_op_elu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_elu(x[i]); - } -} - -template -static void unary_op_gelu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_gelu(x[i]); - } -} - -template -static void unary_op_silu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_silu(x[i]); - } -} - -template -static void unary_op_gelu_quick_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_gelu_quick(x[i]); - } -} - -template -static void unary_op_gelu_erf_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { +template +static void unary_op_generic_kernel( + const T * x, + T * dst, + const int k, + const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, + const size_t nb0, const size_t nb1, const size_t nb2, const size_t nb3, + const size_t nbd0, const size_t nbd1, const size_t nbd2, const size_t nbd3, + const sycl::nd_item<1> & item_ct1, + F func) { + + (void) ne3; SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_gelu_erf(x[i]); - } -} + const int64_t i0 = i % ne0; + const int64_t i1 = (i / ne0) % ne1; + const int64_t i2 = (i / (ne0*ne1)) % ne2; + const int64_t i3 = i / (ne0*ne1*ne2); -template -static void unary_op_tanh_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_tanh(x[i]); - } -} + const char * src_base = (const char *) x; + char * dst_base = (char *) dst; -template -static void unary_op_relu_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_relu(x[i]); - } -} + const T * srcp = (const T *)(src_base + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3 ); + T * dstp = (T *)(dst_base + i0*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3); -template -static void unary_op_sigmoid_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_sigmoid(x[i]); + *dstp = func(*srcp); } } @@ -261,27 +219,6 @@ static void 
unary_op_cos_kernel(const T * x, T * dst, const int k, const sycl::n } } -template -static void unary_op_hardsigmoid_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_hardsigmoid(x[i]); - } -} - -template -static void unary_op_hardswish_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_hardswish(x[i]); - } -} - -template -static void unary_op_exp_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_exp(x[i]); - } -} - template static void unary_op_log_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { SYCL_GLOBAL_ID_LOOP(k, item_ct1) { @@ -289,19 +226,6 @@ static void unary_op_log_kernel(const T * x, T * dst, const int k, const sycl::n } } -template -static void unary_op_neg_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_neg(x[i]); - } -} - -template -static void unary_op_step_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) { - SYCL_GLOBAL_ID_LOOP(k, item_ct1) { - dst[i] = op_step(x[i]); - } -} template static void unary_op_leaky_relu_kernel(const T * x, T * dst, const int k, float negative_slope, const sycl::nd_item<1> &item_ct1) { @@ -620,6 +544,48 @@ static inline void dispatch_ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx } } +template +static inline void ggml_sycl_op_unary( + ggml_backend_sycl_context & ctx, ggml_tensor * dst, F func) { + + ggml_tensor * src0 = dst->src[0]; + + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + const int64_t ne2 = dst->ne[2]; + const int64_t ne3 = dst->ne[3]; + + const size_t nb0 = src0->nb[0]; + const size_t nb1 = src0->nb[1]; + const size_t nb2 = src0->nb[2]; + const size_t nb3 = src0->nb[3]; + + const size_t nbd0 = dst->nb[0]; + const size_t nbd1 = dst->nb[1]; + const size_t nbd2 = dst->nb[2]; + const size_t nbd3 = dst->nb[3]; + + ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, + [=](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { + + const int num_blocks = ceil_div(k_elements, 256); + + stream->parallel_for( + sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), + sycl::range<1>(256)), + [=](sycl::nd_item<1> item_ct1) { + unary_op_generic_kernel( + src, dst_ptr, k_elements, + ne0, ne1, ne2, ne3, + nb0, nb1, nb2, nb3, + nbd0, nbd1, nbd2, nbd3, + item_ct1, + func + ); + }); + }); +} + static inline void ggml_sycl_op_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(dst->type == GGML_TYPE_F32); @@ -645,159 +611,75 @@ static inline void ggml_sycl_op_arange(ggml_backend_sycl_context & ctx, ggml_ten static inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, 256); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), - sycl::range<1>(256)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_sgn_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_sgn(x); + }); } + static inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - 
ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, 256); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), - sycl::range<1>(256)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_abs_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_abs(x); + }); } static inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, 256); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256), - sycl::range<1>(256)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_elu_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_elu(x); + }); } - static inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_SILU_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SILU_BLOCK_SIZE), - sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_silu_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_silu(x); + }); } static inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_GELU_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_GELU_BLOCK_SIZE), - sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_gelu_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_gelu(x); + }); } -static inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_GELU_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_GELU_BLOCK_SIZE), - sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_gelu_quick_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); +static inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_gelu_quick(x); + }); } -static inline void ggml_sycl_op_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_GELU_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_GELU_BLOCK_SIZE), - 
sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_gelu_erf_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); +static inline void ggml_sycl_op_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_gelu_erf(x); + }); } static inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_TANH_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_TANH_BLOCK_SIZE), - sycl::range<1>(SYCL_TANH_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_tanh_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_tanh(x); + }); } static inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_RELU_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_RELU_BLOCK_SIZE), - sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_relu_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_relu(x); + }); } static inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_HARDSIGMOID_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_HARDSIGMOID_BLOCK_SIZE), - sycl::range<1>(SYCL_HARDSIGMOID_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_hardsigmoid_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_hardsigmoid(x); + }); } static inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_HARDSWISH_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_HARDSWISH_BLOCK_SIZE), - sycl::range<1>(SYCL_HARDSWISH_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_hardswish_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_hardswish(x); + }); } static inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_EXP_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_EXP_BLOCK_SIZE), - sycl::range<1>(SYCL_EXP_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_exp_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + 
ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_exp(x); + }); } static inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { @@ -814,42 +696,22 @@ static inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor } static inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_NEG_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_NEG_BLOCK_SIZE), - sycl::range<1>(SYCL_NEG_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_neg_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_neg(x); + }); } + static inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_NEG_BLOCK_SIZE); // Using NEG block size - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_NEG_BLOCK_SIZE), - sycl::range<1>(SYCL_NEG_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_step_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_step(x); + }); } static inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst, - [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) { - const int num_blocks = ceil_div(k_elements, SYCL_SIGMOID_BLOCK_SIZE); - stream->parallel_for( - sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIGMOID_BLOCK_SIZE), - sycl::range<1>(SYCL_SIGMOID_BLOCK_SIZE)), - [=](sycl::nd_item<1> item_ct1) { - unary_op_sigmoid_kernel(src, dst_ptr, k_elements, item_ct1); - }); - }); + ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) { + return op_sigmoid(x); + }); } static inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 941fd41c0d0..3f1bdfb9f1b 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4360,21 +4360,22 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g } case GGML_OP_UNARY: switch (ggml_get_unary_op(op)) { + case GGML_UNARY_OP_SGN: + case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_NEG: case GGML_UNARY_OP_STEP: + case GGML_UNARY_OP_RELU: + case GGML_UNARY_OP_HARDSIGMOID: + case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_SILU: - case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SIGMOID: - case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_GELU_ERF: - case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: - case GGML_UNARY_OP_SGN: - case GGML_UNARY_OP_ABS: case GGML_UNARY_OP_ELU: + return true; case GGML_UNARY_OP_FLOOR: case GGML_UNARY_OP_CEIL: case GGML_UNARY_OP_ROUND: From 14dac59d3a307be631cd3ddac62b7e5717773a14 Mon Sep 17 00:00:00 2001 From: shaofeiqi <109865877+shaofeiqi@users.noreply.github.com> Date: Sat, 15 Nov 2025 17:33:10 -0800 Subject: [PATCH 41/51] opencl: add kernel to handle mat 
mul in attention to improve encoding speed (llama/17181) * Add mul_mm_f16_f32_kq_kqv kernel * Add ggml_cl_mul_mat_kq_kqv_adreno func * fix whitespace * remove unused variable * remove redundant * refactor and clean up * remove trailing whitespace --- ggml/src/ggml-opencl/CMakeLists.txt | 1 + ggml/src/ggml-opencl/ggml-opencl.cpp | 170 +++++++++++ .../kernels/mul_mm_f16_f32_kq_kqv.cl | 273 ++++++++++++++++++ 3 files changed, 444 insertions(+) create mode 100644 ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt index d3d97f375e8..681c81b88a1 100644 --- a/ggml/src/ggml-opencl/CMakeLists.txt +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -119,6 +119,7 @@ set(GGML_OPENCL_KERNELS pad repeat mul_mat_f16_f32 + mul_mm_f16_f32_kq_kqv conv2d conv2d_f16_f32 flash_attn_f32_f16 diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 465272fab90..b0abfa3c1d4 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -407,6 +407,8 @@ struct ggml_backend_opencl_context { cl_program program_mul_mv_f32_f32; cl_program program_mul; cl_program program_mul_mat_f16_f32_tiled; + cl_program program_mul_mm_f16_f32_kqv; + cl_program program_mul_mm_f16_f32_kq; cl_program program_div; cl_program program_sub; cl_program program_norm; @@ -481,6 +483,8 @@ struct ggml_backend_opencl_context { cl_kernel kernel_mul_mat_f16_f32; cl_kernel kernel_mul_mat_f16_f32_l4; cl_kernel kernel_mul_mat_f16_f32_tiled; + cl_kernel kernel_mul_mm_f16_f32_kqv; + cl_kernel kernel_mul_mm_f16_f32_kq; cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v; cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0; cl_kernel kernel_convert_block_mxfp4, kernel_convert_block_mxfp4_trans, kernel_restore_block_mxfp4, kernel_restore_block_mxfp4_trans; @@ -1235,6 +1239,25 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve GGML_LOG_CONT("."); } + // mul_mm_f16_f32_kq_kqv + { +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "mul_mm_f16_f32_kq_kqv.cl.h" + }; +#else + const std::string kernel_src = read_file("mul_mm_f16_f32_kq_kqv.cl"); +#endif + backend_ctx->program_mul_mm_f16_f32_kqv = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts+" -DKQV "); + backend_ctx->program_mul_mm_f16_f32_kq = + build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts); + + CL_CHECK((backend_ctx->kernel_mul_mm_f16_f32_kqv = clCreateKernel(backend_ctx->program_mul_mm_f16_f32_kqv, "mul_mm_f16_f32_kqv", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mm_f16_f32_kq = clCreateKernel(backend_ctx->program_mul_mm_f16_f32_kq, "mul_mm_f16_f32_kq", &err), err)); + GGML_LOG_CONT("."); + } + // mul { #ifdef GGML_OPENCL_EMBED_KERNELS @@ -6665,6 +6688,146 @@ static void ggml_cl_conv_2d(ggml_backend_t backend, const ggml_tensor * src0, co backend_ctx->enqueue_ndrange_kernel(kernel, 2, global_work_size, local_work_size, dst); } +static void ggml_cl_mul_mat_kq_kqv_adreno(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl 
*)dst->extra;
+
+    const int ne00 = src0->ne[0];
+    const int ne01 = src0->ne[1];
+    const int ne02 = src0->ne[2];
+
+    const cl_ulong nb01 = src0->nb[1];
+    const cl_ulong nb02 = src0->nb[2];
+
+    const int ne10 = src1->ne[0];
+    const int ne11 = src1->ne[1];
+    const int ne12 = src1->ne[2];
+
+    const cl_ulong nb10 = src1->nb[0];
+
+    const int ne0 = dst->ne[0];
+    const int ne1 = dst->ne[1];
+
+    GGML_ASSERT(ne00 == ne10);
+
+    cl_kernel kernel;
+    cl_context context = backend_ctx->context;
+
+    cl_int status;
+    cl_image_format img_fmt_1d;
+    cl_image_desc img_desc_1d;
+    cl_buffer_region region;
+    cl_mem A_image1d;
+    cl_mem A_sub_buffer;
+    cl_mem B_sub_buffer;
+    cl_mem D_image1d;
+    cl_mem D_sub_buffer;
+
+    int M = ne01;
+    int N = ne1;
+    int K = ne00;
+
+    if (nb01 > nb02) {
+        // KQ
+        kernel = backend_ctx->kernel_mul_mm_f16_f32_kq;
+    } else {
+        // KQV
+        kernel = backend_ctx->kernel_mul_mm_f16_f32_kqv;
+    }
+    // create sub-buffer for A
+    // <--------------------------------------------> //
+    extra0 = src0->view_src ? (ggml_tensor_extra_cl *)src0->view_src->extra : (ggml_tensor_extra_cl *)src0->extra;
+
+    region.origin = (extra0->offset);
+    if (nb01 > nb02) {
+        // KQ
+        region.size = nb01 * ne01;
+    } else {
+        // KQV
+        region.size = nb02 * ne02;
+    }
+
+    A_sub_buffer = clCreateSubBuffer((extra0->data_device), 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status);
+    CL_CHECK(status);
+
+    // <--------------------------------------------> //
+
+    // create sub-buffer for B
+    // <--------------------------------------------> //
+    region.origin = (extra1->offset);
+    region.size = nb10 * ne10 * ne11 * ne12;
+    B_sub_buffer = clCreateSubBuffer((extra1->data_device), 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status);
+    CL_CHECK(status);
+    // <--------------------------------------------> //
+
+    img_fmt_1d = {CL_RGBA, CL_FLOAT};
+    memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+    img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+    if (nb01 > nb02) {
+        img_desc_1d.image_width = (nb01 * ne01 / 4)/4;
+    }
+    else {
+        img_desc_1d.image_width = (nb02 * ne02 / 4)/4;
+    }
+    img_desc_1d.buffer = A_sub_buffer;
+    A_image1d = clCreateImage(context, CL_MEM_READ_ONLY, &img_fmt_1d, &img_desc_1d, NULL, &status);
+    CL_CHECK(status);
+
+    // create sub-buffer for output C
+    // <--------------------------------------------> //
+    region.origin = (extrad->offset);
+    region.size = ne0 * ne1 * dst->ne[2] * dst->nb[0]; // size of C in bytes
+    D_sub_buffer = clCreateSubBuffer((extrad->data_device), 0, CL_BUFFER_CREATE_TYPE_REGION, &region, &status);
+    CL_CHECK(status);
+    // <--------------------------------------------> //
+
+    // create image for C output
+    // <--------------------------------------------> //
+    img_fmt_1d = {CL_R, CL_FLOAT};
+    memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+    img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+    img_desc_1d.image_width = ne0 * ne1 * dst->ne[2] * dst->nb[0] / 4;
+    img_desc_1d.buffer = D_sub_buffer;
+    D_image1d = clCreateImage(context, CL_MEM_WRITE_ONLY, &img_fmt_1d, &img_desc_1d, NULL, &status);
+    CL_CHECK(status);
+    // <--------------------------------------------> //
+
+    int offset_src0 = 0;
+    int offset_src1 = 0;
+
+    // set kernel args
+    // <--------------------------------------------> //
+    cl_uint k_arg = 0;
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &A_image1d));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &offset_src0));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &B_sub_buffer));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &offset_src1));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &D_image1d));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &extrad->offset));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &M));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &K));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &N));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &ne02));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &ne12));
+    CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int),    &nb01));
+
+    size_t global_work_size[3] = {64, static_cast<size_t>(((M+63)/64)), static_cast<size_t>(((N+31)/32)*ne12)};
+    size_t local_work_size[3] = {64, 1, 2};
+
+    backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+
+    // deallocate sub buffers and images
+    // <--------------------------------------------> //
+    CL_CHECK(clReleaseMemObject(A_image1d));
+    CL_CHECK(clReleaseMemObject(D_image1d));
+    CL_CHECK(clReleaseMemObject(A_sub_buffer));
+    CL_CHECK(clReleaseMemObject(B_sub_buffer));
+    CL_CHECK(clReleaseMemObject(D_sub_buffer));
+}
+
 static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(src0);
     GGML_ASSERT(src0->extra);
@@ -6731,6 +6894,13 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
 #ifdef GGML_OPENCL_USE_ADRENO_KERNELS
     cl_context context = backend_ctx->context;
 
+    if(src0t == GGML_TYPE_F16 && src1t == GGML_TYPE_F32){
+        if (ne01 >= 64 && ne1 >= 32 && ne00 >= 16 && (ne12 % ne02) == 0){
+            ggml_cl_mul_mat_kq_kqv_adreno(backend, src0, src1, dst);
+            return;
+        }
+    }
+
     if (ne01 && ne1 && use_adreno_kernels(backend_ctx, src0)) {
         // init CL objects
diff --git a/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl b/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl
new file mode 100644
index 00000000000..ac0274b64fc
--- /dev/null
+++ b/ggml/src/ggml-opencl/kernels/mul_mm_f16_f32_kq_kqv.cl
@@ -0,0 +1,273 @@
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+
+#define LM_FIRST_256B  0
+#define LM_SECOND_256B 64
+#define LM_THIRD_256B  128
+#define LM_FOURTH_256B 192
+
+
+inline float16 mm_load_a(
+    image1d_buffer_t matrix_A,
+    uint subMatrixAStartInElements,
+    int nb01,
+    int line_stride_matrix_A_in_bytes
+) {
+    __private float8 regA;
+    size_t sub_block_id_m = get_local_id(0);
+
+#ifdef KQV
+    uint a_texCoord = subMatrixAStartInElements/2 + (sub_block_id_m * nb01/4);
+#else // KQ
+    uint a_texCoord = subMatrixAStartInElements/2 + (sub_block_id_m * line_stride_matrix_A_in_bytes/4);
+#endif
+
+    regA.s0123 = read_imagef(matrix_A, a_texCoord/4);
+    regA.s4567 = read_imagef(matrix_A, (a_texCoord+4)/4);
+
+    return convert_float16(as_half16(regA));
+}
+
+inline float4 alu_32(
+    float16 regA,
+    __local float4* matrix_B_vec
+) {
+
+    __private float4 rC = 0;
+    int i = get_sub_group_id() * 64;
+
+    rC += regA.s0 * matrix_B_vec[i];
+    rC += regA.s1 * matrix_B_vec[i + 16];
+    rC += regA.s4 * matrix_B_vec[i + 1];
+    rC += regA.s5 * matrix_B_vec[i + 17];
+    rC += regA.s8 * matrix_B_vec[i + 2];
+    rC += regA.s9 * matrix_B_vec[i + 18];
+    rC += regA.sc * matrix_B_vec[i + 3];
+    rC += regA.sd * matrix_B_vec[i + 19];
+
+    i += 32;
+
+    rC += regA.s2 * matrix_B_vec[i];
+    rC += regA.s3 * matrix_B_vec[i + 16];
+    rC += regA.s6 * matrix_B_vec[i + 1];
+    rC += regA.s7 * matrix_B_vec[i + 17];
+    rC += regA.sa * matrix_B_vec[i + 2];
+    rC += regA.sb * matrix_B_vec[i + 18];
+    rC += regA.se * matrix_B_vec[i + 3];
+    rC += regA.sf * 
matrix_B_vec[i + 19]; + + return rC; +} + +inline float16 alu_16( + float16 regA, + __local float* matrix_B_local +) { + float16 out; + __local float4* matrix_B_vec = (__local float4*)matrix_B_local; + + out.s0123 = alu_32(regA, matrix_B_vec); + out.s4567 = alu_32(regA, matrix_B_vec + 4); + out.s89ab = alu_32(regA, matrix_B_vec + 8); + out.scdef = alu_32(regA, matrix_B_vec + 12); + + return out; +} + +inline void mm_mad( + __local float* matrix_B_local, + float16 regA, + float8 regB, + uint b_localOffsetInWords, + float16* regC0_ptr, + float16* regC1_ptr +) { + int offset = b_localOffsetInWords + get_sub_group_id() * 256; + + matrix_B_local[offset + LM_FIRST_256B] = regB.s0; + matrix_B_local[offset + LM_SECOND_256B] = regB.s1; + matrix_B_local[offset + LM_THIRD_256B] = regB.s2; + matrix_B_local[offset + LM_FOURTH_256B] = regB.s3; + + float16 add0 = alu_16(regA, matrix_B_local); + *regC0_ptr += add0; + + matrix_B_local[offset + LM_FIRST_256B] = regB.s4; + matrix_B_local[offset + LM_SECOND_256B] = regB.s5; + matrix_B_local[offset + LM_THIRD_256B] = regB.s6; + matrix_B_local[offset + LM_FOURTH_256B] = regB.s7; + + float16 add1 = alu_16(regA, matrix_B_local); + *regC1_ptr += add1; +} + +inline void mm_store_c_N( + __write_only image1d_buffer_t matrix_C, + float16 regC0, + float16 regC1, + uint subMatrixCStartInElements, + int line_stride_matrix_C_in_bytes, + int mask +) { + size_t sub_block_id_m = get_local_id(0); + + uint strideInWords = line_stride_matrix_C_in_bytes/4; + uint c_coordInWords_0 = (subMatrixCStartInElements + sub_block_id_m); + + uint c_coordInWords_1 = c_coordInWords_0 + 1 * strideInWords; + uint c_coordInWords_2 = c_coordInWords_0 + 2 * strideInWords; + uint c_coordInWords_3 = c_coordInWords_0 + 3 * strideInWords; + uint c_coordInWords_4 = c_coordInWords_0 + 4 * strideInWords; + uint c_coordInWords_5 = c_coordInWords_0 + 5 * strideInWords; + uint c_coordInWords_6 = c_coordInWords_0 + 6 * strideInWords; + uint c_coordInWords_7 = c_coordInWords_0 + 7 * strideInWords; + uint c_coordInWords_8 = c_coordInWords_0 + 8 * strideInWords; + uint c_coordInWords_9 = c_coordInWords_0 + 9 * strideInWords; + uint c_coordInWords_10 = c_coordInWords_0 + 10 * strideInWords; + uint c_coordInWords_11 = c_coordInWords_0 + 11 * strideInWords; + uint c_coordInWords_12 = c_coordInWords_0 + 12 * strideInWords; + uint c_coordInWords_13 = c_coordInWords_0 + 13 * strideInWords; + uint c_coordInWords_14 = c_coordInWords_0 + 14 * strideInWords; + uint c_coordInWords_15 = c_coordInWords_0 + 15 * strideInWords; + uint c_coordInWords_16 = c_coordInWords_0 + 16 * strideInWords; + uint c_coordInWords_17 = c_coordInWords_0 + 17 * strideInWords; + uint c_coordInWords_18 = c_coordInWords_0 + 18 * strideInWords; + uint c_coordInWords_19 = c_coordInWords_0 + 19 * strideInWords; + uint c_coordInWords_20 = c_coordInWords_0 + 20 * strideInWords; + uint c_coordInWords_21 = c_coordInWords_0 + 21 * strideInWords; + uint c_coordInWords_22 = c_coordInWords_0 + 22 * strideInWords; + uint c_coordInWords_23 = c_coordInWords_0 + 23 * strideInWords; + uint c_coordInWords_24 = c_coordInWords_0 + 24 * strideInWords; + uint c_coordInWords_25 = c_coordInWords_0 + 25 * strideInWords; + uint c_coordInWords_26 = c_coordInWords_0 + 26 * strideInWords; + uint c_coordInWords_27 = c_coordInWords_0 + 27 * strideInWords; + uint c_coordInWords_28 = c_coordInWords_0 + 28 * strideInWords; + uint c_coordInWords_29 = c_coordInWords_0 + 29 * strideInWords; + uint c_coordInWords_30 = c_coordInWords_0 + 30 * strideInWords; + uint c_coordInWords_31 = 
c_coordInWords_0 + 31 * strideInWords; + + if (mask > 0) { write_imagef(matrix_C, c_coordInWords_0, regC0.s0); } + if (mask > 1) { write_imagef(matrix_C, c_coordInWords_1, regC0.s1); } + if (mask > 2) { write_imagef(matrix_C, c_coordInWords_2, regC0.s2); } + if (mask > 3) { write_imagef(matrix_C, c_coordInWords_3, regC0.s3); } + if (mask > 4) { write_imagef(matrix_C, c_coordInWords_4, regC0.s4); } + if (mask > 5) { write_imagef(matrix_C, c_coordInWords_5, regC0.s5); } + if (mask > 6) { write_imagef(matrix_C, c_coordInWords_6, regC0.s6); } + if (mask > 7) { write_imagef(matrix_C, c_coordInWords_7, regC0.s7); } + if (mask > 8) { write_imagef(matrix_C, c_coordInWords_8, regC0.s8); } + if (mask > 9) { write_imagef(matrix_C, c_coordInWords_9, regC0.s9); } + if (mask > 10) { write_imagef(matrix_C, c_coordInWords_10, regC0.sa); } + if (mask > 11) { write_imagef(matrix_C, c_coordInWords_11, regC0.sb); } + if (mask > 12) { write_imagef(matrix_C, c_coordInWords_12, regC0.sc); } + if (mask > 13) { write_imagef(matrix_C, c_coordInWords_13, regC0.sd); } + if (mask > 14) { write_imagef(matrix_C, c_coordInWords_14, regC0.se); } + if (mask > 15) { write_imagef(matrix_C, c_coordInWords_15, regC0.sf); } + if (mask > 16) { write_imagef(matrix_C, c_coordInWords_16, regC1.s0); } + if (mask > 17) { write_imagef(matrix_C, c_coordInWords_17, regC1.s1); } + if (mask > 18) { write_imagef(matrix_C, c_coordInWords_18, regC1.s2); } + if (mask > 19) { write_imagef(matrix_C, c_coordInWords_19, regC1.s3); } + if (mask > 20) { write_imagef(matrix_C, c_coordInWords_20, regC1.s4); } + if (mask > 21) { write_imagef(matrix_C, c_coordInWords_21, regC1.s5); } + if (mask > 22) { write_imagef(matrix_C, c_coordInWords_22, regC1.s6); } + if (mask > 23) { write_imagef(matrix_C, c_coordInWords_23, regC1.s7); } + if (mask > 24) { write_imagef(matrix_C, c_coordInWords_24, regC1.s8); } + if (mask > 25) { write_imagef(matrix_C, c_coordInWords_25, regC1.s9); } + if (mask > 26) { write_imagef(matrix_C, c_coordInWords_26, regC1.sa); } + if (mask > 27) { write_imagef(matrix_C, c_coordInWords_27, regC1.sb); } + if (mask > 28) { write_imagef(matrix_C, c_coordInWords_28, regC1.sc); } + if (mask > 29) { write_imagef(matrix_C, c_coordInWords_29, regC1.sd); } + if (mask > 30) { write_imagef(matrix_C, c_coordInWords_30, regC1.se); } + if (mask > 31) { write_imagef(matrix_C, c_coordInWords_31, regC1.sf); } +} + +#define TILESIZE_K 16 +#define TILESIZE_M 64 +#define TILESIZE_N 32 +#ifdef KQV +__kernel void mul_mm_f16_f32_kqv( +#else +__kernel void mul_mm_f16_f32_kq( +#endif + __read_only image1d_buffer_t matrix_A, + int offset0, + __global float* matrix_B, + int offset1, + __write_only image1d_buffer_t matrix_C, + int offsetd, + int M, int K, int N, + int D_A, + int D_B, + int nb01 +) { + + uint block_id_m = get_global_id(1); + uint block_id_n = get_global_id(2) % ((N+TILESIZE_N-1)/TILESIZE_N); + uint block_id_d = get_global_id(2) / ((N+TILESIZE_N-1)/TILESIZE_N); + + __private float16 regA; + __private float8 regB; + __private float16 regC0; + __private float16 regC1; + + const uint col = block_id_m * TILESIZE_M; + const uint row = block_id_n * TILESIZE_N; + const uint depth_A = block_id_d / (D_B/D_A); + const uint depth_B = block_id_d; + +#ifdef KQV + int line_stride_matrix_A_in_bytes = nb01 * M; + int line_stride_matrix_B_in_bytes = K * N * 4; +#else + int line_stride_matrix_A_in_bytes = K * D_A * 2; + int line_stride_matrix_B_in_bytes = K * D_B * 4; +#endif + + int line_stride_matrix_C_in_bytes = M * 4; + + const uint strideAinElements = 
line_stride_matrix_A_in_bytes / 2;
+    const uint strideBinElements = line_stride_matrix_B_in_bytes / 4;
+
+    size_t sub_block_id_m = get_local_id(0);
+
+    uint b_localOffsetInWords = (sub_block_id_m/16)*16 +
+        ((((sub_block_id_m)>>0)&1)<<2) +
+        ((((sub_block_id_m)>>1)&1)<<3) +
+        ((((sub_block_id_m)>>2)&1)<<0) +
+        ((((sub_block_id_m)>>3)&1)<<1);
+
+    uint2 b_globalOffsetInWords_xy = {((sub_block_id_m%4)*4), (sub_block_id_m>>2)};
+    uint b_globalOffsetInWords00, b_globalOffsetInWords16;
+#ifdef KQV
+    b_globalOffsetInWords00 = b_globalOffsetInWords_xy.x + b_globalOffsetInWords_xy.y*K;
+    b_globalOffsetInWords16 = b_globalOffsetInWords00 + (16 * K);
+    uint subMatrixAStartInElements = depth_A * strideAinElements + col * nb01 / 2;
+    uint subMatrixBStartInElements = depth_B * strideBinElements + row * K;
+#else
+    b_globalOffsetInWords00 = b_globalOffsetInWords_xy.x + b_globalOffsetInWords_xy.y*line_stride_matrix_B_in_bytes/4;
+    b_globalOffsetInWords16 = b_globalOffsetInWords00 + (16 * line_stride_matrix_B_in_bytes/4);
+    uint subMatrixAStartInElements = col * strideAinElements + depth_A * K;
+    uint subMatrixBStartInElements = row * strideBinElements + depth_B * K;
+#endif
+
+    __local float matrix_B_local[1024];
+
+    for (uint step=0; step < K; step+=TILESIZE_K) {
+        size_t sub_block_id_m = get_local_id(0);
+        regA = mm_load_a(matrix_A, subMatrixAStartInElements, nb01, line_stride_matrix_A_in_bytes);
+
+        uint b_coordInWords00 = subMatrixBStartInElements + b_globalOffsetInWords00;
+        uint b_coordInWords16 = subMatrixBStartInElements + b_globalOffsetInWords16;
+
+        regB.s0123 = vload4(b_coordInWords00/4, matrix_B);
+        regB.s4567 = vload4(b_coordInWords16/4, matrix_B);
+
+        mm_mad(matrix_B_local, regA, regB, b_localOffsetInWords, &regC0, &regC1);
+
+        subMatrixAStartInElements += TILESIZE_K;
+        subMatrixBStartInElements += TILESIZE_K;
+    }
+
+    uint subMatrixCStartInElements = depth_B * N * M + row * M + col;
+    mm_store_c_N(matrix_C, regC0, regC1, subMatrixCStartInElements, line_stride_matrix_C_in_bytes, (N-block_id_n*32));
+}
+
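A note on the kernel above: ggml_cl_mul_mat_kq_kqv_adreno builds the same source twice (once with -DKQV) and picks the variant purely from the stride layout of src0. A rough C++ restatement of that host-side selection logic; tensor_meta, classify and use_adreno_kq_kqv are hypothetical names for illustration, not code from the patch:

    #include <cstdint>

    // Hypothetical mirror of the ggml shape/stride fields the patch reads.
    struct tensor_meta {
        int64_t  ne[4]; // elements per dimension
        uint64_t nb[4]; // strides in bytes
    };

    enum class adreno_mm_kind { kq, kqv };

    // The patch keys the choice on src0's strides: when the per-row stride
    // nb[1] exceeds the per-plane stride nb[2] it selects the KQ kernel,
    // otherwise the KQV kernel.
    inline adreno_mm_kind classify(const tensor_meta & src0) {
        return src0.nb[1] > src0.nb[2] ? adreno_mm_kind::kq : adreno_mm_kind::kqv;
    }

    // Shape gates mirroring the hook added to ggml_cl_mul_mat: enough rows
    // and columns to fill a 64x32 output tile, at least 16 elements along K,
    // and a head count that broadcasts evenly.
    inline bool use_adreno_kq_kqv(const tensor_meta & src0, const tensor_meta & src1,
                                  const tensor_meta & dst) {
        return src0.ne[1] >= 64 && dst.ne[1] >= 32 && src0.ne[0] >= 16 &&
               (src1.ne[2] % src0.ne[2]) == 0;
    }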
From 9c2bde0fc02abe563b5e8c0d0f834aa06fdfcb98 Mon Sep 17 00:00:00 2001
From: lhez
Date: Sat, 15 Nov 2025 17:40:14 -0800
Subject: [PATCH 42/51] opencl: fix rms_norm_mul (llama/17250)

* opencl: use subgroup reduce for reduction in rms_norm_mul

* opencl: add comment about workgroup size
---
 ggml/src/ggml-opencl/ggml-opencl.cpp     |  2 +-
 ggml/src/ggml-opencl/kernels/rms_norm.cl | 35 +++++++++++++++++-------
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp
index b0abfa3c1d4..4cb6afe9271 100644
--- a/ggml/src/ggml-opencl/ggml-opencl.cpp
+++ b/ggml/src/ggml-opencl/ggml-opencl.cpp
@@ -5705,7 +5705,7 @@ static void ggml_opencl_op_rms_norm_fused(ggml_backend_t backend, ggml_tensor *
     CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb2));
     CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &nb3));
     CL_CHECK(clSetKernelArg(kernel, 23, sizeof(float),    &eps));
-    CL_CHECK(clSetKernelArg(kernel, 24, sizeof(float)*nth/sgs, NULL));
+    CL_CHECK(clSetKernelArg(kernel, 24, sizeof(float)*sgs, NULL));
 
     backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
 }
diff --git a/ggml/src/ggml-opencl/kernels/rms_norm.cl b/ggml/src/ggml-opencl/kernels/rms_norm.cl
index ecd053cb4c1..4b18d17d6f8 100644
--- a/ggml/src/ggml-opencl/kernels/rms_norm.cl
+++ b/ggml/src/ggml-opencl/kernels/rms_norm.cl
@@ -134,6 +134,15 @@ kernel void kernel_rms_norm_mul(
     src1 = src1 + offset1;
     dst  = dst  + offsetd;
 
+    // The size of sum is sizeof(float)*subgroup_size.
+    // Each subgroup writes its partial sum to this array.
+    // So the number of subgroups per workgroup for this kernel cannot exceed the subgroup size.
+    // This is generally true -
+    // for subgroup size 64, workgroup size should be less than 4096 (the max is usually 1024).
+    if (get_sub_group_id() == 0) {
+        sum[get_sub_group_local_id()] = 0.0f;
+    }
+
     int i03 = get_group_id(2);
     int i02 = get_group_id(1);
     int i01 = get_group_id(0);
@@ -148,24 +157,30 @@ kernel void kernel_rms_norm_mul(
         sumf += dot(x[i00], x[i00]);
     }
     sumf = sub_group_reduce_add(sumf);
+
+    barrier(CLK_LOCAL_MEM_FENCE);
+
     if (get_sub_group_local_id() == 0) {
         sum[get_sub_group_id()] = sumf;
     }
 
     barrier(CLK_LOCAL_MEM_FENCE);
-    for (uint i = get_local_size(0) / get_max_sub_group_size() / 2; i > 0; i /= 2) {
-        if (get_local_id(0) < i) {
-            sum[get_local_id(0)] += sum[get_local_id(0) + i];
-        }
-    }
-    if (get_local_id(0) == 0) {
-        sum[0] /= ne00;
-    }
+    //for (uint i = get_local_size(0) / get_max_sub_group_size() / 2; i > 0; i /= 2) {
+    //    if (get_local_id(0) < i) {
+    //        sum[get_local_id(0)] += sum[get_local_id(0) + i];
+    //    }
+    //}
+    //if (get_local_id(0) == 0) {
+    //    sum[0] /= ne00;
+    //}
 
-    barrier(CLK_LOCAL_MEM_FENCE);
+    //barrier(CLK_LOCAL_MEM_FENCE);
+
+    sumf = sum[get_sub_group_local_id()];
+    sumf = sub_group_reduce_add(sumf);
 
-    float mean = sum[0];
+    float mean = sumf / ne00;
     float scale = 1.0f/sqrt(mean + eps);
 
     global float4 * y = (global float4 *) (dst + i03*nb3 + i02*nb2 + i01*nb1);
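The fix above replaces the workgroup tree reduction with two subgroup passes: each subgroup folds its lanes with sub_group_reduce_add and parks one partial per subgroup in the sum array, then a single subgroup folds the partials, which is why the host now allocates only one float per subgroup lane. A host-side C++ model of the scheme (illustrative only; rms_sum and its loop structure are not the kernel code):

    #include <algorithm>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    // Sum of squares computed the way the fixed kernel does it: one partial
    // per "subgroup", staged in a scratch buffer of subgroup_size entries,
    // then reduced again. The scratch stays valid only while the number of
    // subgroups does not exceed the subgroup size, which is the constraint
    // the in-kernel comment spells out.
    float rms_sum(const std::vector<float> & x, std::size_t subgroup_size) {
        const std::size_t n_subgroups = (x.size() + subgroup_size - 1) / subgroup_size;
        std::vector<float> partial(subgroup_size, 0.0f); // zero-initialised scratch
        for (std::size_t sg = 0; sg < n_subgroups && sg < subgroup_size; ++sg) {
            const std::size_t begin = sg * subgroup_size;
            const std::size_t end   = std::min(x.size(), begin + subgroup_size);
            for (std::size_t i = begin; i < end; ++i) {
                partial[sg] += x[i] * x[i]; // first stage: within one subgroup
            }
        }
        // Second stage: a single subgroup-style pass over the partials.
        return std::accumulate(partial.begin(), partial.end(), 0.0f);
    }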
From 844275a860fe8372a6f6af98c8a65664ecc8e235 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 16 Nov 2025 09:50:26 +0200
Subject: [PATCH 43/51] metal : remove obsolete asserts (llama/17295)

---
 ggml/src/ggml-metal/ggml-metal-ops.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/ggml/src/ggml-metal/ggml-metal-ops.cpp b/ggml/src/ggml-metal/ggml-metal-ops.cpp
index ae098d371f1..0c1714fdbc0 100644
--- a/ggml/src/ggml-metal/ggml-metal-ops.cpp
+++ b/ggml/src/ggml-metal/ggml-metal-ops.cpp
@@ -2191,8 +2191,6 @@ int ggml_metal_op_flash_attn_ext(ggml_metal_op_t ctx, int idx) {
         ggml_metal_encoder_dispatch_threadgroups(enc, ncpsg, std::max(ne12, ne32), std::max(ne13, ne33), 32, 1, 1);
 
         need_sync = true;
-    } else {
-        assert(ggml_metal_op_flash_attn_ext_extra_pad(op) == 0);
     }
 
     if (has_mask) {
@@ -2222,8 +2220,6 @@ int ggml_metal_op_flash_attn_ext(ggml_metal_op_t ctx, int idx) {
         ggml_metal_encoder_dispatch_threadgroups(enc, nblk0, nblk1, ne32*ne33, 32, 1, 1);
 
         need_sync = true;
-    } else {
-        assert(ggml_metal_op_flash_attn_ext_extra_blk(op) == 0);
     }
 
     if (need_sync) {
@@ -2363,8 +2359,6 @@ int ggml_metal_op_flash_attn_ext(ggml_metal_op_t ctx, int idx) {
         ggml_metal_encoder_dispatch_threadgroups(enc, ncpsg, std::max(ne12, ne32), std::max(ne13, ne33), 32, 1, 1);
 
         need_sync = true;
-    } else {
-        assert(ggml_metal_op_flash_attn_ext_extra_pad(op) == 0);
     }
 
     if (need_sync) {

From 75cfe4a6cab6cafc5cccd71b36ac8864d84349f6 Mon Sep 17 00:00:00 2001
From: Ruben Ortlam
Date: Sun, 16 Nov 2025 19:38:17 +0100
Subject: [PATCH 44/51] vulkan: fix MMQ quantize_y condition (llama/17301)

---
 ggml/src/ggml-vulkan/ggml-vulkan.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index ef99c3c1eba..5bdc675cf6e 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -6444,7 +6444,7 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub
 
     const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
 
-    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && (ne11 * ne10) % 4 == 0;
+    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && !y_non_contig && (ne11 * ne10) % 4 == 0;
 
     // Check for mmq first
     vk_matmul_pipeline mmp = quantize_y ? ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, GGML_TYPE_Q8_1, (ggml_prec)dst->op_params[0]) : nullptr;
@@ -6731,7 +6731,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
     const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
 
     const bool f16_f32_kernel = src1->type == GGML_TYPE_F32;
-    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && (ne11 * ne10) % 4 == 0 && ggml_vk_should_use_mmvq(ctx->device, ne01, ne11, ne10, src0->type);
+    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && !y_non_contig && (ne11 * ne10) % 4 == 0 && ggml_vk_should_use_mmvq(ctx->device, ne01, ne11, ne10, src0->type);
 
     vk_pipeline to_fp16_vk_0 = nullptr;
     vk_pipeline to_fp16_vk_1 = nullptr;
@@ -7220,7 +7220,7 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context&
 
     const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
 
-    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && (ne11 * ne10) % 4 == 0;
+    bool quantize_y = ctx->device->integer_dot_product && src1->type == GGML_TYPE_F32 && ggml_is_contiguous(src1) && !y_non_contig && (ne11 * ne10) % 4 == 0;
 
     // Check for mmq first
     vk_matmul_pipeline mmp = quantize_y ? ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, GGML_TYPE_Q8_1, (ggml_prec)dst->op_params[0]) : nullptr;

From 4f694e4f4c0ff4efe50780d7bcea5a7d54dfb997 Mon Sep 17 00:00:00 2001
From: Pavels Zaicenkovs
Date: Sun, 16 Nov 2025 22:50:09 +0100
Subject: [PATCH 45/51] vulkan: add LOG operation support for F32 and F16 (llama/17183)

* vulkan: add LOG operation support for F32 and F16

Part of #14909.
* vulkan: Fix LOG operation types * docs: Update operation support documentation for Vulkan LOG operation * vulkan: fix log_f16 shader * docs: restore missing LOG test cases and regenerate ops.md --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 25 +++++++++++++++++++ ggml/src/ggml-vulkan/vulkan-shaders/log.comp | 17 +++++++++++++ .../vulkan-shaders/vulkan-shaders-gen.cpp | 3 +++ 3 files changed, 45 insertions(+) create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/log.comp diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 5bdc675cf6e..bb3eb977cec 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -629,6 +629,7 @@ struct vk_device_struct { vk_pipeline pipeline_sqrt_f32; vk_pipeline pipeline_sin_f32; vk_pipeline pipeline_cos_f32; + vk_pipeline pipeline_log[2]; vk_pipeline pipeline_clamp_f32; vk_pipeline pipeline_pad_f32; vk_pipeline pipeline_roll_f32; @@ -3792,6 +3793,8 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_sqrt_f32, "sqrt_f32", sqrt_f32_len, sqrt_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_sin_f32, "sin_f32", sin_f32_len, sin_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_cos_f32, "cos_f32", cos_f32_len, cos_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_log[0], "log_f32", log_f32_len, log_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_log[1], "log_f16", log_f16_len, log_f16_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_clamp_f32, "clamp_f32", clamp_f32_len, clamp_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {512, 1, 1}, {}, 1); @@ -8126,6 +8129,12 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const return ctx->device->pipeline_cos_f32; } return nullptr; + case GGML_OP_LOG: + if (src0->type == dst->type && + (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16)) { + return ctx->device->pipeline_log[dst->type == GGML_TYPE_F16]; + } + return nullptr; case GGML_OP_CLAMP: if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { return ctx->device->pipeline_clamp_f32; @@ -8534,6 +8543,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) { case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: + case GGML_OP_LOG: case GGML_OP_CLAMP: case GGML_OP_PAD: case GGML_OP_REPEAT: @@ -8806,6 +8816,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: + case GGML_OP_LOG: case GGML_OP_CLAMP: case GGML_OP_PAD: case GGML_OP_ROLL: @@ -9414,6 +9425,10 @@ static void ggml_vk_cos(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_COS, vk_op_unary_push_constants_init(src0, dst)); } +static void ggml_vk_log(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst) { + ggml_vk_op_f32(ctx, subctx, src0, nullptr, nullptr, nullptr, dst, GGML_OP_LOG, vk_op_unary_push_constants_init(src0, dst)); +} + static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, 
ggml_tensor * dst) { vk_op_unary_push_constants p = vk_op_unary_push_constants_init(src0, dst); p.param1 = ggml_get_op_params_f32(dst, 0); @@ -11209,6 +11224,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: + case GGML_OP_LOG: case GGML_OP_CLAMP: case GGML_OP_PAD: case GGML_OP_ROLL: @@ -11433,6 +11449,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr case GGML_OP_COS: ggml_vk_cos(ctx, compute_ctx, src0, node); + break; + case GGML_OP_LOG: + ggml_vk_log(ctx, compute_ctx, src0, node); + break; case GGML_OP_CLAMP: ggml_vk_clamp(ctx, compute_ctx, src0, node); @@ -11703,6 +11723,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph * case GGML_OP_SQRT: case GGML_OP_SIN: case GGML_OP_COS: + case GGML_OP_LOG: case GGML_OP_CLAMP: case GGML_OP_PAD: case GGML_OP_ROLL: @@ -13664,6 +13685,8 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_OPT_STEP_ADAMW: case GGML_OP_OPT_STEP_SGD: return op->src[0]->type == GGML_TYPE_F32; + case GGML_OP_LOG: + return op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16; case GGML_OP_ARGSORT: return op->ne[0] <= max_argsort_cols; case GGML_OP_UPSCALE: @@ -14159,6 +14182,8 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph * tensor_clone = ggml_sin(ggml_ctx, src_clone[0]); } else if (tensor->op == GGML_OP_COS) { tensor_clone = ggml_cos(ggml_ctx, src_clone[0]); + } else if (tensor->op == GGML_OP_LOG) { + tensor_clone = ggml_log(ggml_ctx, src_clone[0]); } else if (tensor->op == GGML_OP_CLAMP) { const float * params = (const float *)tensor->op_params; tensor_clone = ggml_clamp(ggml_ctx, src_clone[0], params[0], params[1]); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/log.comp b/ggml/src/ggml-vulkan/vulkan-shaders/log.comp new file mode 100644 index 00000000000..4aef724e907 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/log.comp @@ -0,0 +1,17 @@ +#version 450 + +#include "types.glsl" +#include "generic_unary_head.glsl" + +layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in; + +void main() { + const uint idx = get_idx(); + + if (idx >= p.ne) { + return; + } + + const FLOAT_TYPE val = FLOAT_TYPE(data_a[get_aoffset() + src0_idx(idx)]); + data_d[get_doffset() + dst_idx(idx)] = D_TYPE(log(val)); +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 7623a362087..8a2ce321dc4 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -802,6 +802,9 @@ void process_shaders() { string_to_spv("cos_f32", "cos.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}}); + string_to_spv("log_f32", "log.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}}); + string_to_spv("log_f16", "log.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}}); + string_to_spv("clamp_f32", "clamp.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}}); string_to_spv("pad_f32", "pad.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); From 7e090958768fc539d23b364f91e8a41c0f05cdcd Mon Sep 17 00:00:00 2001 From: hipudding Date: Mon, 17 Nov 2025 08:43:59 +0800 Subject: [PATCH 46/51] CANN: Use smart pointers to manage ACL objects (llama/17238) * CANN: Use smart pointers to manage ACL 
objects

Previously, ACL objects were managed via manual destruction, which led to
multiple memory-leak issues during runtime. This patch replaces manual
memory management with smart pointers so that ACL objects are properly
released and ownership is clearly defined.

Note that the ownership of an ACL object belongs to the function that
creates it. Other internal functions should operate on these ACL objects
using raw pointers to avoid unintended ownership transfers.

Additionally, since aclTensorList automatically frees its contained
aclTensor objects, any aclTensor added to a tensor list must release
ownership to avoid double-free operations.

This PR also removes the asynchronous task submission mechanism. Due to
changes in recent CANN versions, tiling time has significantly decreased.
Even with a dual-thread submission model, the dispatch overhead still
falls on the critical path, making async submission less beneficial.
Moreover, aclGraph support provides a much better path to reducing
operator dispatch latency.

* CANN: resolve review comments
---
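The ownership rules in this message map naturally onto std::unique_ptr with custom deleters. A minimal C++ sketch of the pattern, assuming CANN's aclDestroyTensor, aclDestroyTensorList and aclCreateTensorList entry points (declared here with abbreviated signatures; the helper names below are illustrative and may differ from the patch's actual types):

    #include <cstdint>
    #include <memory>
    #include <vector>

    // Assumed CANN entry points, with signatures abbreviated for this sketch.
    extern "C" {
        struct aclTensor;
        struct aclTensorList;
        int aclDestroyTensor(const aclTensor * tensor);
        int aclDestroyTensorList(const aclTensorList * list);
        aclTensorList * aclCreateTensorList(aclTensor * const * tensors, uint64_t size);
    }

    // Owning handles: the function that creates an ACL object holds one of
    // these; everyone else sees only the raw pointer via get().
    struct acl_tensor_deleter {
        void operator()(aclTensor * t) const { aclDestroyTensor(t); }
    };
    using acl_tensor_ptr = std::unique_ptr<aclTensor, acl_tensor_deleter>;

    struct acl_tensor_list_deleter {
        void operator()(aclTensorList * l) const { aclDestroyTensorList(l); }
    };
    using acl_tensor_list_ptr = std::unique_ptr<aclTensorList, acl_tensor_list_deleter>;

    // aclTensorList frees its contained tensors itself, so each unique_ptr
    // must give up ownership before its tensor goes into the list; the list
    // handle then frees every tensor exactly once.
    inline acl_tensor_list_ptr make_tensor_list(std::vector<acl_tensor_ptr> & tensors) {
        std::vector<aclTensor *> raw;
        raw.reserve(tensors.size());
        for (auto & t : tensors) {
            raw.push_back(t.release()); // ownership moves to the list
        }
        return acl_tensor_list_ptr(aclCreateTensorList(raw.data(), raw.size()));
    }

The release() call is the crux: once aclCreateTensorList holds the raw pointers, destroying the list frees every tensor, so the unique_ptrs must give up ownership to avoid the double free the message warns about.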
 ggml/src/ggml-cann/Doxyfile       | 2579 -----------------------------
 ggml/src/ggml-cann/acl_tensor.cpp |   29 +-
 ggml/src/ggml-cann/acl_tensor.h   |  118 +-
 ggml/src/ggml-cann/aclnn_ops.cpp  | 1280 +++++++-------
 ggml/src/ggml-cann/aclnn_ops.h    |  237 +--
 ggml/src/ggml-cann/common.h       |  157 +-
 ggml/src/ggml-cann/ggml-cann.cpp  |   41 +-
 7 files changed, 782 insertions(+), 3659 deletions(-)
 delete mode 100755 ggml/src/ggml-cann/Doxyfile

diff --git a/ggml/src/ggml-cann/Doxyfile b/ggml/src/ggml-cann/Doxyfile
deleted file mode 100755
index 3290a485930..00000000000
--- a/ggml/src/ggml-cann/Doxyfile
+++ /dev/null
@@ -1,2579 +0,0 @@
-# Doxyfile 1.8.17
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the configuration
-# file that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING      = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME           = "ggml"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER         =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF          = "Tensor library for machine learning"
-
-# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
-# in the documentation. The maximum height of the logo should not exceed 55
-# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
-# the logo to the output directory.
-
-PROJECT_LOGO           =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY       = docs
-
-# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS         = NO
-
-# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
-# characters to appear in the names of generated files. If set to NO, non-ASCII
-# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
-# U+3044.
-# The default value is: NO.
-
-ALLOW_UNICODE_NAMES    = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE        = English
-
-# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all generated output in the proper direction.
-# Possible values are: None, LTR, RTL and Context.
-# The default value is: None.
-
-OUTPUT_TEXT_DIRECTION  = None
-
-# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC      = YES
-
-# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
- -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = YES - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. -# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = NO - -# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line -# such as -# /*************** -# as being the beginning of a Javadoc-style comment "banner". 
If set to NO, the -# Javadoc-style will behave just like regular comments and it will not be -# interpreted by doxygen. -# The default value is: NO. - -JAVADOC_BANNER = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines (in the resulting output). You can put ^^ in the value part of an -# alias to insert a newline as if a physical newline was in the original file. -# When you need a literal { or } or , in the value part of an alias you have to -# escape them by means of a backslash (\), this can lead to conflicts with the -# commands \{ and \} for these it is advised to use the version @{ and @} or use -# a double escape (\\{ and \\}) - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. 
- -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice -# sources only. Doxygen will then generate output that is more tailored for that -# language. For instance, namespaces will be presented as modules, types will be -# separated into more groups, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_SLICE = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, -# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, -# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: -# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser -# tries to guess whether the code is fixed or free formatted code, this is the -# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat -# .inc files as Fortran files (default is PHP), and .f files as C (default is -# Fortran), use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See https://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up -# to that level are automatically included in the table of contents, even if -# they do not have an id attribute. -# Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 5. -# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. - -TOC_INCLUDE_HEADINGS = 5 - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). 
This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. 
This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIVATE = YES - -# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual -# methods of a class will be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIV_VIRTUAL = YES - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = YES - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = YES - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. 
- -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# declarations. If set to NO, these declarations will be included in the -# documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# (including Cygwin) ands Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. - -HIDE_SCOPE_NAMES = NO - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= NO - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. 
If set to NO, the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO, the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
-# list. This list is created by putting \todo commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
-# list. This list is created by putting \test commands in the documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation.
If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. -# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. 
If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. If -# EXTRACT_ALL is set to YES then this flag will automatically be disabled. -# The default value is: NO. - -WARN_NO_PARAMDOC = NO - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. -# The default value is: UTF-8. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. 
-# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), -# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen -# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f, *.for, *.tcl, *.vhd, -# *.vhdl, *.ucf, *.qsf and *.ice. - -FILE_PATTERNS = *.c \ - *.cc \ - *.cxx \ - *.cpp \ - *.c++ \ - *.java \ - *.ii \ - *.ixx \ - *.ipp \ - *.i++ \ - *.inl \ - *.idl \ - *.ddl \ - *.odl \ - *.h \ - *.hh \ - *.hxx \ - *.hpp \ - *.h++ \ - *.cs \ - *.d \ - *.php \ - *.php4 \ - *.php5 \ - *.phtml \ - *.inc \ - *.m \ - *.markdown \ - *.md \ - *.mm \ - *.dox \ - *.doc \ - *.txt \ - *.py \ - *.pyw \ - *.f90 \ - *.f95 \ - *.f03 \ - *.f08 \ - *.f \ - *.for \ - *.tcl \ - *.vhd \ - *.vhdl \ - *.ucf \ - *.qsf \ - *.ice - -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = * - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). 
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# properly processed by doxygen.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-#
-# Note that for custom extensions or not directly supported extensions you also
-# need to set EXTENSION_MAPPING for the extension otherwise the files are not
-# properly processed by doxygen.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
- -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# entity all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. - -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see https://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. - -VERBATIM_HEADERS = YES - -# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the -# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the -# cost of reduced performance. This can be particularly helpful with template -# rich C++ code for which doxygen's built-in parser lacks the necessary type -# information. -# Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse_libclang=ON option for CMake. -# The default value is: NO. - -CLANG_ASSISTED_PARSING = NO - -# If clang assisted parsing is enabled you can provide the compiler with command -# line options that you would normally use when invoking the compiler. Note that -# the include paths will already be set by doxygen for the files and directories -# specified with INPUT and INCLUDE_PATH. -# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. 
- -CLANG_OPTIONS = - -# If clang assisted parsing is enabled you can provide the clang parser with the -# path to the compilation database (see: -# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files -# were built. This is equivalent to specifying the "-p" option to a clang tool, -# such as clang-check. These options will then be passed to the parser. -# Note: The availability of this option depends on whether or not doxygen was -# generated with the -Duse_libclang=ON option for CMake. - -CLANG_DATABASE_PATH = - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. -# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# https://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. 
- -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML -# documentation will contain a main index with vertical navigation menus that -# are dynamically created via JavaScript. If disabled, the navigation index will -# consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have JavaScript, -# like the Qt help browser. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_MENUS = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/xcode/), introduced with OSX -# 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy -# genXcode/_index.html for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. 
A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. -# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. 
-# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = org.doxygen.Project - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. 
-# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. -# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANSPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands -# to create new LaTeX commands to be used in formulas as building blocks. See -# the section "Including formulas" for details. - -FORMULA_MACROFILE = - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side JavaScript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = YES - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/ - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. 
Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/