ggml-cpu: add mxfp4 VSX intrinsics for Power9+ (ppc64le) hardware – 3-4x performance boost #15385

Merged · 3 commits · Aug 19, 2025
1 change: 0 additions & 1 deletion ggml/src/ggml-cpu/arch-fallback.h
@@ -73,7 +73,6 @@
#define ggml_vec_dot_tq1_0_q8_K_generic ggml_vec_dot_tq1_0_q8_K
#define ggml_vec_dot_tq2_0_q8_K_generic ggml_vec_dot_tq2_0_q8_K
#define ggml_vec_dot_iq1_m_q8_K_generic ggml_vec_dot_iq1_m_q8_K
#define ggml_vec_dot_mxfp4_q8_0_generic ggml_vec_dot_mxfp4_q8_0
// repack.cpp
#define ggml_quantize_mat_q8_0_4x4_generic ggml_quantize_mat_q8_0_4x4
#define ggml_quantize_mat_q8_0_4x8_generic ggml_quantize_mat_q8_0_4x8
66 changes: 66 additions & 0 deletions ggml/src/ggml-cpu/arch/powerpc/quants.c
@@ -278,6 +278,72 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const voi
#endif
}

void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);
    UNUSED(bx);
    UNUSED(by);
    UNUSED(bs);
    assert(n % QK_MXFP4 == 0);
    static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same");

    const block_mxfp4 * GGML_RESTRICT x = vx;
    const block_q8_0 * GGML_RESTRICT y = vy;

    const int nb = n / QK_MXFP4;

    int ib = 0;
    float sumf = 0;

#if defined(__POWER9_VECTOR__)
    const vector signed char lowMask = vec_splats((signed char)0xF);
    const vector unsigned char vshift4 = vec_splats((unsigned char)4);
    vector float vsumf0 = vec_splats(0.0f);

    vector signed char kv = vec_xl(0, (const signed char *)kvalues_mxfp4);

#pragma GCC unroll 8
Member

I highly doubt this unroll actually does anything. The compiler has to know the loop bounds at compile-time to be able to unroll.

@Tom94 (Aug 19, 2025)

If the compiler knows the loop bounds, it can get rid of jumps entirely, but dynamic loops can also be unrolled as long as a factor (in this case 8) is provided. Worse than a static unroll, but often still effective.

Example of such an unrolled loop, from Wikipedia:
#include <stdio.h>

/* The number of entries processed per loop iteration.                        */
/* Note that this number is a 'constant constant' reflecting the code below.  */
enum {
  BUNCHSIZE = 8
};

int main(void)
{ 
  int i = 0;                                    /* counter */
  int entries = 50;                             /* total number to process    */ 
 
  /* If the number of elements is not divisible by BUNCHSIZE,              */ 
  /* get repeat times required to do most processing in the while loop        */

  int repeat = (entries / BUNCHSIZE);                /* number of times to repeat */
  int left   = (entries % BUNCHSIZE);                /* calculate remainder       */

  /* Unroll the loop in 'bunches' of 8                                        */ 
  while (repeat--) 
  { 
    printf("process(%d)\n", i    );
    printf("process(%d)\n", i + 1); 
    printf("process(%d)\n", i + 2); 
    printf("process(%d)\n", i + 3); 
    printf("process(%d)\n", i + 4); 
    printf("process(%d)\n", i + 5); 
    printf("process(%d)\n", i + 6); 
    printf("process(%d)\n", i + 7);

    /* update the index by amount processed in one go                         */ 
    i += BUNCHSIZE;
  }

  /* Use a switch statement to process remaining by jumping to the case label */ 
  /* at the label that will then drop through to complete the set             */ 
  switch (left) 
  {
     case 7 : printf("process(%d)\n", i + 6);   /* process and rely on drop 
                                                   through                    */
     case 6 : printf("process(%d)\n", i + 5); 
     case 5 : printf("process(%d)\n", i + 4);  
     case 4 : printf("process(%d)\n", i + 3);  
     case 3 : printf("process(%d)\n", i + 2); 
     case 2 : printf("process(%d)\n", i + 1);   /* two left                   */
     case 1 : printf("process(%d)\n", i);       /* just one left to process   */ 
     case 0 : ;                                 /* none left                  */
  } 
}

Contributor Author

I wasn't too sure about that either; I just saw it was done for the other quants, and in my testing (which might have been affected by other factors as well) I saw an improvement of ~1-2 t/s using llama-bench.
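
For reference, here is a minimal sketch (not part of this diff) of the pragma form being discussed, assuming GCC 8 or newer; the helper name dot_unrolled is made up for illustration:

#include <stddef.h>

/* Illustrative only: with an explicit factor, GCC can unroll this loop even     */
/* though the trip count n is only known at run time, duplicating the body up to */
/* 8 times and handling the leftover iterations separately.                      */
float dot_unrolled(const float * a, const float * b, size_t n) {
    float acc = 0.0f;
#pragma GCC unroll 8
    for (size_t i = 0; i < n; ++i) {
        acc += a[i] * b[i];
    }
    return acc;
}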

    for (; ib < nb; ++ib) {
        __builtin_prefetch(x[ib].qs, 0, 1);
        __builtin_prefetch(y[ib].qs, 0, 1);

        vector float vyd = vec_splats(GGML_CPU_FP16_TO_FP32(y[ib].d) *
                                      GGML_E8M0_TO_FP32_HALF(x[ib].e));

        vector signed char q8y0 = vec_xl( 0, y[ib].qs);
        vector signed char q8y1 = vec_xl(16, y[ib].qs);

        vector signed char qxs = (vector signed char)vec_xl(0, x[ib].qs);

        vector unsigned char lo_nibbles = (vector unsigned char)vec_and(qxs, lowMask);
        vector unsigned char hi_nibbles = (vector unsigned char)vec_sr(qxs, vshift4);

        vector signed char q4x0 = vec_perm(kv, kv, lo_nibbles);
        vector signed char q4x1 = vec_perm(kv, kv, hi_nibbles);

        vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
        vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));

        vector signed int vsumi0 = vec_splats((int32_t)0);
        vsumi0 = vec_sum4s(qv0, vsumi0);
        vsumi0 = vec_sum4s(qv1, vsumi0);

        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vyd, vsumf0);
    }

    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
    sumf = vec_extract(vsumf0, 0);
    *s = sumf;
#else
    UNUSED(x);
    UNUSED(y);
    UNUSED(ib);
    UNUSED(sumf);
    ggml_vec_dot_mxfp4_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
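
For readers less familiar with VSX intrinsics, here is a scalar sketch of the per-block arithmetic the loop above implements. It only reuses names that appear in this diff (block_mxfp4, block_q8_0, kvalues_mxfp4, QK_MXFP4, and the FP16/E8M0 helpers); the helper name block_dot is made up, and the nibble layout (low nibbles pair with the first 16 q8 values, high nibbles with the last 16) is inferred from how the vector code pairs them:

/* Hypothetical scalar equivalent of one iteration of the VSX loop above,        */
/* assuming the block layouts used elsewhere in this file.                       */
static float block_dot(const block_mxfp4 * x, const block_q8_0 * y) {
    /* combined per-block scale, matching the vyd computation in the vector loop */
    const float d = GGML_CPU_FP16_TO_FP32(y->d) * GGML_E8M0_TO_FP32_HALF(x->e);

    int32_t sumi = 0;
    for (int j = 0; j < QK_MXFP4/2; ++j) {
        const int8_t lo = kvalues_mxfp4[x->qs[j] & 0x0F];        /* low nibble  -> element j              */
        const int8_t hi = kvalues_mxfp4[(x->qs[j] >> 4) & 0x0F]; /* high nibble -> element j + QK_MXFP4/2 */
        sumi += lo * y->qs[j];
        sumi += hi * y->qs[j + QK_MXFP4/2];
    }
    return d * (float)sumi;
}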

void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
    const int qk = QK8_0;
    const int nb = n / qk;