Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
223 changes: 223 additions & 0 deletions ggml/src/ggml-hexagon/ggml-hexagon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -529,6 +529,213 @@ static void init_row_q4x4x2(block_q4_0 * x, int64_t k) {
}
}


static void repack_row_q4kx2(uint8_t * y, const block_q4_K * x, int64_t k) {
static const int qk = QK_Q4_Kx2 / 2; // 256
const int nb = (k + qk - 1) / qk;

const int dblk_size = 8 * 2; // 8x __fp16 for d
const int mblk_size = 8 * 2; // 8x __fp16 for m
const int qblk_size = qk / 2; // 128 bytes

uint8_t * y_q = y;
uint8_t * y_d = y + k / 2;
uint8_t * y_m = y_d + nb * dblk_size;

for (int i = 0; i < nb; i++) {
uint8_t qs[256];

const float d = GGML_FP16_TO_FP32(x[i].d);
const float dmin = GGML_FP16_TO_FP32(x[i].dmin);

ggml_half * d_ptr = (ggml_half *)(y_d + i * dblk_size);
ggml_half * m_ptr = (ggml_half *)(y_m + i * mblk_size);

for (int is = 0; is < 8; is++) {
uint8_t sc, m_scale;
if (is < 4) {
sc = x[i].scales[is] & 63;
m_scale = x[i].scales[is + 4] & 63;
} else {
sc = (x[i].scales[is+4] & 0xF) | ((x[i].scales[is-4] >> 6) << 4);
m_scale = (x[i].scales[is+4] >> 4) | ((x[i].scales[is-0] >> 6) << 4);
}
d_ptr[is] = GGML_FP32_TO_FP16(d * sc);
m_ptr[is] = GGML_FP32_TO_FP16(dmin * m_scale);

for (int l = 0; l < 32; l++) {
int q_idx = (is / 2) * 32 + l;
qs[is * 32 + l] = is % 2 == 0 ? (x[i].qs[q_idx] & 0xF) : (x[i].qs[q_idx] >> 4);
}
}

block_q4_0 temp_x[8];
pack_q4_0_quants(&temp_x[0], qs, 0);
pack_q4_0_quants(&temp_x[1], qs, 1);
pack_q4_0_quants(&temp_x[2], qs, 2);
pack_q4_0_quants(&temp_x[3], qs, 3);
pack_q4_0_quants(&temp_x[4], qs, 4);
pack_q4_0_quants(&temp_x[5], qs, 5);
pack_q4_0_quants(&temp_x[6], qs, 6);
pack_q4_0_quants(&temp_x[7], qs, 7);

for (int j = 0; j < 8; j++) {
memcpy(y_q + i * qblk_size + j * 16, temp_x[j].qs, 16);
}
}
}

// Inverse of repack_row_q4kx2: reconstruct a row of Q4_K blocks from the flat
// q4kx2 layout. The round-trip is approximate: per-sub-block scales/mins were
// stored pre-multiplied as fp16, so the 6-bit scale fields are recovered as
// rounded ratios against sub-block 0.
static void unpack_row_q4kx2(block_q4_K * x, const uint8_t * y, int64_t k) {
    static const int qk = QK_Q4_Kx2 / 2; // 256
    const int nb = (k + qk - 1) / qk;

    const int dblk_size = 8 * 2; // 8x __fp16 for d
    const int mblk_size = 8 * 2; // 8x __fp16 for m
    const int qblk_size = qk / 2; // 128 bytes

    const uint8_t * y_q = y;
    const uint8_t * y_d = y + k / 2;
    const uint8_t * y_m = y_d + nb * dblk_size;

    for (int i = 0; i < nb; i++) {
        uint8_t qs[256];

        // undo the q4_0 nibble packing: 8 groups of 32 values, 16 bytes each
        block_q4_0 temp_x[8];
        for (int j = 0; j < 8; j++) {
            memcpy(temp_x[j].qs, y_q + i * qblk_size + j * 16, 16);
            unpack_q4_0_quants(qs, &temp_x[j], j);
        }

        // re-pack into the Q4_K nibble layout: even sub-blocks in the low
        // nibbles, odd sub-blocks in the high nibbles of the same 32 bytes
        for (int is = 0; is < 8; is++) {
            for (int l = 0; l < 32; l++) {
                int q_idx = (is / 2) * 32 + l;
                if (is % 2 == 0) {
                    x[i].qs[q_idx] = qs[is * 32 + l] & 0xF;
                } else {
                    x[i].qs[q_idx] |= (qs[is * 32 + l] & 0xF) << 4;
                }
            }
        }

        const ggml_half * d_ptr = (const ggml_half *)(y_d + i * dblk_size);
        const ggml_half * m_ptr = (const ggml_half *)(y_m + i * mblk_size);

        // sub-block 0's effective scale/min become the block-level d/dmin
        x[i].d = d_ptr[0];
        x[i].dmin = m_ptr[0];

        const float d0 = GGML_FP16_TO_FP32(d_ptr[0]);
        const float m0 = GGML_FP16_TO_FP32(m_ptr[0]);

        for (int is = 0; is < 8; is++) {
            // Recover the 6-bit fields as round-to-nearest ratios (+0.5 before
            // truncation, values are non-negative after the guard); previously
            // plain truncation could drop a scale step when the fp16 ratio
            // landed just below an integer. Guard against a zero denominator:
            // converting the resulting inf/NaN to int is undefined behavior.
            int sc      = (d0 != 0.0f) ? (int)(GGML_FP16_TO_FP32(d_ptr[is]) / d0 + 0.5f) : 0;
            int m_scale = (m0 != 0.0f) ? (int)(GGML_FP16_TO_FP32(m_ptr[is]) / m0 + 0.5f) : 0;
            if (sc > 63) { sc = 63; }
            if (sc < 0)  { sc = 0; }
            if (m_scale > 63) { m_scale = 63; }
            if (m_scale < 0)  { m_scale = 0; }

            // re-encode the 6-bit scale/min pair into the packed 12-byte scales[]
            if (is < 4) {
                x[i].scales[is] = (sc & 63);
                x[i].scales[is + 4] = (m_scale & 63);
            } else {
                x[i].scales[is + 4] = (sc & 0xF) | ((m_scale & 0xF) << 4);
                x[i].scales[is - 4] |= ((sc >> 4) << 6);
                x[i].scales[is - 0] |= ((m_scale >> 4) << 6);
            }
        }
    }
}

// Initialize a row of Q4_K blocks to a neutral state: zero scales and mins
// with mid-range (8) quant nibbles, so the row dequantizes to all zeros.
static void init_row_q4kx2(block_q4_K * x, int64_t k) {
    static const int qk = QK_Q4_Kx2 / 2; // 256 values per block
    const int nb = (k + qk - 1) / qk;

    for (int ib = 0; ib < nb; ib++) {
        block_q4_K * blk = &x[ib];
        blk->d    = 0;
        blk->dmin = 0;
        memset(blk->scales, 0, 12);
        memset(blk->qs, 8, qk / 2);
    }
}

// Repack a Q4_K tensor (host layout) into the q4kx2 device layout, row by row.
// `size` may cover only part of the tensor: full rows are repacked directly
// into t->data, and a trailing partial row is padded with neutral blocks
// before being repacked.
static void repack_q4_K_q4kx2(ggml_tensor * t, const void * data, size_t size) {
    int64_t nrows = ggml_nrows(t);
    size_t row_size = ggml_row_size(t->type, t->ne[0]);
    size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_Kx2 / 2)); // padded source-row scratch
    size_t row_size_rp = (t->ne[0] / 2) + (t->ne[0] / 256) * 32; // repacked row: quants + 8x fp16 d + 8x fp16 m per block

    const size_t total_tensor_size = (size_t)nrows * row_size;
    const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size;
    const int64_t n_full_rows = n_bytes_to_copy / row_size;
    const size_t n_rem_bytes = n_bytes_to_copy % row_size;

    void * buf_pd = ggml_aligned_malloc(row_size_pd); // padded source row
    GGML_ASSERT(buf_pd != NULL);
    void * buf_rp = ggml_aligned_malloc(row_size_rp); // repacked row
    GGML_ASSERT(buf_rp != NULL);

    memset(buf_pd, 0, row_size_pd);

    for (int64_t i = 0; i < n_full_rows; i++) {
        const uint8_t * src = (const uint8_t *) data + (i * row_size);
        uint8_t * dst = (uint8_t *) t->data + (i * row_size_rp);
        memcpy(buf_pd, src, row_size);
        repack_row_q4kx2(dst, (const block_q4_K *) buf_pd, t->ne[0]);
    }

    if (n_rem_bytes > 0) {
        const int64_t i = n_full_rows;
        const uint8_t * src = (const uint8_t *) data + (i * row_size);
        uint8_t * dst = (uint8_t *) t->data + (i * row_size_rp);
        // Re-init the scratch row first: after the loop above buf_pd still
        // holds the previous row's data (the memset ran only once), which
        // would otherwise leak into the tail of the repacked partial row.
        init_row_q4kx2((block_q4_K *) buf_pd, t->ne[0]);
        memcpy(buf_pd, src, n_rem_bytes);
        repack_row_q4kx2((uint8_t *) buf_rp, (const block_q4_K *) buf_pd, t->ne[0]);
        memcpy(dst, buf_rp, row_size_rp);
    }

    ggml_aligned_free(buf_pd, row_size_pd);
    ggml_aligned_free(buf_rp, row_size_rp);
}

// Unpack the q4kx2 device layout (t->data) back into host Q4_K rows in `data`.
// `size` may cover only part of the tensor: full rows are copied whole, and
// the trailing partial row is taken from a fully unpacked scratch row.
static void repack_q4kx2_q4_K(void * data, const ggml_tensor * t, size_t size) {
    int64_t nrows = ggml_nrows(t);
    size_t row_size = ggml_row_size(t->type, t->ne[0]);
    size_t row_size_pd = ggml_row_size(t->type, hex_round_up(t->ne[0], QK_Q4_Kx2 / 2)); // padded scratch-row size
    size_t row_size_rp = (t->ne[0] / 2) + (t->ne[0] / 256) * 32; // repacked (device) row stride

    const size_t total_tensor_size = (size_t)nrows * row_size;
    const size_t n_bytes_to_copy = size < total_tensor_size ? size : total_tensor_size;
    const int64_t n_full_rows = n_bytes_to_copy / row_size;
    const size_t n_rem_bytes = n_bytes_to_copy % row_size;

    // Single scratch row, reused for every unpack. No second buffer is needed
    // here: the unpacked row goes straight from buf_pd into the destination.
    // (A previously allocated buf_rp was never used and has been removed.)
    void * buf_pd = ggml_aligned_malloc(row_size_pd);
    GGML_ASSERT(buf_pd != NULL);

    memset(buf_pd, 0, row_size_pd);

    for (int64_t i = 0; i < n_full_rows; i++) {
        const uint8_t * src = (const uint8_t *) t->data + (i * row_size_rp);
        uint8_t * dst = (uint8_t *) data + (i * row_size);
        unpack_row_q4kx2((block_q4_K *) buf_pd, src, t->ne[0]);
        memcpy(dst, buf_pd, row_size);
    }

    if (n_rem_bytes > 0) {
        const int64_t i = n_full_rows;
        const uint8_t * src = (const uint8_t *) t->data + (i * row_size_rp);
        uint8_t * dst = (uint8_t *) data + (i * row_size);
        unpack_row_q4kx2((block_q4_K *) buf_pd, src, t->ne[0]);
        memcpy(dst, buf_pd, n_rem_bytes);
    }

    ggml_aligned_free(buf_pd, row_size_pd);
}


// repack q4_0 data into q4x4x2 tensor
static void repack_q4_0_q4x4x2(ggml_tensor * t, const void * data, size_t size) {
int64_t nrows = ggml_nrows(t);
Expand Down Expand Up @@ -1350,6 +1557,12 @@ static void ggml_backend_hexagon_buffer_set_tensor(ggml_backend_buffer_t buffer,
repack_q4_0_q4x4x2(tensor, data, size);
break;

case GGML_TYPE_Q4_K:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
repack_q4_K_q4kx2(tensor, data, size);
break;

case GGML_TYPE_Q8_0:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
Expand Down Expand Up @@ -1392,6 +1605,12 @@ static void ggml_backend_hexagon_buffer_get_tensor(ggml_backend_buffer_t buffer,
repack_q4x4x2_q4_0(data, tensor, size);
break;

case GGML_TYPE_Q4_K:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
repack_q4kx2_q4_K(data, tensor, size);
break;

case GGML_TYPE_Q8_0:
GGML_ASSERT(offset == 0);
GGML_ASSERT(offset + size <= ggml_nbytes(tensor));
Expand Down Expand Up @@ -2163,6 +2382,7 @@ static bool ggml_hexagon_supported_mul_mat(const struct ggml_hexagon_session * s

switch (src0->type) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q8_0:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_MXFP4:
Expand Down Expand Up @@ -2213,6 +2433,7 @@ static bool ggml_hexagon_supported_mul_mat_id(const struct ggml_hexagon_session

switch (src0->type) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q8_0:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_MXFP4:
Expand Down Expand Up @@ -3298,6 +3519,8 @@ static void ggml_hexagon_init(ggml_backend_reg * reg) {
// Basic sanity checks to make sure definitions match
static_assert((unsigned int) HTP_TYPE_Q4_0 == (unsigned int) GGML_TYPE_Q4_0,
"please update hexagon_type to match ggml_type");
static_assert((unsigned int) HTP_TYPE_Q4_K == (unsigned int) GGML_TYPE_Q4_K,
"please update hexagon_type to match ggml_type");
static_assert((unsigned int) HTP_TYPE_Q8_0 == (unsigned int) GGML_TYPE_Q8_0,
"please update hexagon_type to match ggml_type");
static_assert((unsigned int) HTP_TYPE_MXFP4 == (unsigned int) GGML_TYPE_MXFP4,
Expand Down
3 changes: 3 additions & 0 deletions ggml/src/ggml-hexagon/htp/htp-ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ enum htp_data_type {
HTP_TYPE_F16 = 1,
HTP_TYPE_Q4_0 = 2,
HTP_TYPE_Q8_0 = 8,
HTP_TYPE_Q4_K = 12,
HTP_TYPE_IQ4_NL = 20,
HTP_TYPE_I32 = 26,
HTP_TYPE_I64 = 27,
Expand All @@ -29,6 +30,7 @@ enum htp_data_type {
// types used internally for repack, dyn.quant, etc
HTP_TYPE_Q4_0x4x2 = 200,
HTP_TYPE_Q8_0x4x2,
HTP_TYPE_Q4_Kx2,
HTP_TYPE_MXFP4x4x2,

HTP_TYPE_INVALID
Expand All @@ -37,6 +39,7 @@ enum htp_data_type {
// Constats for internal types
#define QK_Q4_0x4x2 256 // 4x Q4_0 blocks packed with next 4x Q4_0 blocks (size in bytes 128)
#define QK_Q8_0x4x2 256 // 4x Q8_0 blocks concat with next 4x Q8_0 blocks
#define QK_Q4_Kx2 512 // 2x Q4_K blocks packed together
#define QK_MXFP4x4x2 256 // 4x MXFP4 blocks concat with next 4x MXFP4 blocks


Expand Down
Loading
Loading