Skip to content

Commit d74105f

Browse files
committed
ggml : rename ggml_internal_get_type_traits -> ggml_get_type_traits
it's not really internal if everybody uses it
1 parent e2e10ff commit d74105f

File tree

13 files changed

+19
-19
lines changed

13 files changed

+19
-19
lines changed

examples/export-lora/export-lora.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -314,7 +314,7 @@ struct lora_merge_ctx {
314314
// optionally dequantize it
315315
printf("%s : + dequantize base tensor from %s to F32\n", __func__, ggml_type_name(base->type));
316316
auto nels = ggml_nelements(inp_base);
317-
const auto * qtype = ggml_internal_get_type_traits(base->type);
317+
const auto * qtype = ggml_get_type_traits(base->type);
318318
std::vector<uint8_t> dequant_buf(nels * sizeof(float));
319319
qtype->to_float(read_buf.data(), (float *)dequant_buf.data(), nels);
320320
ggml_backend_tensor_set(inp_base, dequant_buf.data(), 0, dequant_buf.size());

examples/quantize-stats/quantize-stats.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -371,7 +371,7 @@ int main(int argc, char ** argv) {
371371
if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
372372
continue;
373373
}
374-
const auto * qfns = ggml_internal_get_type_traits(type);
374+
const auto * qfns = ggml_get_type_traits(type);
375375
if (qfns->from_float && qfns->to_float) {
376376
if (params.verbose) {
377377
printf("testing %s ...\n", ggml_type_name(type));

ggml/include/ggml.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -2553,7 +2553,7 @@ extern "C" {
25532553
ggml_gemm_t gemm;
25542554
};
25552555

2556-
GGML_API const struct ggml_type_traits * ggml_internal_get_type_traits(enum ggml_type type);
2556+
GGML_API const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type);
25572557

25582558
#ifdef __cplusplus
25592559
}

ggml/src/ggml-backend.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1177,7 +1177,7 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st
11771177
op->type != GGML_TYPE_IQ1_S &&
11781178
op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float
11791179
case GGML_OP_MUL_MAT:
1180-
return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_internal_get_type_traits(op->src[0]->type)->vec_dot_type;
1180+
return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_get_type_traits(op->src[0]->type)->vec_dot_type;
11811181
case GGML_OP_ROPE_BACK:
11821182
return op->src[2] == NULL && (op->op_params[2] & 4) == 0;
11831183
case GGML_OP_IM2COL_BACK:

ggml/src/ggml-blas.cpp

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -65,7 +65,7 @@ static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct gg
6565

6666
// convert src0 to float
6767
if (type != GGML_TYPE_F32) {
68-
const auto * type_traits = ggml_internal_get_type_traits(type);
68+
const auto * type_traits = ggml_get_type_traits(type);
6969
ggml_to_float_t const to_float = type_traits->to_float;
7070

7171
for (int64_t i03 = 0; i03 < ne03; i03++) {
@@ -424,7 +424,7 @@ static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const s
424424
ggml_is_contiguous(src1) &&
425425
src1->type == GGML_TYPE_F32 &&
426426
(ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) &&
427-
(src0->type == GGML_TYPE_F32 || ggml_internal_get_type_traits(src0->type)->to_float != NULL);
427+
(src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
428428
}
429429

430430
case GGML_OP_OUT_PROD:
@@ -434,7 +434,7 @@ static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const s
434434
ggml_is_matrix(src1) &&
435435
ggml_is_contiguous(src0) &&
436436
(ggml_is_contiguous(src1) || ggml_is_transposed(src1)) &&
437-
(src0->type == GGML_TYPE_F32 || ggml_internal_get_type_traits(src0->type)->to_float != NULL);
437+
(src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
438438

439439
default:
440440
return false;

ggml/src/ggml-vulkan.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -5287,7 +5287,7 @@ static void ggml_vk_dequantize_data(const void * from, float * to, size_t ne, gg
52875287
return;
52885288
}
52895289

5290-
const auto * tt = ggml_internal_get_type_traits(quant);
5290+
const auto * tt = ggml_get_type_traits(quant);
52915291

52925292
ggml_to_float_t dequant_fn = tt->to_float;
52935293

ggml/src/ggml.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1151,7 +1151,7 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = {
11511151
};
11521152

11531153
// For internal test use
1154-
const struct ggml_type_traits * ggml_internal_get_type_traits(enum ggml_type type) {
1154+
const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type) {
11551155
GGML_ASSERT(type < GGML_TYPE_COUNT);
11561156
return &type_traits[type];
11571157
}

pocs/vdot/q8dot.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -136,7 +136,7 @@ int main(int argc, char** argv) {
136136

137137
auto ggml_type = type == 0 ? GGML_TYPE_Q4_0 : GGML_TYPE_Q4_1;
138138

139-
const auto * funcs = ggml_internal_get_type_traits(ggml_type);
139+
const auto * funcs = ggml_get_type_traits(ggml_type);
140140

141141
Stat simple, ggml;
142142

pocs/vdot/vdot.cpp

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -236,7 +236,7 @@ int main(int argc, char** argv) {
236236
int n4 = useQ4_1 ? kVecSize / QK4_1 : kVecSize / QK4_0; n4 = 64*((n4 + 63)/64);
237237
int n8 = kVecSize / QK8_0; n8 = 64*((n8 + 63)/64);
238238

239-
const auto * funcs = useQ4_1 ? ggml_internal_get_type_traits(GGML_TYPE_Q4_1) : ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
239+
const auto * funcs = useQ4_1 ? ggml_get_type_traits(GGML_TYPE_Q4_1) : ggml_get_type_traits(GGML_TYPE_Q4_0);
240240

241241
std::vector<block_q4_0> q40;
242242
std::vector<block_q4_1> q41;
@@ -282,7 +282,7 @@ int main(int argc, char** argv) {
282282
dot_q4_q8(kVecSize, &result, q40.data(), q8.data());
283283
}
284284
else {
285-
const auto * vdot = ggml_internal_get_type_traits(funcs->vec_dot_type);
285+
const auto * vdot = ggml_get_type_traits(funcs->vec_dot_type);
286286
vdot->from_float(y1.data(), q8.data(), kVecSize);
287287
if (useQ4_1) funcs->vec_dot(kVecSize, &result, 0, q41.data(), 0, q8.data(), 0, 1);
288288
else funcs->vec_dot(kVecSize, &result, 0, q40.data(), 0, q8.data(), 0, 1);

src/llama.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -17872,7 +17872,7 @@ static void llama_tensor_dequantize_internal(
1787217872
}
1787317873
float * f32_output = (float *) output.data();
1787417874

17875-
const ggml_type_traits * qtype = ggml_internal_get_type_traits(tensor->type);
17875+
const ggml_type_traits * qtype = ggml_get_type_traits(tensor->type);
1787617876
if (ggml_is_quantized(tensor->type)) {
1787717877
if (qtype->to_float == NULL) {
1787817878
throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));

0 commit comments

Comments (0)