 // TODO: support for clang
 #ifdef __GNUC__
 #    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
-#    define GGML_PACKED __attribute__((__packed__))
+#    define GGML_PACKED_ENUM enum __attribute__((__packed__))
+#    define GGML_PACKED_ENUM_END
 #elif defined(_MSC_VER)
 #    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
+#    define GGML_PACKED_ENUM __pragma(pack(push, 1)) enum
+#    define GGML_PACKED_ENUM_END __pragma(pack(pop))
 #else
 #    define GGML_DEPRECATED(func, hint) func
+#    define GGML_PACKED_ENUM
+#    define GGML_PACKED_ENUM_END
 #endif
 
 #ifndef __GNUC__
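For context, the two macros are meant to bracket an enum definition as a pair. Below is a minimal usage sketch; the enum and value names are made up for illustration and are not part of this patch. On GCC/Clang, `GGML_PACKED_ENUM` expands to `enum __attribute__((__packed__))` (so the enum is stored in the smallest integer type that fits its values) and `GGML_PACKED_ENUM_END` expands to nothing; on MSVC the pair expands to `__pragma(pack(push, 1)) enum` and `__pragma(pack(pop))`.

```c
// Usage sketch for the new macro pair (illustrative only; the enum below is
// a made-up example, not part of the patch).
#include "ggml.h"   // patched header providing GGML_PACKED_ENUM / GGML_PACKED_ENUM_END

GGML_PACKED_ENUM my_packed_enum {
    MY_PACKED_ENUM_A = 0,
    MY_PACKED_ENUM_B = 1,
};
GGML_PACKED_ENUM_END

#ifdef __GNUC__
// On GCC/Clang the __attribute__((__packed__)) expansion stores the enum in the
// smallest integer type that fits its values, i.e. a single byte here.
_Static_assert(sizeof(enum my_packed_enum) == 1, "packed enum should occupy 1 byte");
#endif
```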
@@ -318,12 +323,13 @@ extern "C" {
     GGML_NORETURN GGML_ATTRIBUTE_FORMAT(3, 4)
     GGML_API void ggml_abort(const char * file, int line, const char * fmt, ...);
 
-    enum GGML_PACKED ggml_status {
+    GGML_PACKED_ENUM ggml_status {
         GGML_STATUS_ALLOC_FAILED = -2,
         GGML_STATUS_FAILED = -1,
         GGML_STATUS_SUCCESS = 0,
         GGML_STATUS_ABORTED = 1,
     };
+    GGML_PACKED_ENUM_END
 
     // get ggml_status name string
     GGML_API const char * ggml_status_to_string(enum ggml_status status);
@@ -349,7 +355,7 @@ extern "C" {
     struct ggml_cgraph;
 
     // NOTE: always add types at the end of the enum to keep backward compatibility
-    enum GGML_PACKED ggml_type {
+    GGML_PACKED_ENUM ggml_type {
         GGML_TYPE_F32 = 0,
         GGML_TYPE_F16 = 1,
         GGML_TYPE_Q4_0 = 2,
@@ -391,15 +397,17 @@ extern "C" {
         // GGML_TYPE_IQ4_NL_8_8 = 38,
         GGML_TYPE_COUNT = 39,
     };
+    GGML_PACKED_ENUM_END
 
     // precision
-    enum GGML_PACKED ggml_prec {
+    GGML_PACKED_ENUM ggml_prec {
         GGML_PREC_DEFAULT,
         GGML_PREC_F32,
     };
+    GGML_PACKED_ENUM_END
 
     // model file types
-    enum GGML_PACKED ggml_ftype {
+    GGML_PACKED_ENUM ggml_ftype {
         GGML_FTYPE_UNKNOWN = -1,
         GGML_FTYPE_ALL_F32 = 0,
         GGML_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
@@ -425,9 +433,10 @@ extern "C" {
         GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors
         GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors
     };
+    GGML_PACKED_ENUM_END
 
     // available tensor operations:
-    enum GGML_PACKED ggml_op {
+    GGML_PACKED_ENUM ggml_op {
         GGML_OP_NONE = 0,
 
         GGML_OP_DUP,
@@ -520,8 +529,9 @@ extern "C" {
 
         GGML_OP_COUNT,
     };
+    GGML_PACKED_ENUM_END
 
-    enum GGML_PACKED ggml_unary_op {
+    GGML_PACKED_ENUM ggml_unary_op {
         GGML_UNARY_OP_ABS,
         GGML_UNARY_OP_SGN,
         GGML_UNARY_OP_NEG,
@@ -539,29 +549,33 @@ extern "C" {
 
         GGML_UNARY_OP_COUNT,
     };
+    GGML_PACKED_ENUM_END
 
-    enum GGML_PACKED ggml_object_type {
+    GGML_PACKED_ENUM ggml_object_type {
         GGML_OBJECT_TYPE_TENSOR,
         GGML_OBJECT_TYPE_GRAPH,
         GGML_OBJECT_TYPE_WORK_BUFFER
     };
+    GGML_PACKED_ENUM_END
 
-    enum GGML_PACKED ggml_log_level {
+    GGML_PACKED_ENUM ggml_log_level {
         GGML_LOG_LEVEL_NONE = 0,
         GGML_LOG_LEVEL_DEBUG = 1,
         GGML_LOG_LEVEL_INFO = 2,
         GGML_LOG_LEVEL_WARN = 3,
         GGML_LOG_LEVEL_ERROR = 4,
         GGML_LOG_LEVEL_CONT = 5, // continue previous log
     };
+    GGML_PACKED_ENUM_END
 
     // this tensor...
-    enum GGML_PACKED ggml_tensor_flag {
+    GGML_PACKED_ENUM ggml_tensor_flag {
         GGML_TENSOR_FLAG_INPUT = 1, // ...is an input for the GGML compute graph
         GGML_TENSOR_FLAG_OUTPUT = 2, // ...is an output for the GGML compute graph
         GGML_TENSOR_FLAG_PARAM = 4, // ...contains trainable parameters
         GGML_TENSOR_FLAG_LOSS = 8, // ...defines loss for numerical optimization (multiple loss tensors add up)
     };
+    GGML_PACKED_ENUM_END
 
     struct ggml_init_params {
         // memory pool
@@ -1679,11 +1693,12 @@ extern "C" {
             struct ggml_tensor * b,
             int stride);
 
-    enum GGML_PACKED ggml_op_pool {
+    GGML_PACKED_ENUM ggml_op_pool {
         GGML_OP_POOL_MAX,
         GGML_OP_POOL_AVG,
         GGML_OP_POOL_COUNT,
     };
+    GGML_PACKED_ENUM_END
 
     GGML_API struct ggml_tensor * ggml_pool_1d(
             struct ggml_context * ctx,
@@ -1718,10 +1733,11 @@ extern "C" {
             float p0,
             float p1);
 
-    enum GGML_PACKED ggml_scale_mode {
+    GGML_PACKED_ENUM ggml_scale_mode {
         GGML_SCALE_MODE_NEAREST = 0,
         GGML_SCALE_MODE_BILINEAR = 1,
     };
+    GGML_PACKED_ENUM_END
 
     // interpolate
     // multiplies ne0 and ne1 by scale factor
@@ -1768,10 +1784,11 @@ extern "C" {
             int max_period);
 
     // sort rows
-    enum GGML_PACKED ggml_sort_order {
+    GGML_PACKED_ENUM ggml_sort_order {
         GGML_SORT_ORDER_ASC,
         GGML_SORT_ORDER_DESC,
     };
+    GGML_PACKED_ENUM_END
 
     GGML_API struct ggml_tensor * ggml_argsort(
             struct ggml_context * ctx,
@@ -2138,12 +2155,13 @@ extern "C" {
     // the goal should be to create an API that other backends can use move everything to the ggml base
 
     // scheduling priorities
-    enum GGML_PACKED ggml_sched_priority {
+    GGML_PACKED_ENUM ggml_sched_priority {
         GGML_SCHED_PRIO_NORMAL,
         GGML_SCHED_PRIO_MEDIUM,
         GGML_SCHED_PRIO_HIGH,
         GGML_SCHED_PRIO_REALTIME
     };
+    GGML_PACKED_ENUM_END
 
     // threadpool params
     // Use ggml_threadpool_params_default() or ggml_threadpool_params_init() to populate the defaults