Commit afbff14

fix for both C11 and cpp
1 parent 7cfc6a7 commit afbff14

1 file changed

ggml/include/ggml.h

Lines changed: 74 additions & 23 deletions
@@ -648,26 +648,94 @@ extern "C" {
 extern __thread int ggml_current_numa_node;
 #endif
 
+#ifndef __cplusplus
+// C-only implementation using _Generic
 #define tensor_data(tensor) \
     _Generic((tensor), \
         struct ggml_tensor*: _tensor_data_ptr(tensor), \
         const struct ggml_tensor*: _tensor_data_ptr(tensor), \
-        default: _tensor_data_instance(tensor) \
+        default: _tensor_data_ptr(&(tensor)) \
     )
 
 #define tensor_set_data(tensor, value) \
     _Generic((tensor), \
         struct ggml_tensor*: _tensor_set_data_ptr(tensor, value), \
-        default: _tensor_set_data_instance(tensor, value) \
+        default: _tensor_set_data_ptr(&(tensor), value) \
     )
+#else
+// C++ implementation using function overloading
+static inline void * tensor_data(struct ggml_tensor * tensor) {
+#ifdef GGML_NUMA_MIRROR
+    int n = ggml_current_numa_node == -1 ? 0 : ggml_current_numa_node;
+    return tensor->__data[n];
+#else
+    return tensor->data;
+#endif
+}
+static inline void * tensor_data(const struct ggml_tensor * tensor) {
+#ifdef GGML_NUMA_MIRROR
+    int n = ggml_current_numa_node == -1 ? 0 : ggml_current_numa_node;
+    return tensor->__data[n];
+#else
+    return tensor->data;
+#endif
+}
+static inline void * tensor_data(struct ggml_tensor & tensor) {
+#ifdef GGML_NUMA_MIRROR
+    int n = ggml_current_numa_node == -1 ? 0 : ggml_current_numa_node;
+    return tensor.__data[n];
+#else
+    return tensor.data;
+#endif
+}
+static inline void * tensor_data(const struct ggml_tensor & tensor) {
+#ifdef GGML_NUMA_MIRROR
+    int n = ggml_current_numa_node == -1 ? 0 : ggml_current_numa_node;
+    return tensor.__data[n];
+#else
+    return tensor.data;
+#endif
+}
 
+static inline void tensor_set_data(struct ggml_tensor * tensor, void * value) {
+#ifdef GGML_NUMA_MIRROR
+    void* data_ = value;
+    if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT && (uint64_t)data_ < GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + 2 * GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) {
+        data_ = (void*)((uint64_t)data_ - GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT);
+    }
+    tensor->__data[0] = data_;
+    if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET && (uint64_t)data_ < GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) {
+        tensor->__data[1] = (void*)((uint64_t)data_ + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT);
+    } else {
+        tensor->__data[1] = data_;
+    }
+#else
+    tensor->data = value;
+#endif
+}
+static inline void tensor_set_data(struct ggml_tensor & tensor, void * value) {
+#ifdef GGML_NUMA_MIRROR
+    void* data_ = value;
+    if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT && (uint64_t)data_ < GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + 2 * GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) {
+        data_ = (void*)((uint64_t)data_ - GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT);
+    }
+    tensor.__data[0] = data_;
+    if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET && (uint64_t)data_ < GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) {
+        tensor.__data[1] = (void*)((uint64_t)data_ + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT);
+    } else {
+        tensor.__data[1] = data_;
+    }
+#else
+    tensor.data = value;
+#endif
+}
+#endif
+
+#if !defined(__cplusplus)
 #ifdef GGML_NUMA_MIRROR
 #define _tensor_data_ptr(tensor) \
     (ggml_current_numa_node == -1 ? (tensor)->__data[0] : (tensor)->__data[ggml_current_numa_node])
 
-#define _tensor_data_instance(tensor) \
-    (ggml_current_numa_node == -1 ? (tensor).__data[0] : (tensor).__data[ggml_current_numa_node])
-
 #define _tensor_set_data_ptr(tensor, data_ptr) \
     do { \
         void* data_ = (data_ptr); \
@@ -683,27 +751,10 @@ extern "C" {
             (tensor)->__data[1] = data_; \
         } \
     } while (0)
-
-#define _tensor_set_data_instance(tensor, data_ptr) \
-    do { \
-        void* data_ = (data_ptr); \
-        if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT && \
-            (uint64_t)data_ < GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + 2 * GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) { \
-            data_ = (void*)((uint64_t)data_ - GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT); \
-        } \
-        (tensor).__data[0] = data_; \
-        if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET && \
-            (uint64_t)data_ < GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) { \
-            (tensor).__data[1] = (void*)((uint64_t)data_ + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT); \
-        } else { \
-            (tensor).__data[1] = data_; \
-        } \
-    } while (0)
 #else
 #define _tensor_data_ptr(tensor) ((tensor)->data)
-#define _tensor_data_instance(tensor) ((tensor).data)
 #define _tensor_set_data_ptr(tensor, value) ((tensor)->data = (value))
-#define _tensor_set_data_instance(tensor, value) ((tensor).data = (value))
+#endif
 #endif
 
 static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);

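Note on the C path: after this change, tensor_data and tensor_set_data dispatch with C11 _Generic, routing any non-pointer argument through the pointer helpers by taking its address (&(tensor)), while the C++ path provides matching overloads for pointer, const pointer, reference, and const reference. The sketch below is a minimal, self-contained illustration of the same pointer-vs-instance dispatch idea, using a hypothetical struct demo_tensor rather than the real ggml API; it uses the common variant in which _Generic selects a helper function by type and the call happens afterwards. Compile as C11, e.g. gcc -std=c11.

#include <stdio.h>

struct demo_tensor {
    void * data;
};

/* Hypothetical helpers standing in for the _tensor_data_ptr machinery. */
static void * demo_data_from_ptr(const struct demo_tensor * t) { return t->data; }
static void * demo_data_from_val(struct demo_tensor t)         { return t.data; }

/* Pointers (const or not) pick the pointer helper; a plain instance falls
 * through to the default association, mirroring the C++ overload set. */
#define demo_data(t) \
    _Generic((t), \
        struct demo_tensor *:       demo_data_from_ptr, \
        const struct demo_tensor *: demo_data_from_ptr, \
        default:                    demo_data_from_val  \
    )(t)

int main(void) {
    int payload = 42;
    struct demo_tensor t = { &payload };
    struct demo_tensor * p = &t;

    /* Both an instance and a pointer resolve to the matching helper. */
    printf("%d %d\n", *(int *)demo_data(t), *(int *)demo_data(p));
    return 0;
}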
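Under GGML_NUMA_MIRROR, tensor_set_data keeps two per-node copies of the data pointer: an incoming address that falls in the second NUMA window is normalized back into the first window before being stored in __data[0], an address in the first window gets a shifted twin stored in __data[1], and anything outside the managed range is stored unchanged in both slots. Below is a rough, self-contained sketch of that bookkeeping with made-up placeholder constants and names (DEMO_BASE_OFFSET, DEMO_NUMA_INCREMENT, demo_set_data); the real GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET and GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT values are defined elsewhere in ggml, and a 64-bit address space is assumed.

#include <stdint.h>
#include <stdio.h>

/* Placeholder window layout, not the real ggml values. */
#define DEMO_BASE_OFFSET    ((uintptr_t)0x100000000ull)
#define DEMO_NUMA_INCREMENT ((uintptr_t)0x040000000ull)

/* slots[0] / slots[1] stand in for tensor->__data[0] / __data[1]. */
static void demo_set_data(void * slots[2], void * value) {
    uintptr_t a = (uintptr_t)value;
    /* An address in the node-1 window is normalized back into the node-0 window. */
    if (a >= DEMO_BASE_OFFSET + DEMO_NUMA_INCREMENT &&
        a <  DEMO_BASE_OFFSET + 2 * DEMO_NUMA_INCREMENT) {
        a -= DEMO_NUMA_INCREMENT;
    }
    slots[0] = (void *)a;
    /* An address in the node-0 window gets a mirrored twin for node 1;
     * anything outside the managed range is stored as-is in both slots. */
    if (a >= DEMO_BASE_OFFSET && a < DEMO_BASE_OFFSET + DEMO_NUMA_INCREMENT) {
        slots[1] = (void *)(a + DEMO_NUMA_INCREMENT);
    } else {
        slots[1] = (void *)a;
    }
}

int main(void) {
    void * slots[2];

    /* Address inside the node-0 window: slot 1 becomes the shifted mirror. */
    demo_set_data(slots, (void *)(DEMO_BASE_OFFSET + 0x1000));
    printf("%p %p\n", slots[0], slots[1]);

    /* Address inside the node-1 window: slot 0 is normalized back to node 0. */
    demo_set_data(slots, (void *)(DEMO_BASE_OFFSET + DEMO_NUMA_INCREMENT + 0x1000));
    printf("%p %p\n", slots[0], slots[1]);

    return 0;
}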