@@ -662,14 +662,57 @@ extern "C" {
         struct ggml_tensor*: _tensor_set_data_ptr(tensor, value), \
         default: _tensor_set_data_ptr(&(tensor), value) \
     )
+
+#ifdef GGML_NUMA_MIRROR
+#define _tensor_data_ptr(p) \
+    (ggml_current_numa_node == -1 ? (p)->__data[0] : (p)->__data[ggml_current_numa_node])
+
+#define _tensor_set_data_ptr(p, d) \
+    do { \
+        void* data_ = (d); \
+        if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT && \
+            (uint64_t)data_ <  GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + 2 * GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) { \
+            data_ = (void*)((uint64_t)data_ - GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT); \
+        } \
+        (p)->__data[0] = data_; \
+        if ((uint64_t)data_ >= GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET && \
+            (uint64_t)data_ <  GGML_MMAP_VIRTUAL_MEMORY_BASE_OFFSET + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT) { \
+            (p)->__data[1] = (void*)((uint64_t)data_ + GGML_MMAP_VIRTUAL_MEMORY_NUMA_INCREMENT); \
+        } else { \
+            (p)->__data[1] = data_; \
+        } \
+    } while (0)
 #else
-// C++ implementation using function overloading
-static inline void * tensor_data(struct ggml_tensor * tensor);
-static inline void * tensor_data(const struct ggml_tensor * tensor);
-static inline void * tensor_data(struct ggml_tensor & tensor);
-static inline void * tensor_data(const struct ggml_tensor & tensor);
-static inline void tensor_set_data(struct ggml_tensor * tensor, void * value);
-static inline void tensor_set_data(struct ggml_tensor & tensor, void * value);
+#define _tensor_data_ptr(p)        ((p)->data)
+#define _tensor_set_data_ptr(p, d) ((p)->data = (d))
+#endif
+
+#endif // !__cplusplus
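Under GGML_NUMA_MIRROR a tensor keeps one data pointer per NUMA node in __data[]: _tensor_set_data_ptr first canonicalizes the incoming pointer into the node-0 mmap window (subtracting one increment if it already points into the node-1 window), stores it as __data[0], and then derives the node-1 alias by adding the increment; pointers outside the mmapped range are simply duplicated into both slots. The standalone sketch below reproduces that address arithmetic with made-up window constants, purely for illustration; the real GGML_MMAP_VIRTUAL_MEMORY_* values are defined elsewhere in this header.

    /* Standalone illustration of the NUMA window arithmetic above (not part of ggml.h).
     * BASE_OFFSET / NODE_INCREMENT are placeholder values, not the real constants. */
    #include <stdint.h>
    #include <stdio.h>

    #define BASE_OFFSET    0x100000000000ULL  /* hypothetical start of the node-0 window */
    #define NODE_INCREMENT 0x010000000000ULL  /* hypothetical size of one per-node window */

    /* Mirrors _tensor_set_data_ptr's second branch: an address inside the node-0
     * window maps to the same offset in the node-1 window; anything else
     * (e.g. a plain malloc'd buffer) is left untouched. */
    static void * mirror_for_node1(void * p) {
        uint64_t a = (uint64_t) p;
        if (a >= BASE_OFFSET && a < BASE_OFFSET + NODE_INCREMENT) {
            return (void *)(a + NODE_INCREMENT);
        }
        return p;
    }

    int main(void) {
        void * node0 = (void *)(BASE_OFFSET + 0x1234);
        printf("node 0: %p  ->  node 1: %p\n", node0, mirror_for_node1(node0));
        return 0;
    }

Reads then reduce to a single index: _tensor_data_ptr picks __data[ggml_current_numa_node], with the unbound case (-1) falling back to the node-0 pointer.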
+
+static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
+
+// Abort callback
+// If not NULL, called before ggml computation
+// If it returns true, the computation is aborted
+typedef bool (*ggml_abort_callback)(void * data);
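As the comment states, the callback gives callers a cheap cancellation hook that is polled during computation. A minimal sketch of a conforming callback follows; the wiring is kept in comments because it assumes the abort_callback/abort_callback_data fields of struct ggml_cplan from upstream ggml.

    #include <stdbool.h>

    /* Flag that another thread (e.g. a UI or signal handler) can set to
     * request cancellation of an in-flight graph computation. */
    static volatile bool g_stop_requested = false;

    /* Matches the ggml_abort_callback signature: `data` is whatever pointer
     * was registered alongside the callback; returning true aborts. */
    static bool my_abort_callback(void * data) {
        const volatile bool * stop = (const volatile bool *) data;
        return *stop;
    }

    /* Assumed wiring via struct ggml_cplan (upstream ggml):
     *     struct ggml_cplan plan = ggml_graph_plan(graph, n_threads, NULL);
     *     plan.abort_callback      = my_abort_callback;
     *     plan.abort_callback_data = (void *) &g_stop_requested;
     *     ggml_graph_compute(graph, &plan);
     */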
+
+
+//
+// GUID
+//
+
+// GUID types
+typedef uint8_t ggml_guid[16];
+typedef ggml_guid * ggml_guid_t;
+
+GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b);
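GUIDs give ggml components (most visibly backends) a stable identity that ggml_guid_matches can compare. A hypothetical sketch of the usual pattern, with arbitrary bytes standing in for a real identity:

    /* Hypothetical identity for a custom component; the byte values are arbitrary. */
    static ggml_guid_t my_backend_guid(void) {
        static ggml_guid guid = {
            0x3e, 0x7a, 0x1c, 0x55, 0x90, 0x0b, 0x42, 0x8d,
            0xa1, 0x64, 0x2f, 0xd3, 0x77, 0x08, 0xbe, 0xc9
        };
        return &guid;
    }

    /* A caller can then test whether some other GUID refers to the same component:
     *     if (ggml_guid_matches(other_guid, my_backend_guid())) { ... }
     */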
+// ...existing code...
+#ifdef __cplusplus
+}
+#endif
+
+// C++ overloaded functions - must be outside extern "C" block
+#ifdef __cplusplus
 
 static inline void * tensor_data(struct ggml_tensor * tensor) {
 #ifdef GGML_NUMA_MIRROR
@@ -679,6 +722,7 @@ static inline void * tensor_data(struct ggml_tensor * tensor) {
     return tensor->data;
 #endif
 }
+
 static inline void * tensor_data(const struct ggml_tensor * tensor) {
 #ifdef GGML_NUMA_MIRROR
     int n = ggml_current_numa_node == -1 ? 0 : ggml_current_numa_node;
@@ -687,9 +731,11 @@ static inline void * tensor_data(const struct ggml_tensor * tensor) {
     return tensor->data;
 #endif
 }
+
 static inline void * tensor_data(struct ggml_tensor & tensor) {
     return tensor_data(&tensor);
 }
+
 static inline void * tensor_data(const struct ggml_tensor & tensor) {
     return tensor_data(&tensor);
 }
@@ -710,10 +756,12 @@ static inline void tensor_set_data(struct ggml_tensor * tensor, void * value) {
     tensor->data = value;
 #endif
 }
+
 static inline void tensor_set_data(struct ggml_tensor & tensor, void * value) {
     tensor_set_data(&tensor, value);
 }
-#endif
+
+#endif // __cplusplus
 
 #if !defined(__cplusplus)
 #ifdef GGML_NUMA_MIRROR
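Call sites are expected to go through tensor_data()/tensor_set_data() rather than touching tensor->data directly, so the per-node indirection stays transparent whether or not GGML_NUMA_MIRROR is enabled. A short C usage sketch, assuming the accessors dispatch pointer arguments the way the _Generic selection at the top of the first hunk suggests, and using ggml_nbytes() for the tensor's byte size:

    #include "ggml.h"
    #include <string.h>

    /* Zero a tensor's payload through the accessor: under GGML_NUMA_MIRROR this
     * resolves to the mirror for the current NUMA node, otherwise to t->data. */
    static void zero_tensor_bytes(struct ggml_tensor * t) {
        void * p = tensor_data(t);
        memset(p, 0, ggml_nbytes(t));
    }

    /* Re-point a tensor at a new buffer; the setter keeps all mirrors consistent. */
    static void rebind_tensor(struct ggml_tensor * t, void * new_buf) {
        tensor_set_data(t, new_buf);
    }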