diff --git a/include/vk_mem_alloc.h b/include/vk_mem_alloc.h
index 000813b6..220fb408 100644
--- a/include/vk_mem_alloc.h
+++ b/include/vk_mem_alloc.h
@@ -3153,7 +3153,9 @@ remove them if not needed.
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
{
// alignment must be >= sizeof(void*)
if(alignment < sizeof(void*))
@@ -3163,6 +3165,7 @@ static void* vma_aligned_alloc(size_t alignment, size_t size)
return memalign(alignment, size);
}
+} // namespace
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>
@@ -3170,7 +3173,9 @@ static void* vma_aligned_alloc(size_t alignment, size_t size)
#include <AvailabilityMacros.h>
#endif
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
{
// Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
// Therefore, for now disable this specific exception until a proper solution is found.
@@ -3198,35 +3203,46 @@ static void* vma_aligned_alloc(size_t alignment, size_t size)
return pointer;
return VMA_NULL;
}
+} // namespace
#elif defined(_WIN32)
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace {
+void* vma_aligned_alloc(size_t alignment, size_t size)
{
return _aligned_malloc(size, alignment);
}
+} // namespace
#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace {
+void* vma_aligned_alloc(size_t alignment, size_t size)
{
return aligned_alloc(alignment, size);
}
+} // namespace
#else
-static void* vma_aligned_alloc(size_t alignment, size_t size)
+namespace
+{
+void* vma_aligned_alloc(size_t alignment, size_t size)
{
VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
return VMA_NULL;
}
+} // namespace
#endif
+namespace
+{
#if defined(_WIN32)
-static void vma_aligned_free(void* ptr)
+void vma_aligned_free(void* ptr)
{
_aligned_free(ptr);
}
#else
-static void vma_aligned_free(void* VMA_NULLABLE ptr)
+void vma_aligned_free(void* VMA_NULLABLE ptr)
{
free(ptr);
}
#endif
+} // namespace
#ifndef VMA_ALIGN_OF
#define VMA_ALIGN_OF(type) (alignof(type))
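Both forms above give the helpers internal linkage; the unnamed namespace has the added benefit of applying to types as well and of not repeating `static` on every declaration. A standalone sketch of the equivalence (illustration only, not part of the patch):

    // Both functions have internal linkage: each translation unit that
    // includes this code gets its own private copy; nothing leaks at link time.
    #include <cstdio>

    static int fromStatic() { return 1; } // internal linkage via 'static'

    namespace // unnamed namespace: internal linkage, and it covers types too
    {
    struct Helper { int value = 2; };
    int fromAnon() { return Helper{}.value; }
    } // namespace

    int main()
    {
        std::printf("%d %d\n", fromStatic(), fromAnon()); // prints "1 2"
        return 0;
    }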
@@ -3307,18 +3323,20 @@ static void vma_aligned_free(void* VMA_NULLABLE ptr)
// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
- static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+namespace {
+ inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
{
snprintf(outStr, strLen, "%" PRIu32, num);
}
- static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+ inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
{
snprintf(outStr, strLen, "%" PRIu64, num);
}
- static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+ inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
{
snprintf(outStr, strLen, "%p", ptr);
}
+} // namespace
#endif
#ifndef VMA_MUTEX
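These helpers lean on the `<cinttypes>` PRIu32/PRIu64 macros so the `snprintf` format specifier stays correct no matter which fundamental type the platform uses for `uint32_t`/`uint64_t`. The same idea in miniature (standalone illustration):

    #include <cinttypes>
    #include <cstdio>

    int main()
    {
        char buf[21]; // 20 digits of UINT64_MAX plus the terminating NUL
        std::snprintf(buf, sizeof(buf), "%" PRIu64, UINT64_MAX);
        std::puts(buf); // prints 18446744073709551615
        return 0;
    }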
@@ -3513,31 +3531,33 @@ END OF CONFIGURATION
*/
#endif // _VMA_CONFIGURATION
-
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+namespace
+{
+constexpr uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+constexpr uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
-static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+constexpr uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
-static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
-static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
-static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
-static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000U;
-static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
-static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+constexpr uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+constexpr uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+constexpr uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+constexpr uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
+constexpr int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
+constexpr uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000U;
+constexpr uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+constexpr uint32_t VMA_VENDOR_ID_AMD = 4098;
// This one is tricky. Vulkan specification defines this code as available since
// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
// See pull request #207.
#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
+} // namespace
#if VMA_STATS_STRING_ENABLED
// Correspond to values of enum VmaSuballocationType.
-static const char* const VMA_SUBALLOCATION_TYPE_NAMES[] =
+const char* const VMA_SUBALLOCATION_TYPE_NAMES[] =
{
"FREE",
"UNKNOWN",
@@ -3548,7 +3568,7 @@ static const char* const VMA_SUBALLOCATION_TYPE_NAMES[] =
};
#endif
-static const VkAllocationCallbacks VmaEmptyAllocationCallbacks =
+const VkAllocationCallbacks VmaEmptyAllocationCallbacks =
{ VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
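Switching `static const` to `constexpr` does not change linkage (namespace-scope `const` variables are already internal by default); it guarantees compile-time initialization and keeps the values usable in constant expressions. A small standalone sketch:

    #include <cstdint>

    namespace
    {
    constexpr uint32_t MAGIC = 0x7F84E666; // must initialize at compile time, or the build fails
    static_assert(MAGIC == 2139416166u, "usable in constant expressions");
    uint32_t table[MAGIC % 7 + 1]; // and as an array bound (size 7 here)
    } // namespace

    int main() { return sizeof(table) / sizeof(table[0]) == 7 ? 0 : 1; }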
@@ -3587,6 +3607,8 @@ enum class VmaAllocationRequestType
// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
+struct VmaBufferImageUsage;
+
struct VmaMutexLock;
struct VmaMutexLockRead;
struct VmaMutexLockWrite;
@@ -3651,9 +3673,91 @@ class VmaAllocationObjectAllocator;
#endif // _VMA_FORWARD_DECLARATIONS
+#ifndef _VMA_BUFFER_IMAGE_USAGE
+
+// Finds structure with s->sType == sType in mainStruct->pNext chain.
+// Returns pointer to it. If not found, returns null.
+template<typename FindT, typename MainT>
+inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
+{
+ for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
+ s != VMA_NULL; s = s->pNext)
+ {
+ if(s->sType == sType)
+ {
+ return (const FindT*)s;
+ }
+ }
+ return VMA_NULL;
+}
+
+// An abstraction over buffer or image `usage` flags, depending on available extensions.
+struct VmaBufferImageUsage
+{
+#if VMA_KHR_MAINTENANCE5
+ typedef uint64_t BaseType; // VkFlags64
+#else
+ typedef uint32_t BaseType; // VkFlags32
+#endif
+
+ static const VmaBufferImageUsage UNKNOWN;
+
+ BaseType Value;
+
+ VmaBufferImageUsage() { *this = UNKNOWN; }
+ explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { }
+ VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5);
+ explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo);
+
+ bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; }
+ bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; }
+
+ bool Contains(BaseType flag) const { return (Value & flag) != 0; }
+ bool ContainsDeviceAccess() const
+ {
+ // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
+ return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
+ }
+};
+
+const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0);
+
+VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo,
+ bool useKhrMaintenance5)
+{
+#if VMA_KHR_MAINTENANCE5
+ if(useKhrMaintenance5)
+ {
+ // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR,
+ // take usage from it and ignore VkBufferCreateInfo::usage, per specification
+ // of the VK_KHR_maintenance5 extension.
+ const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 =
+ VmaPnextChainFind<VkBufferUsageFlags2CreateInfoKHR>(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR);
+ if(usageFlags2 != VMA_NULL)
+ {
+ this->Value = usageFlags2->usage;
+ return;
+ }
+ }
+#endif
+
+ this->Value = (BaseType)createInfo.usage;
+}
+
+VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo)
+ : Value((BaseType)createInfo.usage)
+{
+ // Maybe in the future there will be VK_KHR_maintenanceN extension with structure
+ // VkImageUsageFlags2CreateInfoKHR, like the one for buffers...
+}
+
+#endif // _VMA_BUFFER_IMAGE_USAGE
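`VmaPnextChainFind` walks a Vulkan-style `pNext` chain comparing `sType` fields, with the result type given explicitly as the first template argument. A self-contained sketch of the same traversal, using hypothetical stand-in structs so it builds without Vulkan headers:

    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the Vulkan definitions the template relies on.
    using VkStructureType = int32_t;
    struct VkBaseInStructure { VkStructureType sType; const VkBaseInStructure* pNext; };
    constexpr VkStructureType STYPE_A = 1, STYPE_B = 2;
    struct StructA { VkStructureType sType = STYPE_A; const void* pNext = nullptr; };
    struct StructB { VkStructureType sType = STYPE_B; const void* pNext = nullptr; int payload = 42; };

    template<typename FindT, typename MainT>
    const FindT* PnextChainFind(const MainT* mainStruct, VkStructureType sType)
    {
        for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
            s != nullptr; s = s->pNext)
        {
            if(s->sType == sType)
                return (const FindT*)s;
        }
        return nullptr;
    }

    int main()
    {
        StructB b;
        StructA a;
        a.pNext = &b; // chain: a -> b
        const StructB* found = PnextChainFind<StructB>(&a, STYPE_B);
        std::printf("%d\n", found ? found->payload : -1); // prints 42
        return 0;
    }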
#ifndef _VMA_FUNCTIONS
+namespace
+{
+
/*
Returns number of bits set to 1 in (v).
@@ -3667,7 +3771,7 @@ GCC, Clang:
Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
But you need to check in runtime whether user's CPU supports these, as some old processors don't.
*/
-static inline uint32_t VmaCountBitsSet(uint32_t v)
+inline uint32_t VmaCountBitsSet(uint32_t v)
{
#if VMA_CPP20
return std::popcount(v);
@@ -3681,7 +3785,7 @@ static inline uint32_t VmaCountBitsSet(uint32_t v)
#endif
}
-static inline uint8_t VmaBitScanLSB(uint64_t mask)
+inline uint8_t VmaBitScanLSB(uint64_t mask)
{
#if defined(_MSC_VER) && defined(_WIN64)
unsigned long pos;
@@ -3707,7 +3811,7 @@ static inline uint8_t VmaBitScanLSB(uint64_t mask)
#endif
}
-static inline uint8_t VmaBitScanLSB(uint32_t mask)
+inline uint8_t VmaBitScanLSB(uint32_t mask)
{
#ifdef _MSC_VER
unsigned long pos;
@@ -3733,7 +3837,7 @@ static inline uint8_t VmaBitScanLSB(uint32_t mask)
#endif
}
-static inline uint8_t VmaBitScanMSB(uint64_t mask)
+inline uint8_t VmaBitScanMSB(uint64_t mask)
{
#if defined(_MSC_VER) && defined(_WIN64)
unsigned long pos;
@@ -3758,7 +3862,7 @@ static inline uint8_t VmaBitScanMSB(uint64_t mask)
return UINT8_MAX;
}
-static inline uint8_t VmaBitScanMSB(uint32_t mask)
+inline uint8_t VmaBitScanMSB(uint32_t mask)
{
#ifdef _MSC_VER
unsigned long pos;
@@ -3797,7 +3901,7 @@ inline bool VmaIsPow2(T x)
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
-static inline T VmaAlignUp(T val, T alignment)
+inline T VmaAlignUp(T val, T alignment)
{
VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
return (val + alignment - 1) & ~(alignment - 1);
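The formula requires `alignment` to be a power of 2 (hence the `VMA_HEAVY_ASSERT` above): adding `alignment - 1` then masking with `~(alignment - 1)` clears the low bits. A quick standalone check:

    #include <cstdio>

    template <typename T>
    T AlignUp(T val, T alignment) // same formula as VmaAlignUp
    {
        return (val + alignment - 1) & ~(alignment - 1);
    }

    int main()
    {
        // 11 + 7 = 18 = 0b10010; masking off the low 3 bits gives 16.
        std::printf("%u %u\n", AlignUp(11u, 8u), AlignUp(16u, 8u)); // prints "16 16"
        return 0;
    }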
@@ -3806,7 +3910,7 @@ static inline T VmaAlignUp(T val, T alignment)
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
-static inline T VmaAlignDown(T val, T alignment)
+inline T VmaAlignDown(T val, T alignment)
{
VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
return val & ~(alignment - 1);
@@ -3814,20 +3918,20 @@ static inline T VmaAlignDown(T val, T alignment)
// Division with mathematical rounding to nearest number.
template <typename T>
-static inline T VmaRoundDiv(T x, T y)
+inline T VmaRoundDiv(T x, T y)
{
return (x + (y / (T)2)) / y;
}
// Divide by 'y' and round up to nearest integer.
template <typename T>
-static inline T VmaDivideRoundingUp(T x, T y)
+inline T VmaDivideRoundingUp(T x, T y)
{
return (x + y - (T)1) / y;
}
// Returns smallest power of 2 greater or equal to v.
-static inline uint32_t VmaNextPow2(uint32_t v)
+inline uint32_t VmaNextPow2(uint32_t v)
{
v--;
v |= v >> 1;
@@ -3839,7 +3943,7 @@ static inline uint32_t VmaNextPow2(uint32_t v)
return v;
}
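The or-shift cascade smears the most significant set bit into every lower position; the leading decrement keeps exact powers of 2 unchanged and the final increment produces the result. Traced on one input (standalone illustration):

    #include <cstdint>
    #include <cstdio>

    uint32_t NextPow2(uint32_t v) // same algorithm as VmaNextPow2 above
    {
        v--;          // 37 -> 36 = 0b100100
        v |= v >> 1;  // 0b110110
        v |= v >> 2;  // 0b111111 = 63 (already saturated for this input)
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16; // every bit below the MSB is now set
        v++;          // 64
        return v;
    }

    int main() { std::printf("%u\n", NextPow2(37)); return 0; } // prints 64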
-static inline uint64_t VmaNextPow2(uint64_t v)
+inline uint64_t VmaNextPow2(uint64_t v)
{
v--;
v |= v >> 1;
@@ -3853,7 +3957,7 @@ static inline uint64_t VmaNextPow2(uint64_t v)
}
// Returns largest power of 2 less or equal to v.
-static inline uint32_t VmaPrevPow2(uint32_t v)
+inline uint32_t VmaPrevPow2(uint32_t v)
{
v |= v >> 1;
v |= v >> 2;
@@ -3864,7 +3968,7 @@ static inline uint32_t VmaPrevPow2(uint32_t v)
return v;
}
-static inline uint64_t VmaPrevPow2(uint64_t v)
+inline uint64_t VmaPrevPow2(uint64_t v)
{
v |= v >> 1;
v |= v >> 2;
@@ -3876,7 +3980,7 @@ static inline uint64_t VmaPrevPow2(uint64_t v)
return v;
}
-static inline bool VmaStrIsEmpty(const char* pStr)
+inline bool VmaStrIsEmpty(const char* pStr)
{
return pStr == VMA_NULL || *pStr == '\0';
}
@@ -3888,7 +3992,7 @@ ResourceA must be in less memory offset than ResourceB.
Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
-static inline bool VmaBlocksOnSamePage(
+inline bool VmaBlocksOnSamePage(
VkDeviceSize resourceAOffset,
VkDeviceSize resourceASize,
VkDeviceSize resourceBOffset,
@@ -3908,7 +4012,7 @@ VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
-static inline bool VmaIsBufferImageGranularityConflict(
+inline bool VmaIsBufferImageGranularityConflict(
VmaSuballocationType suballocType1,
VmaSuballocationType suballocType2)
{
@@ -3943,7 +4047,7 @@ static inline bool VmaIsBufferImageGranularityConflict(
}
}
-static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
uint32_t* pDst = (uint32_t*)((char*)pData + offset);
@@ -3957,7 +4061,7 @@ static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
#endif
}
-static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
@@ -3977,7 +4081,7 @@ static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
-static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
@@ -3996,7 +4100,7 @@ Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
-static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
+IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
{
size_t down = 0;
size_t up = size_t(end - beg);
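This is a local implementation of the classic lower-bound loop over random-access iterators. A standalone sketch of the same strategy, checked against `std::lower_bound` (the test data is made up):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Same halving strategy: shrink [down, up) until it collapses on the
    // first element that is not less than key.
    template <typename CmpLess, typename IterT, typename KeyT>
    IterT FindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
    {
        size_t down = 0, up = size_t(end - beg);
        while(down < up)
        {
            const size_t mid = down + (up - down) / 2;
            if(cmp(*(beg + mid), key))
                down = mid + 1;
            else
                up = mid;
        }
        return beg + down;
    }

    int main()
    {
        std::vector<int> v = {1, 3, 3, 7, 9};
        auto less = [](int a, int b) { return a < b; };
        auto mine = FindFirstNotLess(v.begin(), v.end(), 5, less);
        auto ref  = std::lower_bound(v.begin(), v.end(), 5, less);
        std::printf("%d %d\n", int(mine - v.begin()), int(ref - v.begin())); // prints "3 3"
        return 0;
    }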
@@ -4034,7 +4138,7 @@ Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
-static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
for (uint32_t i = 0; i < count; ++i)
{
@@ -4055,90 +4159,15 @@ static bool VmaValidatePointerArray(uint32_t count, const T* arr)
}
template<typename MainT, typename NewT>
-static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
newStruct->pNext = mainStruct->pNext;
mainStruct->pNext = newStruct;
}
-// Finds structure with s->sType == sType in mainStruct->pNext chain.
-// Returns pointer to it. If not found, returns null.
-template<typename FindT, typename MainT>
-static inline const FindT* VmaPnextChainFind(const MainT* mainStruct, VkStructureType sType)
-{
- for(const VkBaseInStructure* s = (const VkBaseInStructure*)mainStruct->pNext;
- s != VMA_NULL; s = s->pNext)
- {
- if(s->sType == sType)
- {
- return (const FindT*)s;
- }
- }
- return VMA_NULL;
-}
-
-// An abstraction over buffer or image `usage` flags, depending on available extensions.
-struct VmaBufferImageUsage
-{
-#if VMA_KHR_MAINTENANCE5
- typedef uint64_t BaseType; // VkFlags64
-#else
- typedef uint32_t BaseType; // VkFlags32
-#endif
-
- static const VmaBufferImageUsage UNKNOWN;
-
- BaseType Value;
-
- VmaBufferImageUsage() { *this = UNKNOWN; }
- explicit VmaBufferImageUsage(BaseType usage) : Value(usage) { }
- VmaBufferImageUsage(const VkBufferCreateInfo &createInfo, bool useKhrMaintenance5);
- explicit VmaBufferImageUsage(const VkImageCreateInfo &createInfo);
-
- bool operator==(const VmaBufferImageUsage& rhs) const { return Value == rhs.Value; }
- bool operator!=(const VmaBufferImageUsage& rhs) const { return Value != rhs.Value; }
-
- bool Contains(BaseType flag) const { return (Value & flag) != 0; }
- bool ContainsDeviceAccess() const
- {
- // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
- return (Value & ~BaseType(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
- }
-};
-
-const VmaBufferImageUsage VmaBufferImageUsage::UNKNOWN = VmaBufferImageUsage(0);
-
-VmaBufferImageUsage::VmaBufferImageUsage(const VkBufferCreateInfo &createInfo,
- bool useKhrMaintenance5)
-{
-#if VMA_KHR_MAINTENANCE5
- if(useKhrMaintenance5)
- {
- // If VkBufferCreateInfo::pNext chain contains VkBufferUsageFlags2CreateInfoKHR,
- // take usage from it and ignore VkBufferCreateInfo::usage, per specification
- // of the VK_KHR_maintenance5 extension.
- const VkBufferUsageFlags2CreateInfoKHR* const usageFlags2 =
- VmaPnextChainFind<VkBufferUsageFlags2CreateInfoKHR>(&createInfo, VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR);
- if(usageFlags2 != VMA_NULL)
- {
- this->Value = usageFlags2->usage;
- return;
- }
- }
-#endif
-
- this->Value = (BaseType)createInfo.usage;
-}
-
-VmaBufferImageUsage::VmaBufferImageUsage(const VkImageCreateInfo &createInfo)
- : Value((BaseType)createInfo.usage)
-{
- // Maybe in the future there will be VK_KHR_maintenanceN extension with structure
- // VkImageUsageFlags2CreateInfoKHR, like the one for buffers...
-}
// This is the main algorithm that guides the selection of a memory type best for an allocation -
// converts usage to required/preferred/not preferred flags.
-static bool FindMemoryPreferences(
+bool FindMemoryPreferences(
bool isIntegratedGPU,
const VmaAllocationCreateInfo& allocCreateInfo,
VmaBufferImageUsage bufImgUsage,
@@ -4290,7 +4319,7 @@ static bool FindMemoryPreferences(
////////////////////////////////////////////////////////////////////////////////
// Memory allocation
-static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+inline void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
void* result = VMA_NULL;
if ((pAllocationCallbacks != VMA_NULL) &&
@@ -4310,7 +4339,7 @@ static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t
return result;
}
-static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+inline void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
if ((pAllocationCallbacks != VMA_NULL) &&
(pAllocationCallbacks->pfnFree != VMA_NULL))
@@ -4324,13 +4353,13 @@ static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr
}
template<typename T>
-static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
template<typename T>
-static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
@@ -4340,14 +4369,14 @@ static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, si
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
template<typename T>
-static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
ptr->~T();
VmaFree(pAllocationCallbacks, ptr);
}
template<typename T>
-static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
if (ptr != VMA_NULL)
{
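`vma_new` and `vma_delete` split allocation from construction: placement-new into memory obtained through the user's allocation callbacks, then an explicit destructor call before the memory is returned. The same pattern in miniature, with `malloc`/`free` standing in for the callbacks:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    struct Tracked
    {
        Tracked() { std::puts("ctor"); }
        ~Tracked() { std::puts("dtor"); }
    };

    int main()
    {
        // vma_new: raw allocation, then placement-new constructs in place.
        void* raw = std::malloc(sizeof(Tracked));
        Tracked* obj = new(raw) Tracked();

        // vma_delete: run the destructor explicitly, then free the raw block.
        obj->~Tracked();
        std::free(raw);
        return 0;
    }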
@@ -4359,7 +4388,7 @@ static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks,
}
}
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
if (srcStr != VMA_NULL)
{
@@ -4372,7 +4401,7 @@ static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char
}
#if VMA_STATS_STRING_ENABLED
-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
+char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
{
if (srcStr != VMA_NULL)
{
@@ -4385,7 +4414,7 @@ static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char
}
#endif // VMA_STATS_STRING_ENABLED
-static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
if (str != VMA_NULL)
{
@@ -4423,11 +4452,17 @@ bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type&
}
return false;
}
+
+} // namespace
+
#endif // _VMA_FUNCTIONS
#ifndef _VMA_STATISTICS_FUNCTIONS
-static void VmaClearStatistics(VmaStatistics& outStats)
+namespace
+{
+
+void VmaClearStatistics(VmaStatistics& outStats)
{
outStats.blockCount = 0;
outStats.allocationCount = 0;
@@ -4435,7 +4470,7 @@ static void VmaClearStatistics(VmaStatistics& outStats)
outStats.allocationBytes = 0;
}
-static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
+void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
{
inoutStats.blockCount += src.blockCount;
inoutStats.allocationCount += src.allocationCount;
@@ -4443,7 +4478,7 @@ static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src
inoutStats.allocationBytes += src.allocationBytes;
}
-static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
+void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
{
VmaClearStatistics(outStats.statistics);
outStats.unusedRangeCount = 0;
@@ -4453,7 +4488,7 @@ static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
outStats.unusedRangeSizeMax = 0;
}
-static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
{
inoutStats.statistics.allocationCount++;
inoutStats.statistics.allocationBytes += size;
@@ -4461,14 +4496,14 @@ static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats
inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
}
-static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
{
inoutStats.unusedRangeCount++;
inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
}
-static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
+void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
{
VmaAddStatistics(inoutStats.statistics, src.statistics);
inoutStats.unusedRangeCount += src.unusedRangeCount;
@@ -4478,6 +4513,8 @@ static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const Vm
inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
}
+} // namespace
+
#endif // _VMA_STATISTICS_FUNCTIONS
#ifndef _VMA_MUTEX_LOCK
@@ -4577,8 +4614,8 @@ struct VmaStlAllocator
VmaStlAllocator(const VmaStlAllocator&) = default;
VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
- T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
- void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+ T* allocate(size_t n);
+ void deallocate(T* p, size_t n);
template<typename U>
bool operator==(const VmaStlAllocator<U>& rhs) const
@@ -4591,6 +4628,12 @@ struct VmaStlAllocator
return m_pCallbacks != rhs.m_pCallbacks;
}
};
+
+template<typename T>
+T* VmaStlAllocator<T>::allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+
+template<typename T>
+void VmaStlAllocator<T>::deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
#endif // _VMA_STL_ALLOCATOR
#ifndef _VMA_VECTOR
@@ -4612,7 +4655,7 @@ class VmaVector
VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
VmaVector(const VmaVector& src);
VmaVector& operator=(const VmaVector& rhs);
- ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
+ ~VmaVector();
bool empty() const { return m_Count == 0; }
size_t size() const { return m_Count; }
@@ -4653,6 +4696,9 @@ class VmaVector
};
#ifndef _VMA_VECTOR_FUNCTIONS
+template<typename T, typename AllocatorT>
+VmaVector<T, AllocatorT>::~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
+
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
: m_Allocator(allocator),
@@ -4794,17 +4840,23 @@ void VmaVector<T, AllocatorT>::remove(size_t index)
}
#endif // _VMA_VECTOR_FUNCTIONS
+namespace
+{
+
template<typename T, typename allocatorT>
-static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
vec.insert(index, item);
}
template<typename T, typename allocatorT>
-static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
vec.remove(index);
}
+
+} // namespace
+
#endif // _VMA_VECTOR
#ifndef _VMA_SMALL_VECTOR
@@ -6202,7 +6254,10 @@ void VmaJsonWriter::WriteIndent(bool oneLess)
}
#endif // _VMA_JSON_WRITER_FUNCTIONS
-static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
+namespace
+{
+
+void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
{
json.BeginObject();
@@ -6233,6 +6288,9 @@ static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedSta
}
json.EndObject();
}
+
+} // namespace
+
#endif // _VMA_JSON_WRITER
#ifndef _VMA_MAPPING_HYSTERESIS
@@ -6313,7 +6371,7 @@ class VmaMappingHysteresis
}
private:
- static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
+ static constexpr int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
uint32_t m_MinorCounter = 0;
uint32_t m_MajorCounter = 0;
@@ -7116,7 +7174,7 @@ class VmaBlockBufferImageGranularity final
bool FinishValidation(ValidationContext& ctx) const;
private:
- static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
+ static constexpr uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
struct RegionInfo
{
@@ -9057,11 +9115,11 @@ class VmaBlockMetadata_TLSF : public VmaBlockMetadata
// According to original paper it should be preferable 4 or 5:
// M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
// http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
- static const uint8_t SECOND_LEVEL_INDEX = 5;
- static const uint16_t SMALL_BUFFER_SIZE = 256;
- static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
- static const uint8_t MEMORY_CLASS_SHIFT = 7;
- static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
+ static constexpr uint8_t SECOND_LEVEL_INDEX = 5;
+ static constexpr uint16_t SMALL_BUFFER_SIZE = 256;
+ static constexpr uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
+ static constexpr uint8_t MEMORY_CLASS_SHIFT = 7;
+ static constexpr uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
class Block
{
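For context on these constants: TLSF buckets a request size by the position of its most significant bit (first level) and the next `SECOND_LEVEL_INDEX` bits below it (second level), which is what gives the allocator its constant-time behavior. A sketch of the classic index computation from the cited Masmano et al. paper — not VMA's exact internal code, and it assumes GCC/Clang for `__builtin_clzll`:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t SECOND_LEVEL_INDEX = 5; // 2^5 = 32 sub-buckets per size class

    // Valid for sizes whose MSB position exceeds SECOND_LEVEL_INDEX;
    // real TLSF special-cases small sizes (cf. SMALL_BUFFER_SIZE above).
    void TlsfIndex(uint64_t size, uint32_t& fl, uint32_t& sl)
    {
        fl = 63u - static_cast<uint32_t>(__builtin_clzll(size));        // MSB position
        sl = static_cast<uint32_t>(size >> (fl - SECOND_LEVEL_INDEX)) & // next 5 bits
             ((1u << SECOND_LEVEL_INDEX) - 1u);
    }

    int main()
    {
        uint32_t fl = 0, sl = 0;
        TlsfIndex(1000, fl, sl);
        std::printf("fl=%u sl=%u\n", fl, sl); // 1000 = 0b1111101000 -> fl=9 sl=30
        return 0;
    }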
@@ -10077,7 +10135,7 @@ struct VmaDefragmentationContext_T
private:
// Max number of allocations to ignore due to size constraints before ending single pass
- static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
+ static constexpr uint8_t MAX_ALLOCS_TO_IGNORE = 16;
enum class CounterStatus { Pass, Ignore, End };
struct FragmentedBlock
@@ -10779,30 +10837,32 @@ struct VmaAllocator_T
#ifndef _VMA_MEMORY_FUNCTIONS
-static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+namespace
+{
+inline void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
-static void VmaFree(VmaAllocator hAllocator, void* ptr)
+inline void VmaFree(VmaAllocator hAllocator, void* ptr)
{
VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
template<typename T>
-static T* VmaAllocate(VmaAllocator hAllocator)
+T* VmaAllocate(VmaAllocator hAllocator)
{
return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
template<typename T>
-static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
template<typename T>
-static void vma_delete(VmaAllocator hAllocator, T* ptr)
+void vma_delete(VmaAllocator hAllocator, T* ptr)
{
if(ptr != VMA_NULL)
{
@@ -10812,7 +10872,7 @@ static void vma_delete(VmaAllocator hAllocator, T* ptr)
}
template<typename T>
-static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
{
if(ptr != VMA_NULL)
{
@@ -10821,6 +10881,7 @@ static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
VmaFree(hAllocator, ptr);
}
}
+} // namespace
#endif // _VMA_MEMORY_FUNCTIONS
#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
@@ -18949,7 +19010,7 @@ for the whole lifetime of the custom pool, because it will be used when the pool
No copy is made internally. This is why variable `exportMemAllocInfo` is defined as static.
If you want to export all memory allocated by VMA from certain memory types,
-also dedicated allocations or other allocations made from default pools,
+including dedicated allocations and allocations made from default pools,
an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
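To make that alternative concrete, a hypothetical setup that requests opaque-FD export for every memory type through `VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes` (assumes a Linux target with the VK_KHR_external_memory extensions enabled and existing instance/device handles; the function name is made up):

    #include <vulkan/vulkan.h>
    #include "vk_mem_alloc.h"

    VkResult CreateExportingAllocator(
        VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device,
        VmaAllocator* outAllocator)
    {
        // One entry per memory type, VK_MAX_MEMORY_TYPES elements total;
        // static so the array outlives this call.
        static VkExternalMemoryHandleTypeFlagsKHR handleTypes[VK_MAX_MEMORY_TYPES] = {};

        VkPhysicalDeviceMemoryProperties memProps;
        vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);
        for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
            handleTypes[i] = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;

        VmaAllocatorCreateInfo createInfo = {};
        createInfo.instance = instance;
        createInfo.physicalDevice = physicalDevice;
        createInfo.device = device;
        createInfo.pTypeExternalMemoryHandleTypes = handleTypes;
        return vmaCreateAllocator(&createInfo, outAllocator);
    }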