 #include "Cafe/HW/Latte/Renderer/Metal/MetalCommon.h"
 #include "Cafe/HW/Latte/Renderer/Metal/MetalMemoryManager.h"
 #include "Cafe/HW/Latte/Renderer/Metal/MetalRenderer.h"
-#include "Metal/MTLResource.hpp"
+#include "Cafe/HW/Latte/Renderer/Metal/MetalHybridComputePipeline.h"
+#include "Common/precompiled.h"
+#include "Foundation/NSRange.hpp"
+#include "Metal/MTLRenderCommandEncoder.hpp"
 
 const size_t BUFFER_ALLOCATION_SIZE = 8 * 1024 * 1024;
 
@@ -93,21 +96,46 @@ MetalRestridedBufferRange MetalVertexBufferCache::RestrideBufferIfNeeded(MTL::Bu
 
     if (restrideInfo.memoryInvalidated || stride != restrideInfo.lastStride)
     {
-        // TODO: use compute/void vertex function instead
         size_t newStride = Align(stride, 4);
         size_t newSize = vertexBufferRange.size / stride * newStride;
-        // TODO: use one big buffer for all restrided buffers
-        restrideInfo.buffer = m_mtlr->GetDevice()->newBuffer(newSize, MTL::StorageModeShared);
-
-        uint8* oldPtr = (uint8*)bufferCache->contents() + vertexBufferRange.offset;
-        uint8* newPtr = (uint8*)restrideInfo.buffer->contents();
-
-        for (size_t elem = 0; elem < vertexBufferRange.size / stride; elem++)
-        {
-            memcpy(newPtr + elem * newStride, oldPtr + elem * stride, stride);
-        }
-        // TODO: remove
-        debug_printf("Restrided vertex buffer (old stride: %zu, new stride: %zu, old size: %zu, new size: %zu)\n", stride, newStride, vertexBufferRange.size, newSize);
+        // Reallocate the destination buffer only if the required size changed
+        if (!restrideInfo.buffer || newSize != restrideInfo.buffer->length())
+        {
+            if (restrideInfo.buffer)
+                restrideInfo.buffer->release();
+            // TODO: use one big buffer for all restrided buffers
+            restrideInfo.buffer = m_mtlr->GetDevice()->newBuffer(newSize, MTL::StorageModeShared);
+        }
+
+        if (m_mtlr->GetEncoderType() == MetalEncoderType::Render)
+        {
+            auto renderCommandEncoder = static_cast<MTL::RenderCommandEncoder*>(m_mtlr->GetCommandEncoder());
+
+            // Restride on the GPU: bind the packed source and the aligned destination buffer
+            renderCommandEncoder->setRenderPipelineState(m_restrideBufferPipeline->GetRenderPipelineState());
+            MTL::Buffer* buffers[] = {bufferCache, restrideInfo.buffer};
+            size_t offsets[] = {vertexBufferRange.offset, 0};
+            renderCommandEncoder->setVertexBuffers(buffers, offsets, NS::Range(0, 2));
+
+            struct
+            {
+                uint32 oldStride;
+                uint32 newStride;
+            } strideData = {static_cast<uint32>(stride), static_cast<uint32>(newStride)};
+            renderCommandEncoder->setVertexBytes(&strideData, sizeof(strideData), 2);
+
+            // Draw one point per element; the vertex function copies each element into its aligned slot
+            renderCommandEncoder->drawPrimitives(MTL::PrimitiveTypePoint, NS::UInteger(0), vertexBufferRange.size / stride);
+
+            // Make the restrided data visible to subsequent vertex stages in this encoder
+            MTL::Resource* barrierBuffers[] = {restrideInfo.buffer};
+            renderCommandEncoder->memoryBarrier(barrierBuffers, 1, MTL::RenderStageVertex, MTL::RenderStageVertex);
+        }
+        else
+        {
+            debug_printf("vertex buffer restride needs an active render encoder\n");
+            cemu_assert_suspicious();
+        }
 
         restrideInfo.memoryInvalidated = false;
         restrideInfo.lastStride = newStride;