@@ -73,11 +73,11 @@ class PipelineInfo
 		return true;
 	}
 
-
+
 	template<typename T>
 	struct direct_hash
 	{
-		size_t operator()(const uint64& k) const noexcept
+		size_t operator()(const uint64& k) const noexcept
 		{
 			return k;
 		}
@@ -277,7 +277,6 @@ class VulkanRenderer : public Renderer
 	// texture functions
 	void* texture_acquireTextureUploadBuffer(uint32 size) override;
 	void texture_releaseTextureUploadBuffer(uint8* mem) override;
-
 
 	TextureDecoder* texture_chooseDecodedFormat(Latte::E_GX2SURFFMT format, bool isDepth, Latte::E_DIM dim, uint32 width, uint32 height) override;
 
@@ -370,7 +369,7 @@ class VulkanRenderer : public Renderer
 	VkRect2D currentScissorRect{};
 
 	// vertex bindings
-	struct
+	struct
 	{
 		uint32 offset;
 	}currentVertexBinding[LATTE_MAX_VERTEX_BUFFERS]{};
@@ -457,17 +456,17 @@ class VulkanRenderer : public Renderer
 		bool shaderRoundingModeRTEFloat32{ false };
 	}shaderFloatControls; // from VK_KHR_shader_float_controls
 
-	struct
+	struct
 	{
 		bool debug_utils = false; // VK_EXT_DEBUG_UTILS
 	}instanceExtensions;
 
-	struct
+	struct
 	{
 		bool useTFEmulationViaSSBO = true; // emulate transform feedback via shader writes to a storage buffer
 	}mode;
 
-	struct
+	struct
 	{
 		uint32 minUniformBufferOffsetAlignment = 256;
 		uint32 nonCoherentAtomSize = 256;
@@ -497,7 +496,7 @@ class VulkanRenderer : public Renderer
 	void CreateCommandBuffers();
 
 	void swapchain_createDescriptorSetLayout();
-
+
 	// shader
 
 	bool IsAsyncPipelineAllowed(uint32 numIndices);
@@ -512,6 +511,8 @@ class VulkanRenderer : public Renderer
 	void DeleteFontTextures() override;
 	bool BeginFrame(bool mainWindow) override;
 
+	bool UseTFViaSSBO() const override { return m_featureControl.mode.useTFEmulationViaSSBO; }
+
 	// drawcall emulation
 	PipelineInfo* draw_createGraphicsPipeline(uint32 indexCount);
 	PipelineInfo* draw_getOrCreateGraphicsPipeline(uint32 indexCount);
@@ -574,7 +575,7 @@ class VulkanRenderer : public Renderer
 	VkDevice m_logicalDevice = VK_NULL_HANDLE;
 	VkDebugUtilsMessengerEXT m_debugCallback = nullptr;
 	volatile bool m_destructionRequested = false;
-
+
 	QueueFamilyIndices m_indices{};
 
 	Semaphore m_pipeline_cache_semaphore;
@@ -583,7 +584,7 @@ class VulkanRenderer : public Renderer
 	VkPipelineCache m_pipeline_cache{ nullptr };
 	VkPipelineLayout m_pipelineLayout{nullptr};
 	VkCommandPool m_commandPool{ nullptr };
-
+
 	// buffer to cache uniform vars
 	VkBuffer m_uniformVarBuffer = VK_NULL_HANDLE;
 	VkDeviceMemory m_uniformVarBufferMemory = VK_NULL_HANDLE;
@@ -652,19 +653,19 @@ class VulkanRenderer : public Renderer
 	bool m_submitOnIdle{}; // submit current buffer if Latte command processor goes into idle state (no more commands or waiting for externally signaled condition)
 
 	// tracking for dynamic offsets
-	struct
+	struct
 	{
 		uint32 uniformVarBufferOffset[VulkanRendererConst::SHADER_STAGE_INDEX_COUNT];
-		struct
+		struct
 		{
 			uint32 unformBufferOffset[LATTE_NUM_MAX_UNIFORM_BUFFERS];
 		}shaderUB[VulkanRendererConst::SHADER_STAGE_INDEX_COUNT];
 	}dynamicOffsetInfo{};
 
 	// streamout
-	struct
+	struct
 	{
-		struct
+		struct
 		{
 			bool enabled;
 			uint32 ringBufferOffset;
@@ -714,11 +715,11 @@ class VulkanRenderer : public Renderer
 		accessFlags = 0;
 		if constexpr ((TSyncOp & BUFFER_SHADER_READ) != 0)
 		{
-			// in theory: VK_ACCESS_INDEX_READ_BIT should be set here too but indices are currently separated
+			// in theory: VK_ACCESS_INDEX_READ_BIT should be set here too but indices are currently separated
 			stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
 			accessFlags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
 		}
-
+
 		if constexpr ((TSyncOp & BUFFER_SHADER_WRITE) != 0)
 		{
 			stages |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
@@ -921,7 +922,6 @@ class VulkanRenderer : public Renderer
 
 public:
 	bool GetDisableMultithreadedCompilation() const { return m_featureControl.disableMultithreadedCompilation; }
-	bool UseTFViaSSBO() const { return m_featureControl.mode.useTFEmulationViaSSBO; }
 	bool HasSPRIVRoundingModeRTE32() const { return m_featureControl.shaderFloatControls.shaderRoundingModeRTEFloat32; }
 	bool IsDebugUtilsEnabled() const { return m_featureControl.debugMarkersSupported && m_featureControl.instanceExtensions.debug_utils; }
 
@@ -931,7 +931,7 @@ class VulkanRenderer : public Renderer
 	void debug_genericBarrier();
 
 	// shaders
-	struct
+	struct
 	{
 		RendererShaderVk* copySurface_vs{};
 		RendererShaderVk* copySurface_psDepth2Color{};
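Note on the one functional change in this diff: UseTFViaSSBO() is removed as a VulkanRenderer-only getter (old line 924) and re-added as an override near line 514, which implies a matching virtual on the Renderer base class. A minimal, self-contained sketch of the pattern this enables is below; apart from UseTFViaSSBO() itself, every name is a simplified stand-in and not code from this commit.

// Sketch only: backend-agnostic code can now query the transform-feedback
// mode through the Renderer interface instead of casting to VulkanRenderer.
#include <cstdio>

class Renderer
{
public:
	virtual ~Renderer() = default;
	virtual bool UseTFViaSSBO() const = 0; // assumed counterpart virtual in the base class
};

class VulkanRendererStub : public Renderer
{
public:
	// mirrors the added override: reports whether transform feedback is
	// emulated via shader writes to a storage buffer
	bool UseTFViaSSBO() const override { return true; }
};

void SetupStreamout(const Renderer& renderer) // hypothetical caller
{
	if (renderer.UseTFViaSSBO())
		std::puts("streamout: emulate transform feedback via SSBO writes");
	else
		std::puts("streamout: use the native transform feedback path");
}

int main()
{
	VulkanRendererStub vk;
	SetupStreamout(vk);
	return 0;
}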
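The direct_hash helper touched in the first hunk is an identity hash: the 64-bit keys it is meant for are assumed to already be well distributed, so the key itself is returned as the hash value instead of being rehashed. A small usage sketch follows, assuming a std::unordered_map keyed by such 64-bit state hashes; the map name and key value are illustrative, not taken from this commit.

// Illustrative only: shows how an identity hash like direct_hash plugs into
// std::unordered_map as the Hash template parameter.
#include <cstddef>
#include <cstdint>
#include <unordered_map>

using uint64 = std::uint64_t;

template<typename T>
struct direct_hash
{
	std::size_t operator()(const uint64& k) const noexcept
	{
		return k; // key is used directly as the hash value
	}
};

int main()
{
	std::unordered_map<uint64, int, direct_hash<uint64>> pipelineByStateHash;
	pipelineByStateHash[0x123456789ABCDEF0ull] = 1;
	return pipelineByStateHash.count(0x123456789ABCDEF0ull) ? 0 : 1;
}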