-
Notifications
You must be signed in to change notification settings - Fork 110
Expand file tree
/
Copy patheffect_cache.rs
More file actions
1576 lines (1434 loc) Β· 55.1 KB
/
effect_cache.rs
File metadata and controls
1576 lines (1434 loc) Β· 55.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
use std::{
cmp::Ordering,
num::{NonZeroU32, NonZeroU64},
ops::Range,
};
use bevy::{
asset::Handle,
ecs::{component::Component, resource::Resource},
log::{trace, warn},
platform::collections::HashMap,
render::{
mesh::allocator::MeshBufferSlice,
render_resource::{
binding_types::{storage_buffer_read_only, storage_buffer_read_only_sized},
*,
},
renderer::RenderDevice,
},
utils::default,
};
use bytemuck::cast_slice_mut;
use super::{buffer_table::BufferTableId, BufferBindingSource};
use crate::{
asset::EffectAsset,
render::{
calc_hash, event::GpuChildInfo, GpuDrawIndexedIndirectArgs, GpuDrawIndirectArgs,
GpuEffectMetadata, GpuIndirectIndex, GpuSpawnerParams, StorageType as _,
},
ParticleLayout,
};
/// Describes all particle slices of particles in the particle buffer
/// for a single effect.
///
/// Ordered by slab then by start offset (see the `Ord` impl), so slices
/// can be sorted into GPU submission order.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EffectSlice {
    /// Slice into the underlying [`BufferVec`].
    ///
    /// This is measured in items (particles), not bytes.
    pub slice: Range<u32>,
    /// ID of the particle slab in the [`EffectCache`].
    pub slab_id: SlabId,
    /// Particle layout of the effect.
    pub particle_layout: ParticleLayout,
}
impl Ord for EffectSlice {
    /// Order by owning slab first, then by start offset inside the slab.
    fn cmp(&self, other: &Self) -> Ordering {
        self.slab_id
            .cmp(&other.slab_id)
            .then_with(|| self.slice.start.cmp(&other.slice.start))
    }
}
impl PartialOrd for EffectSlice {
    /// Always consistent with [`Ord`]: delegates to the total order.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
/// A reference to a slice allocated inside an [`ParticleSlab`].
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SlabSliceRef {
    /// Range into a [`ParticleSlab`], in item count (particles, not bytes).
    range: Range<u32>,
    /// Particle layout for the effect stored in that slice.
    pub(crate) particle_layout: ParticleLayout,
}
impl SlabSliceRef {
    /// Number of items (particles) covered by this slice.
    #[allow(dead_code)]
    pub fn len(&self) -> u32 {
        self.range.len() as u32
    }

    /// Size of this slice in bytes, derived from the particle layout's
    /// per-item binding size.
    #[allow(dead_code)]
    pub fn byte_size(&self) -> usize {
        let item_size = self.particle_layout.min_binding_size().get() as usize;
        self.len() as usize * item_size
    }

    /// Item range of this slice inside its slab.
    pub fn range(&self) -> Range<u32> {
        self.range.clone()
    }
}
/// Cache key identifying the inputs the simulation bind group was built
/// from, used to detect when the cached bind group must be re-created.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
struct SimBindGroupKey {
    /// ID of the parent buffer bound, if any (`None` means no parent).
    buffer: Option<BufferId>,
    /// Byte offset of the parent binding.
    offset: u32,
    /// Byte size of the parent binding.
    size: u32,
}
impl SimBindGroupKey {
    /// Invalid key, often used as placeholder.
    ///
    /// Never compares equal to a key built from an actual
    /// [`BufferBindingSource`], since those always have `buffer: Some(..)`.
    pub const INVALID: Self = Self {
        buffer: None,
        offset: u32::MAX,
        size: 0,
    };
}
impl From<&BufferBindingSource> for SimBindGroupKey {
    /// Build a cache key from a live binding source.
    fn from(value: &BufferBindingSource) -> Self {
        let buffer = Some(value.buffer.id());
        let offset = value.offset;
        let size = value.size.get();
        Self {
            buffer,
            offset,
            size,
        }
    }
}
impl From<Option<&BufferBindingSource>> for SimBindGroupKey {
    /// Build a cache key from an optional binding source.
    ///
    /// A `None` source maps to [`SimBindGroupKey::INVALID`], which never
    /// compares equal to a key built from an actual binding source.
    fn from(value: Option<&BufferBindingSource>) -> Self {
        // Delegate to the `From<&BufferBindingSource>` impl so the field
        // mapping lives in a single place instead of being duplicated.
        value.map(Self::from).unwrap_or(Self::INVALID)
    }
}
/// State of a [`ParticleSlab`] after an insertion or removal operation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SlabState {
    /// The slab is in use, with allocated resources.
    Used,
    /// Like `Used`, but the slab was resized, so any bind group referencing
    /// its (re-allocated) buffers is nonetheless invalid.
    Resized,
    /// The slab is free (its resources were deallocated); the caller should
    /// drop it.
    Free,
}
/// ID of a [`ParticleSlab`] inside an [`EffectCache`].
///
/// Wraps the index of the slab in the cache's slab array; the value
/// `u32::MAX` is reserved as the invalid marker.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SlabId(u32);

impl SlabId {
    /// An invalid value, often used as placeholder.
    pub const INVALID: SlabId = SlabId(u32::MAX);

    /// Create a new slab ID from its underlying index.
    ///
    /// Panics if `index` is the reserved invalid marker (`u32::MAX`).
    pub const fn new(index: u32) -> Self {
        assert!(index != u32::MAX);
        Self(index)
    }

    /// Check if the current ID is valid, that is, is different from
    /// [`INVALID`].
    ///
    /// [`INVALID`]: Self::INVALID
    #[inline]
    #[allow(dead_code)]
    pub const fn is_valid(&self) -> bool {
        self.0 != u32::MAX
    }

    /// Get the raw underlying index, mostly for debugging / logging.
    #[inline]
    pub const fn index(&self) -> u32 {
        self.0
    }
}

impl Default for SlabId {
    /// The default ID is [`SlabId::INVALID`].
    fn default() -> Self {
        Self::INVALID
    }
}
/// Storage for the per-particle data of effects sharing compatible layouts.
///
/// Currently only accepts a single unique particle layout, fixed at creation.
/// If an effect has a different particle layout, it needs to be stored in a
/// different slab.
///
/// Also currently only accepts instances of a unique effect asset, although
/// this restriction is purely for convenience and may be relaxed in the future
/// to improve batching.
#[derive(Debug)]
pub struct ParticleSlab {
    /// GPU buffer storing all particles for the entire slab of effects.
    ///
    /// Each particle is a collection of attributes arranged according to
    /// [`Self::particle_layout`]. The buffer contains storage for exactly
    /// [`Self::capacity`] particles.
    particle_buffer: Buffer,
    /// GPU buffer storing the indirection indices for the entire slab of
    /// effects.
    ///
    /// Each indirection item contains 3 values:
    /// - the ping-pong alive particles and render indirect indices at offsets 0
    ///   and 1
    /// - the dead particle indices at offset 2
    ///
    /// The buffer contains storage for exactly [`Self::capacity`] items.
    indirect_index_buffer: Buffer,
    /// Layout of particles, shared by all effects stored in this slab.
    particle_layout: ParticleLayout,
    /// Total slab capacity, in number of particles.
    capacity: u32,
    /// Used slab size, in number of particles, either from allocated slices
    /// or from slices in the free list.
    used_size: u32,
    /// Array of free slices for new allocations, sorted in increasing order
    /// inside the slab buffers.
    free_slices: Vec<Range<u32>>,
    /// Handle of all effects common in this slab. TODO - replace with
    /// compatible layout.
    asset: Handle<EffectAsset>,
    /// Layout of the particle@1 bind group for the render pass.
    // TODO - move; this only depends on the particle and spawner layouts, can be shared across
    // slabs
    render_particles_buffer_layout: BindGroupLayout,
    /// Bind group particle@1 of the simulation passes (init and update).
    sim_bind_group: Option<BindGroup>,
    /// Key the `sim_bind_group` was created from; used to detect when the
    /// cached bind group is stale and must be re-created.
    sim_bind_group_key: SimBindGroupKey,
}
impl ParticleSlab {
/// Minimum buffer capacity to allocate, in number of particles.
pub const MIN_CAPACITY: u32 = 65536; // at least 64k particles
/// Create a new slab and the GPU resources to back it up.
///
/// The slab cannot contain less than [`MIN_CAPACITY`] particles. If the
/// input `capacity` is smaller, it's rounded up to [`MIN_CAPACITY`].
///
/// # Panics
///
/// This panics if the `capacity` is zero.
///
/// [`MIN_CAPACITY`]: Self::MIN_CAPACITY
pub fn new(
    slab_id: SlabId,
    asset: Handle<EffectAsset>,
    capacity: u32,
    particle_layout: ParticleLayout,
    render_device: &RenderDevice,
) -> Self {
    trace!(
        "ParticleSlab::new(slab_id={}, capacity={}, particle_layout={:?}, item_size={}B)",
        slab_id.0,
        capacity,
        particle_layout,
        particle_layout.min_binding_size().get(),
    );
    // Calculate the clamped capacity of the group, in number of particles.
    let capacity = capacity.max(Self::MIN_CAPACITY);
    // NOTE(review): after clamping to MIN_CAPACITY above, `capacity` can
    // never be zero, so this assert is effectively dead and the documented
    // panic can't trigger; consider asserting the input before the clamp.
    assert!(
        capacity > 0,
        "Attempted to create a zero-sized effect buffer."
    );
    // Allocate the particle buffer itself, containing the attributes of each
    // particle.
    // In debug builds the buffer is created mapped so it can be filled below
    // with a 0xFFFFFFFF poison pattern, making reads of uninitialized
    // particles easier to spot.
    #[cfg(debug_assertions)]
    let mapped_at_creation = true;
    #[cfg(not(debug_assertions))]
    let mapped_at_creation = false;
    let particle_capacity_bytes: BufferAddress =
        capacity as u64 * particle_layout.min_binding_size().get();
    let particle_label = format!("hanabi:buffer:slab{}:particle", slab_id.0);
    let particle_buffer = render_device.create_buffer(&BufferDescriptor {
        label: Some(&particle_label),
        size: particle_capacity_bytes,
        usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
        mapped_at_creation,
    });
    // Set content
    #[cfg(debug_assertions)]
    {
        // Scope get_mapped_range_mut() to force a drop before unmap()
        {
            let slice: &mut [u8] = &mut particle_buffer
                .slice(..particle_capacity_bytes)
                .get_mapped_range_mut();
            let slice: &mut [u32] = cast_slice_mut(slice);
            slice.fill(0xFFFFFFFF);
        }
        particle_buffer.unmap();
    }
    // Each indirect buffer stores 3 arrays of u32, of length the number of
    // particles.
    let indirect_capacity_bytes: BufferAddress = capacity as u64 * 4 * 3;
    let indirect_label = format!("hanabi:buffer:slab{}:indirect", slab_id.0);
    let indirect_index_buffer = render_device.create_buffer(&BufferDescriptor {
        label: Some(&indirect_label),
        size: indirect_capacity_bytes,
        usage: BufferUsages::COPY_DST | BufferUsages::STORAGE,
        mapped_at_creation: true,
    });
    // Set content
    {
        // Scope get_mapped_range_mut() to force a drop before unmap()
        {
            let slice: &mut [u8] = &mut indirect_index_buffer
                .slice(..indirect_capacity_bytes)
                .get_mapped_range_mut();
            let slice: &mut [u32] = cast_slice_mut(slice);
            // Initialize the dead-list entry (offset 2 of each 3-u32 item)
            // with the particle's own index: initially all particles are
            // dead, so the dead list simply enumerates every index.
            for index in 0..capacity {
                slice[3 * index as usize + 2] = index;
            }
        }
        indirect_index_buffer.unmap();
    }
    // Create the render layout.
    // TODO - move; this only depends on the particle and spawner layouts, can be
    // shared across slabs
    let spawner_params_size = GpuSpawnerParams::aligned_size(
        render_device.limits().min_storage_buffer_offset_alignment,
    );
    let label = format!("hanabi:bgl:render:particles@1:slab{}", slab_id.0);
    let render_particles_buffer_layout = render_device.create_bind_group_layout(
        &label[..],
        &BindGroupLayoutEntries::sequential(
            ShaderStages::VERTEX,
            (
                // @group(1) @binding(0) var<storage, read> particle_buffer : ParticleBuffer;
                storage_buffer_read_only_sized(false, Some(particle_layout.min_binding_size()))
                    .visibility(ShaderStages::VERTEX_FRAGMENT),
                // @group(1) @binding(1) var<storage, read> indirect_buffer : IndirectBuffer;
                storage_buffer_read_only::<GpuIndirectIndex>(false),
                // @group(1) @binding(2) var<storage, read> spawner : Spawner;
                storage_buffer_read_only_sized(true, Some(spawner_params_size)),
            ),
        ),
    );
    Self {
        particle_buffer,
        indirect_index_buffer,
        particle_layout,
        render_particles_buffer_layout,
        capacity,
        used_size: 0,
        free_slices: vec![],
        asset,
        sim_bind_group: None,
        sim_bind_group_key: SimBindGroupKey::INVALID,
    }
}
/// Bind group layout of the particle@1 bind group for the render pass.
// TODO - move; this only depends on the particle and spawner layouts, can be
// shared across slabs
pub fn render_particles_buffer_layout(&self) -> &BindGroupLayout {
    &self.render_particles_buffer_layout
}
/// GPU buffer storing the particle attributes of this slab.
#[inline]
pub fn particle_buffer(&self) -> &Buffer {
    &self.particle_buffer
}

/// GPU buffer storing the indirection indices (ping/pong/dead lists) of
/// this slab.
#[inline]
pub fn indirect_index_buffer(&self) -> &Buffer {
    &self.indirect_index_buffer
}
/// Return a binding for the entire particle buffer.
///
/// The binding covers exactly `capacity` particles, each of the layout's
/// minimum binding size.
pub fn as_entire_binding_particle(&self) -> BindingResource<'_> {
    let item_size = self.particle_layout.min_binding_size().get();
    let capacity_bytes = self.capacity as u64 * item_size;
    // `capacity` is clamped to MIN_CAPACITY at creation, so the byte size
    // is always non-zero.
    let size = Some(NonZeroU64::new(capacity_bytes).unwrap());
    BindingResource::Buffer(BufferBinding {
        buffer: &self.particle_buffer,
        offset: 0,
        size,
    })
}
/// Return a binding source for the entire particle buffer.
pub fn max_binding_source(&self) -> BufferBindingSource {
    // Use a checked multiply: the original unchecked `u32` multiply would
    // silently wrap in release builds on a (pathologically) large slab,
    // producing a bogus binding size. Better to panic loudly.
    let capacity_bytes = self
        .capacity
        .checked_mul(self.particle_layout.min_binding_size32().get())
        .expect("Particle slab byte size overflows u32");
    BufferBindingSource {
        buffer: self.particle_buffer.clone(),
        offset: 0,
        size: NonZeroU32::new(capacity_bytes).unwrap(),
    }
}
/// Return a binding for the entire indirect buffer associated with the
/// current effect buffer.
pub fn as_entire_binding_indirect(&self) -> BindingResource<'_> {
    // 3 u32 indices (ping, pong, dead) per particle = 12 bytes per item.
    let capacity_bytes = self.capacity as u64 * 12;
    // `capacity` is clamped to MIN_CAPACITY at creation, so this is never 0.
    BindingResource::Buffer(BufferBinding {
        buffer: &self.indirect_index_buffer,
        offset: 0,
        size: Some(NonZeroU64::new(capacity_bytes).unwrap()),
    })
    //self.indirect_index_buffer.as_entire_binding()
}
/// Create the "particle" bind group @1 for the init and update passes if
/// needed.
///
/// The bind group is cached and keyed on the parent binding source; it's
/// only (re-)created when no bind group exists yet or the key changed.
///
/// The `slab_id` must be the ID of the current [`ParticleSlab`] inside the
/// [`EffectCache`].
pub fn create_particle_sim_bind_group(
    &mut self,
    layout: &BindGroupLayout,
    slab_id: &SlabId,
    render_device: &RenderDevice,
    parent_binding_source: Option<&BufferBindingSource>,
) {
    // Cheap early-out: the cached bind group is still valid for this key.
    let key: SimBindGroupKey = parent_binding_source.into();
    if self.sim_bind_group.is_some() && self.sim_bind_group_key == key {
        return;
    }
    let label = format!("hanabi:bg:sim:particle@1:vfx{}", slab_id.index());
    // With a parent effect, a third entry binds the parent's particle
    // buffer; otherwise only this slab's own buffers are bound. Both arms
    // borrow a temporary entries array that lives until end of statement.
    let entries: &[BindGroupEntry] = if let Some(parent_binding) =
        parent_binding_source.as_ref().map(|bbs| bbs.as_binding())
    {
        &BindGroupEntries::sequential((
            self.as_entire_binding_particle(),
            self.as_entire_binding_indirect(),
            parent_binding,
        ))
    } else {
        &BindGroupEntries::sequential((
            self.as_entire_binding_particle(),
            self.as_entire_binding_indirect(),
        ))
    };
    trace!(
        "Create particle simulation bind group '{}' with {} entries (has_parent:{})",
        label,
        entries.len(),
        parent_binding_source.is_some(),
    );
    let bind_group = render_device.create_bind_group(Some(&label[..]), layout, entries);
    self.sim_bind_group = Some(bind_group);
    self.sim_bind_group_key = key;
}
/// Invalidate any existing simulate bind group.
///
/// Invalidate any existing bind group previously created by
/// [`create_particle_sim_bind_group()`], generally because a buffer was
/// re-allocated. This forces a re-creation of the bind group
/// next time [`create_particle_sim_bind_group()`] is called.
///
/// [`create_particle_sim_bind_group()`]: self::ParticleSlab::create_particle_sim_bind_group
#[allow(dead_code)] // FIXME - review this...
fn invalidate_particle_sim_bind_group(&mut self) {
    // Resetting the key to INVALID guarantees the next create call won't
    // match and will rebuild the bind group.
    self.sim_bind_group = None;
    self.sim_bind_group_key = SimBindGroupKey::INVALID;
}
/// Return the cached particle@1 bind group for the simulation (init and
/// update) passes.
///
/// This is the per-buffer bind group at binding @1 which binds all
/// per-buffer resources shared by all effect instances batched in a single
/// buffer. The bind group is created by
/// [`create_particle_sim_bind_group()`], and cached until a call to
/// [`invalidate_particle_sim_bind_group()`] clears the
/// cached reference.
///
/// [`create_particle_sim_bind_group()`]: self::ParticleSlab::create_particle_sim_bind_group
/// [`invalidate_particle_sim_bind_group()`]: self::ParticleSlab::invalidate_particle_sim_bind_group
pub fn particle_sim_bind_group(&self) -> Option<&BindGroup> {
    self.sim_bind_group.as_ref()
}
/// Try to recycle a free slice to store `size` items.
///
/// Scans the free list for the smallest slice at least `size` items long
/// (best fit); on ties the first (lowest-offset) candidate wins. If the
/// winner is strictly larger than `size`, its head is allocated and the
/// tail stays in the free list; an exact fit is removed entirely. Returns
/// `None` when no free slice can hold `size` items.
fn pop_free_slice(&mut self, size: u32) -> Option<Range<u32>> {
    // Best fit via `min_by_key`, which keeps the first of equal minima.
    // This replaces the previous sentinel-based scan, which used
    // `u32::MAX` as the "no candidate yet" capacity and therefore could
    // never select a (pathological) free slice of capacity `u32::MAX`.
    let (index, range) = self
        .free_slices
        .iter()
        .enumerate()
        .filter(|(_, slice)| slice.end - slice.start >= size)
        .min_by_key(|(_, slice)| slice.end - slice.start)
        .map(|(index, slice)| (index, slice.clone()))?;
    if range.is_empty() {
        // Defensive: an empty entry should never be in the free list;
        // mirror the previous behavior of treating it as "no result".
        return None;
    }
    let capacity = range.end - range.start;
    if capacity > size {
        // Split: allocate the head, keep the tail in the free list.
        let used_end = range.start + size;
        self.free_slices[index] = used_end..range.end;
        Some(range.start..used_end)
    } else {
        // Exact fit: recycle the whole entry.
        self.free_slices.remove(index);
        Some(range)
    }
}
/// Allocate a new entry in the slab to store the particles of a single
/// effect.
///
/// Returns `None` if the slab cannot hold `capacity` more particles.
pub fn allocate(&mut self, capacity: u32) -> Option<SlabSliceRef> {
    trace!("ParticleSlab::allocate(capacity={})", capacity);
    if capacity > self.capacity {
        return None;
    }
    // First try to recycle a previously freed slice; otherwise grow the
    // used region at the end of the buffer.
    let range = match self.pop_free_slice(capacity) {
        Some(recycled) => recycled,
        None => {
            let new_size = self.used_size.checked_add(capacity).unwrap();
            if new_size > self.capacity {
                // Only warn if the slab is empty: a full-but-used slab is
                // expected, an empty slab that can't fit the effect is not.
                if self.used_size == 0 {
                    warn!(
                        "Cannot allocate slice of size {} in particle slab of capacity {}.",
                        capacity, self.capacity
                    );
                }
                return None;
            }
            let grown = self.used_size..new_size;
            self.used_size = new_size;
            grown
        }
    };
    trace!("-> allocated slice {:?}", range);
    Some(SlabSliceRef {
        range,
        particle_layout: self.particle_layout.clone(),
    })
}
/// Free an allocated slice, and if this was the last allocated slice also
/// free the buffer.
///
/// Returns [`SlabState::Free`] when the slab holds no allocation anymore
/// after the removal, [`SlabState::Used`] otherwise.
pub fn free_slice(&mut self, slice: SlabSliceRef) -> SlabState {
    // If slice is at the end of the buffer, reduce total used size
    if slice.range.end == self.used_size {
        self.used_size = slice.range.start;
        // Check other free slices to further reduce used size and drain the free slice
        // list
        while let Some(free_slice) = self.free_slices.last() {
            if free_slice.end == self.used_size {
                self.used_size = free_slice.start;
                self.free_slices.pop();
            } else {
                break;
            }
        }
        if self.used_size == 0 {
            assert!(self.free_slices.is_empty());
            // The buffer is not used anymore, free it too
            SlabState::Free
        } else {
            // There are still some slices used, the last one of which ends at
            // self.used_size
            SlabState::Used
        }
    } else {
        // Free slice is not at end; insert it in free list
        let range = slice.range;
        // Binary-search the sorted free list for the insertion point.
        // `Equal` here means the candidate overlaps `range`, which would
        // indicate a double free — warn instead of corrupting the list.
        match self.free_slices.binary_search_by(|s| {
            if s.end <= range.start {
                Ordering::Less
            } else if s.start >= range.end {
                Ordering::Greater
            } else {
                Ordering::Equal
            }
        }) {
            Ok(_) => warn!("Range {:?} already present in free list!", range),
            Err(index) => self.free_slices.insert(index, range),
        }
        SlabState::Used
    }
}
/// Check whether this slab is compatible with the given asset.
///
/// This allows determining whether an instance of the effect can be stored
/// inside this slab.
pub fn is_compatible(
    &self,
    handle: &Handle<EffectAsset>,
    _particle_layout: &ParticleLayout,
) -> bool {
    // TODO - replace with check particle layout is compatible to allow tighter
    // packing in less buffers, and update in the less dispatch calls
    self.asset == *handle
}
}
/// A single cached effect in the [`EffectCache`].
#[derive(Debug, Component)]
pub(crate) struct CachedEffect {
    /// ID of the slab storing the particles for this effect in the
    /// [`EffectCache`].
    pub slab_id: SlabId,
    /// The allocated effect slice within that slab.
    pub slice: SlabSliceRef,
}
/// Indirect draw arguments for either an indexed or a non-indexed draw call.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum AnyDrawIndirectArgs {
    /// Args of a non-indexed draw call.
    NonIndexed(GpuDrawIndirectArgs),
    /// Args of an indexed draw call.
    Indexed(GpuDrawIndexedIndirectArgs),
}
impl AnyDrawIndirectArgs {
    /// Create from a vertex buffer slice and an optional index buffer one.
    ///
    /// The instance count always starts at zero; the GPU later writes the
    /// actual number of instances to render.
    pub fn from_slices(
        vertex_slice: &MeshBufferSlice<'_>,
        index_slice: Option<&MeshBufferSlice<'_>>,
    ) -> Self {
        if let Some(index_slice) = index_slice {
            Self::Indexed(GpuDrawIndexedIndirectArgs {
                index_count: index_slice.range.len() as u32,
                instance_count: 0,
                first_index: index_slice.range.start,
                base_vertex: vertex_slice.range.start as i32,
                first_instance: 0,
            })
        } else {
            Self::NonIndexed(GpuDrawIndirectArgs {
                vertex_count: vertex_slice.range.len() as u32,
                instance_count: 0,
                first_vertex: vertex_slice.range.start,
                first_instance: 0,
            })
        }
    }

    /// Check if this args are for an indexed draw call.
    #[inline(always)]
    #[allow(dead_code)]
    pub fn is_indexed(&self) -> bool {
        matches!(*self, Self::Indexed(..))
    }

    /// Bit-cast the args to the row entry of the GPU buffer.
    ///
    /// If non-indexed, this returns an indexed struct bit-cast from the actual
    /// non-indexed one, ready for GPU upload: the 4 fields of the non-indexed
    /// args occupy the first 4 fields of the indexed struct, and the trailing
    /// `first_instance` is zeroed. The GPU re-interprets the row according to
    /// the actual draw mode.
    pub fn bitcast_to_row_entry(&self) -> GpuDrawIndexedIndirectArgs {
        match self {
            AnyDrawIndirectArgs::NonIndexed(args) => GpuDrawIndexedIndirectArgs {
                index_count: args.vertex_count,
                instance_count: args.instance_count,
                first_index: args.first_vertex,
                base_vertex: args.first_instance as i32,
                first_instance: 0,
            },
            AnyDrawIndirectArgs::Indexed(args) => *args,
        }
    }
}
impl From<GpuDrawIndirectArgs> for AnyDrawIndirectArgs {
    /// Wrap non-indexed draw args.
    fn from(args: GpuDrawIndirectArgs) -> Self {
        Self::NonIndexed(args)
    }
}

impl From<GpuDrawIndexedIndirectArgs> for AnyDrawIndirectArgs {
    /// Wrap indexed draw args.
    fn from(args: GpuDrawIndexedIndirectArgs) -> Self {
        Self::Indexed(args)
    }
}
/// Index of a row (entry) into the [`BufferTable`] storing the indirect draw
/// args of a single draw call.
#[derive(Debug, Clone, Copy, Component)]
pub(crate) struct CachedDrawIndirectArgs {
    /// Row of the args entry in the GPU buffer table.
    pub row: BufferTableId,
    /// CPU-side copy of the draw args stored in that row.
    pub args: AnyDrawIndirectArgs,
}
impl Default for CachedDrawIndirectArgs {
    /// Default to an invalid row with zeroed non-indexed args.
    fn default() -> Self {
        Self {
            row: BufferTableId::INVALID,
            args: AnyDrawIndirectArgs::NonIndexed(default()),
        }
    }
}
impl CachedDrawIndirectArgs {
    /// Check if the index is valid.
    ///
    /// An invalid index doesn't correspond to any allocated args entry. A valid
    /// one may, but note that the args entry in the buffer may have been freed
    /// already with this index. There's no mechanism to detect reuse either.
    #[inline(always)]
    #[allow(dead_code)]
    pub fn is_valid(&self) -> bool {
        self.get_row_raw().is_valid()
    }

    /// Check if this row index refers to an indexed draw args entry.
    #[inline(always)]
    #[allow(dead_code)]
    pub fn is_indexed(&self) -> bool {
        self.args.is_indexed()
    }

    /// Get the row index, asserting it's valid.
    ///
    /// Prefer this over [`get_row_raw()`] when a valid row is an invariant of
    /// the call site, e.g. when storing the row index into a GPU buffer.
    ///
    /// # Panics
    ///
    /// Panics if the index is invalid, whether indexed or non-indexed.
    ///
    /// [`get_row_raw()`]: Self::get_row_raw
    pub fn get_row(&self) -> BufferTableId {
        let idx = self.get_row_raw();
        assert!(idx.is_valid());
        idx
    }

    /// Get the row index without any validity check.
    #[inline(always)]
    fn get_row_raw(&self) -> BufferTableId {
        self.row
    }
}
/// The indices in the indirect dispatch buffers for a single effect, as well as
/// that of the metadata buffer.
#[derive(Debug, Default, Clone, Copy, Component)]
pub(crate) struct DispatchBufferIndices {
    /// The index of the [`GpuDispatchIndirect`] row in the GPU buffer
    /// [`EffectsMeta::update_dispatch_indirect_buffer`].
    ///
    /// [`GpuDispatchIndirect`]: super::GpuDispatchIndirect
    /// [`EffectsMeta::update_dispatch_indirect_buffer`]: super::EffectsMeta::dispatch_indirect_buffer
    pub(crate) update_dispatch_indirect_buffer_row_index: u32,
}
/// Key into the cache of particle@1 bind group layouts, capturing the
/// binding sizes the layout was built for.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ParticleBindGroupLayoutKey {
    /// Minimum binding size of the effect's own particle buffer.
    pub min_binding_size: NonZeroU32,
    /// Minimum binding size of the parent effect's particle buffer, if any.
    pub parent_min_binding_size: Option<NonZeroU32>,
}
/// Cache for effect instances sharing common GPU data structures.
#[derive(Resource)]
pub struct EffectCache {
    /// Render device the GPU resources (buffers) are allocated from.
    render_device: RenderDevice,
    /// Collection of particle slabs managed by this cache. Some slabs might be
    /// `None` if the entry is not used. Since the slabs are referenced
    /// by index, we cannot move them once they're allocated.
    particle_slabs: Vec<Option<ParticleSlab>>,
    /// Cache of bind group layouts for the particle@1 bind groups of the
    /// simulation passes (init and update). Since all bindings depend only
    /// on buffers managed by the [`EffectCache`], we also cache the layouts
    /// here for convenience.
    particle_bind_group_layout_descs:
        HashMap<ParticleBindGroupLayoutKey, BindGroupLayoutDescriptor>,
    /// Cache of bind group layouts for the metadata@3 bind group of the init
    /// pass, indexed by whether the effect consumes GPU spawn events.
    metadata_init_bind_group_layout_desc: [Option<BindGroupLayoutDescriptor>; 2],
    /// Cache of bind group layouts for the metadata@3 bind group of the
    /// update pass.
    metadata_update_bind_group_layout_descs: HashMap<u32, BindGroupLayoutDescriptor>,
}
impl EffectCache {
/// Create a new empty cache allocating from the given render device.
pub fn new(device: RenderDevice) -> Self {
    Self {
        render_device: device,
        particle_slabs: Vec::new(),
        particle_bind_group_layout_descs: default(),
        metadata_init_bind_group_layout_desc: [None, None],
        metadata_update_bind_group_layout_descs: default(),
    }
}
/// Get all the particle slab slots. Unallocated slots are `None`. This can
/// be indexed by the slab index (see [`SlabId::index()`]).
#[allow(dead_code)]
#[inline]
pub fn slabs(&self) -> &[Option<ParticleSlab>] {
    &self.particle_slabs
}

/// Get all the particle slab slots, mutably. Unallocated slots are `None`.
/// This can be indexed by the slab index (see [`SlabId::index()`]).
#[allow(dead_code)]
#[inline]
pub fn slabs_mut(&mut self) -> &mut [Option<ParticleSlab>] {
    &mut self.particle_slabs
}
/// Fetch a specific slab by ID.
#[inline]
pub fn get_slab(&self, slab_id: &SlabId) -> Option<&ParticleSlab> {
    self.particle_slabs
        .get(slab_id.0 as usize)
        .and_then(Option::as_ref)
}

/// Fetch a specific slab by ID, mutably.
#[allow(dead_code)]
#[inline]
pub fn get_slab_mut(&mut self, slab_id: &SlabId) -> Option<&mut ParticleSlab> {
    self.particle_slabs
        .get_mut(slab_id.0 as usize)
        .and_then(Option::as_mut)
}
/// Invalidate all the particle@1 bind group for all buffers.
///
/// This iterates over all valid buffers and calls
/// [`ParticleSlab::invalidate_particle_sim_bind_group()`] on each one.
#[allow(dead_code)] // FIXME - review this...
pub fn invalidate_particle_sim_bind_groups(&mut self) {
    // `flatten()` skips the unallocated (`None`) slab slots.
    for buffer in self.particle_slabs.iter_mut().flatten() {
        buffer.invalidate_particle_sim_bind_group();
    }
}
/// Insert a new effect instance in the cache.
///
/// Tries to fit the effect into an existing compatible slab first; if none
/// can hold it, allocates a brand new [`ParticleSlab`] (reusing a free slot
/// in the slab array if any) and places the effect there. Returns the
/// cached entry pointing at the allocated slice.
pub fn insert(
    &mut self,
    asset: Handle<EffectAsset>,
    capacity: u32,
    particle_layout: &ParticleLayout,
) -> CachedEffect {
    trace!("Inserting new effect into cache: capacity={capacity}");
    let (slab_id, slice) = self
        .particle_slabs
        .iter_mut()
        .enumerate()
        .find_map(|(slab_index, maybe_slab)| {
            // Ignore empty (non-allocated) entries as we're trying to fit the new
            // allocation inside an existing slab.
            let slab = maybe_slab.as_mut()?;
            // The slab must be compatible with the effect's layout, otherwise ignore it.
            if !slab.is_compatible(&asset, particle_layout) {
                return None;
            }
            // Try to allocate a slice into the slab
            slab.allocate(capacity)
                .map(|slice| (SlabId::new(slab_index as u32), slice))
        })
        .unwrap_or_else(|| {
            // Cannot find any suitable slab; allocate a new one, reusing a free
            // slot in the slab array if any.
            let index = self
                .particle_slabs
                .iter()
                .position(|buf| buf.is_none())
                .unwrap_or(self.particle_slabs.len());
            let byte_size = capacity
                .checked_mul(particle_layout.min_binding_size().get() as u32)
                .unwrap_or_else(|| {
                    panic!(
                        "Effect size overflow: capacity={:?} particle_layout={:?} item_size={}",
                        capacity,
                        particle_layout,
                        particle_layout.min_binding_size().get()
                    )
                });
            trace!(
                "Creating new particle slab #{} for effect {:?} (capacity={:?}, particle_layout={:?} item_size={}, byte_size={})",
                index,
                asset,
                capacity,
                particle_layout,
                particle_layout.min_binding_size().get(),
                byte_size
            );
            let slab_id = SlabId::new(index as u32);
            let mut slab = ParticleSlab::new(
                slab_id,
                asset,
                capacity,
                particle_layout.clone(),
                &self.render_device,
            );
            // A fresh slab always has room for its creation capacity.
            let slice_ref = slab.allocate(capacity).unwrap();
            if index >= self.particle_slabs.len() {
                self.particle_slabs.push(Some(slab));
            } else {
                debug_assert!(self.particle_slabs[index].is_none());
                self.particle_slabs[index] = Some(slab);
            }
            (slab_id, slice_ref)
        });
    // `slice` is already the final `SlabSliceRef`; the previous code
    // needlessly rebuilt it field by field, cloning the range.
    trace!(
        "Insert effect slab_id={} slice={}B particle_layout={:?}",
        slab_id.0,
        slice.particle_layout.min_binding_size().get(),
        slice.particle_layout,
    );
    CachedEffect { slab_id, slice }
}
/// Remove an effect from the cache. If this was the last effect, drop the
/// underlying buffer and return the index of the dropped buffer.
///
/// Returns `Err(())` if the effect's slab ID doesn't resolve to an
/// allocated slab.
pub fn remove(&mut self, cached_effect: &CachedEffect) -> Result<SlabState, ()> {
    // Resolve the slab slot by index, then the slab itself.
    let slot = self
        .particle_slabs
        .get_mut(cached_effect.slab_id.0 as usize)
        .ok_or(())?;
    let slab = slot.as_mut().ok_or(())?;
    // Free the slice inside the resolved slab; drop the slab entirely if it
    // became empty.
    if slab.free_slice(cached_effect.slice.clone()) == SlabState::Free {
        *slot = None;
        Ok(SlabState::Free)
    } else {
        Ok(SlabState::Used)
    }
}
//
// Bind group layouts
//

/// Ensure a bind group layout exists for the bind group @1 ("particles")
/// for use with the given min binding sizes.
///
/// Returns the cached descriptor, creating it on first use for this
/// combination of sizes.
pub fn ensure_particle_bind_group_layout_desc(
    &mut self,
    min_binding_size: NonZeroU32,
    parent_min_binding_size: Option<NonZeroU32>,
) -> &BindGroupLayoutDescriptor {
    // FIXME - This "ensure" pattern means we never de-allocate entries. This is
    // probably fine, because there's a limited number of realistic combinations,
    // but could cause wastes if e.g. loading widely different scenes.
    let key = ParticleBindGroupLayoutKey {
        min_binding_size,
        parent_min_binding_size,
    };
    self.particle_bind_group_layout_descs
        .entry(key)
        .or_insert_with(|| {
            trace!("Creating new particle sim bind group @1 for min_binding_size={} parent_min_binding_size={:?}", min_binding_size, parent_min_binding_size);
            create_particle_sim_bind_group_layout_desc(
                min_binding_size,
                parent_min_binding_size,
            )
        })
}
/// Get the bind group layout for the bind group @1 ("particles") for use
/// with the given min binding sizes.
///
/// Returns `None` when no descriptor was created yet for this combination
/// of sizes (see [`ensure_particle_bind_group_layout_desc()`]).
///
/// [`ensure_particle_bind_group_layout_desc()`]: Self::ensure_particle_bind_group_layout_desc
pub fn particle_bind_group_layout_desc(
    &self,
    min_binding_size: NonZeroU32,
    parent_min_binding_size: Option<NonZeroU32>,
) -> Option<&BindGroupLayoutDescriptor> {
    self.particle_bind_group_layout_descs
        .get(&ParticleBindGroupLayoutKey {
            min_binding_size,
            parent_min_binding_size,
        })
}
/// Ensure a bind group layout exists for the metadata@3 bind group of
/// the init pass.
pub fn ensure_metadata_init_bind_group_layout_desc(&mut self, consume_gpu_spawn_events: bool) {
let layout =
&mut self.metadata_init_bind_group_layout_desc[consume_gpu_spawn_events as usize];
if layout.is_none() {