@@ -97,6 +97,9 @@ struct panthor_heap_pool {
 
 	/** @gpu_contexts: Buffer object containing the GPU heap contexts. */
 	struct panthor_kernel_bo *gpu_contexts;
+
+	/** @size: Size of all chunks across all heaps in the pool. */
+	atomic_t size;
 };
 
 static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
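The new member turns the pool's memory footprint into a counter maintained at each alloc/free site, instead of a value recomputed under the pool lock on every query. A minimal userspace model of the idea, with C11 atomics standing in for the kernel's `atomic_t` (all names here are illustrative, not driver API):

```c
#include <stdatomic.h>
#include <stddef.h>

struct heap_pool_model {
	/* ... vm, ptdev, xa, lock ... */
	atomic_int size;	/* chunks + GPU-contexts BO, in bytes */
};

/* Writers use atomic add/sub at alloc/free sites; readers just
 * load the counter, with no pool lock and no heap walk. */
static size_t pool_size_model(struct heap_pool_model *pool)
{
	return pool ? (size_t)atomic_load(&pool->size) : 0;
}
```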
@@ -118,7 +121,7 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
 	       panthor_get_heap_ctx_offset(pool, id);
 }
 
-static void panthor_free_heap_chunk(struct panthor_vm *vm,
+static void panthor_free_heap_chunk(struct panthor_heap_pool *pool,
 				    struct panthor_heap *heap,
 				    struct panthor_heap_chunk *chunk)
 {
@@ -127,12 +130,13 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
 	heap->chunk_count--;
 	mutex_unlock(&heap->lock);
 
+	atomic_sub(heap->chunk_size, &pool->size);
+
 	panthor_kernel_bo_destroy(chunk->bo);
 	kfree(chunk);
 }
 
-static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
-				    struct panthor_vm *vm,
+static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
 				    struct panthor_heap *heap,
 				    bool initial_chunk)
 {
@@ -144,7 +148,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 	if (!chunk)
 		return -ENOMEM;
 
-	chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
+	chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
 					     DRM_PANTHOR_BO_NO_MMAP,
 					     DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 					     PANTHOR_VM_KERNEL_AUTO_VA);
@@ -180,6 +184,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 	heap->chunk_count++;
 	mutex_unlock(&heap->lock);
 
+	atomic_add(heap->chunk_size, &pool->size);
+
 	return 0;
 
 err_destroy_bo:
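Worth noting where the update lands in both paths: the counter is touched only after `mutex_unlock(&heap->lock)`, since atomics need no lock, and only on the success path after every fallible step, so the `err_destroy_bo` unwind never has to subtract anything back. A sketch of that ordering, with hypothetical helper names:

```c
#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's fallible and infallible
 * steps; only the ordering matters here. */
static void *create_bo(size_t bytes) { return malloc(bytes); }
static void link_chunk(void *bo) { (void)bo; /* list_add() etc. */ }

static atomic_int pool_size;

/* Account last: if the fallible step fails we return before the
 * counter moves, so the error path needs no rollback. */
static int alloc_chunk_sketch(int chunk_size)
{
	void *bo = create_bo(chunk_size);
	if (!bo)
		return -1;		/* counter untouched */

	link_chunk(bo);			/* cannot fail */
	atomic_fetch_add(&pool_size, chunk_size);
	return 0;
}
```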
@@ -191,25 +197,24 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 	return ret;
 }
 
-static void panthor_free_heap_chunks(struct panthor_vm *vm,
+static void panthor_free_heap_chunks(struct panthor_heap_pool *pool,
 				     struct panthor_heap *heap)
 {
 	struct panthor_heap_chunk *chunk, *tmp;
 
 	list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
-		panthor_free_heap_chunk(vm, heap, chunk);
+		panthor_free_heap_chunk(pool, heap, chunk);
 }
 
-static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
-				     struct panthor_vm *vm,
+static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool,
 				     struct panthor_heap *heap,
 				     u32 chunk_count)
 {
 	int ret;
 	u32 i;
 
 	for (i = 0; i < chunk_count; i++) {
-		ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
+		ret = panthor_alloc_heap_chunk(pool, heap, true);
 		if (ret)
 			return ret;
 	}
@@ -226,7 +231,7 @@ panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
 	if (!heap)
 		return -EINVAL;
 
-	panthor_free_heap_chunks(pool->vm, heap);
+	panthor_free_heap_chunks(pool, heap);
 	mutex_destroy(&heap->lock);
 	kfree(heap);
 	return 0;
@@ -308,8 +313,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
 	heap->max_chunks = max_chunks;
 	heap->target_in_flight = target_in_flight;
 
-	ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
-					initial_chunk_count);
+	ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count);
 	if (ret)
 		goto err_free_heap;
 
@@ -342,7 +346,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
 	return id;
 
 err_free_heap:
-	panthor_free_heap_chunks(pool->vm, heap);
+	panthor_free_heap_chunks(pool, heap);
 	mutex_destroy(&heap->lock);
 	kfree(heap);
 
@@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
 			removed = chunk;
 			list_del(&chunk->node);
 			heap->chunk_count--;
+			atomic_sub(heap->chunk_size, &pool->size);
 			break;
 		}
 	}
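This userspace-driven return path decrements the counter alongside `heap->chunk_count--`, so the two stay in lockstep whichever side frees a chunk. Because the counter is atomic, concurrent growth and return cannot lose updates even without a single serializing lock; a small runnable model of that property (C11 threads, illustrative only):

```c
#include <assert.h>
#include <stdatomic.h>
#include <threads.h>

static atomic_int pool_size;
#define CHUNK (2 << 20)

static int churn(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		atomic_fetch_add(&pool_size, CHUNK);	/* grow */
		atomic_fetch_sub(&pool_size, CHUNK);	/* return */
	}
	return 0;
}

int main(void)
{
	thrd_t a, b;

	thrd_create(&a, churn, NULL);
	thrd_create(&b, churn, NULL);
	thrd_join(a, NULL);
	thrd_join(b, NULL);
	assert(atomic_load(&pool_size) == 0);	/* no lost updates */
	return 0;
}
```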
@@ -466,7 +471,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
 	 * further jobs in this queue fail immediately instead of having to
 	 * wait for the job timeout.
 	 */
-	ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
+	ret = panthor_alloc_heap_chunk(pool, heap, false);
 	if (ret)
 		goto out_unlock;
 
@@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
 	if (ret)
 		goto err_destroy_pool;
 
+	atomic_add(pool->gpu_contexts->obj->size, &pool->size);
+
 	return pool;
 
 err_destroy_pool:
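The counter covers more than chunks: pool creation charges the GPU-contexts kernel BO up front, mirroring the `size += pool->gpu_contexts->obj->size` the old reader performed, and destruction (next hunk) subtracts it again. A toy lifetime check of the balanced accounting (sizes made up):

```c
#include <assert.h>
#include <stdatomic.h>

static atomic_int pool_size;

int main(void)
{
	int contexts_bo = 4096;		/* pretend contexts BO size */
	int chunk = 2 << 20;		/* pretend 2 MiB chunk */

	atomic_fetch_add(&pool_size, contexts_bo);	/* pool create */
	atomic_fetch_add(&pool_size, chunk);		/* chunk alloc */
	atomic_fetch_sub(&pool_size, chunk);		/* chunk free */
	atomic_fetch_sub(&pool_size, contexts_bo);	/* pool destroy */

	assert(atomic_load(&pool_size) == 0);		/* balanced */
	return 0;
}
```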
@@ -594,8 +601,10 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 	xa_for_each(&pool->xa, i, heap)
 		drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
 
-	if (!IS_ERR_OR_NULL(pool->gpu_contexts))
+	if (!IS_ERR_OR_NULL(pool->gpu_contexts)) {
+		atomic_sub(pool->gpu_contexts->obj->size, &pool->size);
 		panthor_kernel_bo_destroy(pool->gpu_contexts);
+	}
 
 	/* Reflects the fact the pool has been destroyed. */
 	pool->vm = NULL;
@@ -605,27 +614,16 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 }
 
 /**
- * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
- * @pool: Pool whose total chunk size to calculate.
+ * panthor_heap_pool_size() - Get a heap pool's total size
+ * @pool: Pool whose total chunk size to return.
  *
- * This function adds the size of all heap chunks across all heaps in the
- * argument pool. It also adds the size of the gpu contexts kernel bo.
- * It is meant to be used by fdinfo for displaying the size of internal
- * driver BO's that aren't exposed to userspace through a GEM handle.
+ * Returns the aggregated size of all chunks for all heaps in the pool.
  *
  */
 size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
 {
-	struct panthor_heap *heap;
-	unsigned long i;
-	size_t size = 0;
-
-	down_read(&pool->lock);
-	xa_for_each(&pool->xa, i, heap)
-		size += heap->chunk_size * heap->chunk_count;
-	up_read(&pool->lock);
-
-	size += pool->gpu_contexts->obj->size;
+	if (!pool)
+		return 0;
 
-	return size;
+	return atomic_read(&pool->size);
 }
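With every update site instrumented, the query collapses to one atomic read: no `down_read(&pool->lock)`, no XArray walk, and a NULL pool now reads as zero. The old kernel-doc noted the helper is meant for fdinfo reporting of internal BOs; a hypothetical caller sketch showing the intended usage (the wrapper name is made up, only the helper itself is part of this patch):

```c
#include <stddef.h>

struct panthor_heap_pool;	/* opaque to the caller */
size_t panthor_heap_pool_size(struct panthor_heap_pool *pool);

/* Hypothetical wrapper: an fdinfo-style report can query from any
 * context with no lock-ordering concerns, and file contexts that
 * never created a pool (pool == NULL) simply report zero bytes. */
static size_t report_internal_heap_bytes(struct panthor_heap_pool *pool)
{
	return panthor_heap_pool_size(pool);
}
```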