
Commit c63c3bf

larunbe authored and Steven Price committed
drm/panthor: Avoid sleep locking in the internal BO size path
Commit 434e5ca ("drm/panthor: Expose size of driver internal BO's over fdinfo") locks the VMS xarray, to avoid UAF errors when the same VM is being concurrently destroyed by another thread. However, that puts the current thread in atomic context, which means taking the VMS' heap locks will trigger a warning as the thread is no longer allowed to sleep. Because in this case replacing the heap mutex with a spinlock isn't feasible, the fdinfo handler no longer traverses the list of heaps for every single VM associated with an open DRM file. Instead, when a new heap chunk is allocated, its size is accumulated into a pool-wide tally, which also makes the atomic context code path somewhat faster. Signed-off-by: Adrián Larumbe <[email protected]> Fixes: 434e5ca ("drm/panthor: Expose size of driver internal BO's over fdinfo") Reviewed-by: Boris Brezillon <[email protected]> Reviewed-by: Steven Price <[email protected]> Signed-off-by: Steven Price <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]

2 files changed, 31 insertions(+), 39 deletions(-)

drivers/gpu/drm/panthor/panthor_heap.c

Lines changed: 30 additions & 32 deletions
@@ -97,6 +97,9 @@ struct panthor_heap_pool {
 
 	/** @gpu_contexts: Buffer object containing the GPU heap contexts. */
 	struct panthor_kernel_bo *gpu_contexts;
+
+	/** @size: Size of all chunks across all heaps in the pool. */
+	atomic_t size;
 };
 
 static int panthor_heap_ctx_stride(struct panthor_device *ptdev)
@@ -118,7 +121,7 @@ static void *panthor_get_heap_ctx(struct panthor_heap_pool *pool, int id)
 	       panthor_get_heap_ctx_offset(pool, id);
 }
 
-static void panthor_free_heap_chunk(struct panthor_vm *vm,
+static void panthor_free_heap_chunk(struct panthor_heap_pool *pool,
 				    struct panthor_heap *heap,
 				    struct panthor_heap_chunk *chunk)
 {
@@ -127,12 +130,13 @@ static void panthor_free_heap_chunk(struct panthor_vm *vm,
 	heap->chunk_count--;
 	mutex_unlock(&heap->lock);
 
+	atomic_sub(heap->chunk_size, &pool->size);
+
 	panthor_kernel_bo_destroy(chunk->bo);
 	kfree(chunk);
 }
 
-static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
-				    struct panthor_vm *vm,
+static int panthor_alloc_heap_chunk(struct panthor_heap_pool *pool,
 				    struct panthor_heap *heap,
 				    bool initial_chunk)
 {
@@ -144,7 +148,7 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 	if (!chunk)
 		return -ENOMEM;
 
-	chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
+	chunk->bo = panthor_kernel_bo_create(pool->ptdev, pool->vm, heap->chunk_size,
 					     DRM_PANTHOR_BO_NO_MMAP,
 					     DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
 					     PANTHOR_VM_KERNEL_AUTO_VA);
@@ -180,6 +184,8 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 	heap->chunk_count++;
 	mutex_unlock(&heap->lock);
 
+	atomic_add(heap->chunk_size, &pool->size);
+
 	return 0;
 
 err_destroy_bo:
@@ -191,25 +197,24 @@ static int panthor_alloc_heap_chunk(struct panthor_device *ptdev,
 	return ret;
 }
 
-static void panthor_free_heap_chunks(struct panthor_vm *vm,
+static void panthor_free_heap_chunks(struct panthor_heap_pool *pool,
 				     struct panthor_heap *heap)
 {
 	struct panthor_heap_chunk *chunk, *tmp;
 
 	list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)
-		panthor_free_heap_chunk(vm, heap, chunk);
+		panthor_free_heap_chunk(pool, heap, chunk);
 }
 
-static int panthor_alloc_heap_chunks(struct panthor_device *ptdev,
-				     struct panthor_vm *vm,
+static int panthor_alloc_heap_chunks(struct panthor_heap_pool *pool,
 				     struct panthor_heap *heap,
 				     u32 chunk_count)
 {
 	int ret;
 	u32 i;
 
 	for (i = 0; i < chunk_count; i++) {
-		ret = panthor_alloc_heap_chunk(ptdev, vm, heap, true);
+		ret = panthor_alloc_heap_chunk(pool, heap, true);
 		if (ret)
 			return ret;
 	}
@@ -226,7 +231,7 @@ panthor_heap_destroy_locked(struct panthor_heap_pool *pool, u32 handle)
 	if (!heap)
 		return -EINVAL;
 
-	panthor_free_heap_chunks(pool->vm, heap);
+	panthor_free_heap_chunks(pool, heap);
 	mutex_destroy(&heap->lock);
 	kfree(heap);
 	return 0;
@@ -308,8 +313,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
 	heap->max_chunks = max_chunks;
 	heap->target_in_flight = target_in_flight;
 
-	ret = panthor_alloc_heap_chunks(pool->ptdev, vm, heap,
-					initial_chunk_count);
+	ret = panthor_alloc_heap_chunks(pool, heap, initial_chunk_count);
 	if (ret)
 		goto err_free_heap;
 
@@ -342,7 +346,7 @@ int panthor_heap_create(struct panthor_heap_pool *pool,
 	return id;
 
 err_free_heap:
-	panthor_free_heap_chunks(pool->vm, heap);
+	panthor_free_heap_chunks(pool, heap);
 	mutex_destroy(&heap->lock);
 	kfree(heap);
 
@@ -389,6 +393,7 @@ int panthor_heap_return_chunk(struct panthor_heap_pool *pool,
 			removed = chunk;
 			list_del(&chunk->node);
 			heap->chunk_count--;
+			atomic_sub(heap->chunk_size, &pool->size);
 			break;
 		}
 	}
@@ -466,7 +471,7 @@ int panthor_heap_grow(struct panthor_heap_pool *pool,
 	 * further jobs in this queue fail immediately instead of having to
 	 * wait for the job timeout.
 	 */
-	ret = panthor_alloc_heap_chunk(pool->ptdev, pool->vm, heap, false);
+	ret = panthor_alloc_heap_chunk(pool, heap, false);
 	if (ret)
 		goto out_unlock;
 
@@ -560,6 +565,8 @@ panthor_heap_pool_create(struct panthor_device *ptdev, struct panthor_vm *vm)
 	if (ret)
 		goto err_destroy_pool;
 
+	atomic_add(pool->gpu_contexts->obj->size, &pool->size);
+
 	return pool;
 
 err_destroy_pool:
@@ -594,8 +601,10 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 	xa_for_each(&pool->xa, i, heap)
 		drm_WARN_ON(&pool->ptdev->base, panthor_heap_destroy_locked(pool, i));
 
-	if (!IS_ERR_OR_NULL(pool->gpu_contexts))
+	if (!IS_ERR_OR_NULL(pool->gpu_contexts)) {
+		atomic_sub(pool->gpu_contexts->obj->size, &pool->size);
 		panthor_kernel_bo_destroy(pool->gpu_contexts);
+	}
 
 	/* Reflects the fact the pool has been destroyed. */
 	pool->vm = NULL;
@@ -605,27 +614,16 @@ void panthor_heap_pool_destroy(struct panthor_heap_pool *pool)
 }
 
 /**
- * panthor_heap_pool_size() - Calculate size of all chunks across all heaps in a pool
- * @pool: Pool whose total chunk size to calculate.
+ * panthor_heap_pool_size() - Get a heap pool's total size
+ * @pool: Pool whose total chunks size to return
  *
- * This function adds the size of all heap chunks across all heaps in the
- * argument pool. It also adds the size of the gpu contexts kernel bo.
- * It is meant to be used by fdinfo for displaying the size of internal
- * driver BO's that aren't exposed to userspace through a GEM handle.
+ * Returns the aggregated size of all chunks for all heaps in the pool
  *
  */
 size_t panthor_heap_pool_size(struct panthor_heap_pool *pool)
 {
-	struct panthor_heap *heap;
-	unsigned long i;
-	size_t size = 0;
-
-	down_read(&pool->lock);
-	xa_for_each(&pool->xa, i, heap)
-		size += heap->chunk_size * heap->chunk_count;
-	up_read(&pool->lock);
-
-	size += pool->gpu_contexts->obj->size;
+	if (!pool)
+		return 0;
 
-	return size;
+	return atomic_read(&pool->size);
 }

drivers/gpu/drm/panthor/panthor_mmu.c

Lines changed: 1 addition & 7 deletions
@@ -1963,13 +1963,7 @@ void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats
 
 	xa_lock(&pfile->vms->xa);
 	xa_for_each(&pfile->vms->xa, i, vm) {
-		size_t size = 0;
-
-		mutex_lock(&vm->heaps.lock);
-		if (vm->heaps.pool)
-			size = panthor_heap_pool_size(vm->heaps.pool);
-		mutex_unlock(&vm->heaps.lock);
-
+		size_t size = panthor_heap_pool_size(vm->heaps.pool);
 		stats->resident += size;
 		if (vm->as.id >= 0)
 			stats->active += size;
