
Commit e379856

larunbe authored and Steven Price committed
drm/panthor: Replace sleep locks with spinlocks in fdinfo path
Commit 0590c94 ("drm/panthor: Fix race condition when gathering fdinfo group samples") introduced an xarray lock to deal with potential use-after-free errors when accessing groups' fdinfo figures. However, this toggles the kernel's atomic context status, so the next nested mutex lock will raise a warning when the kernel is compiled with mutex debug options:

CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_MUTEXES=y

Replace Panthor's group fdinfo data mutex with a guarded spinlock.

Signed-off-by: Adrián Larumbe <[email protected]>
Fixes: 0590c94 ("drm/panthor: Fix race condition when gathering fdinfo group samples")
Reviewed-by: Liviu Dudau <[email protected]>
Reviewed-by: Boris Brezillon <[email protected]>
Reviewed-by: Steven Price <[email protected]>
Signed-off-by: Steven Price <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent e4c0fd3 · commit e379856
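For context, the locking pattern at the heart of this fix looks roughly like the sketch below: xa_lock() takes the xarray's internal spinlock, which puts the CPU in atomic context, so any lock taken inside the iteration must be a non-sleeping lock. This is a minimal sketch only; demo_group and demo_gather are hypothetical stand-ins, not the Panthor code itself.

/* Minimal sketch of the pattern this patch fixes; names are
 * hypothetical stand-ins, not the actual Panthor code.
 */
#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct demo_group {
        spinlock_t lock;        /* was: struct mutex lock */
        u64 cycles;
};

static u64 demo_gather(struct xarray *groups)
{
        struct demo_group *group;
        unsigned long i;
        u64 total = 0;

        xa_lock(groups);                /* xarray spinlock: atomic context */
        xa_for_each(groups, i, group) {
                /* A mutex_lock() here would be a sleeping lock taken in
                 * atomic context and trips CONFIG_DEBUG_MUTEXES; a
                 * spinlock guard is safe here and is released
                 * automatically at the end of each loop iteration.
                 */
                guard(spinlock)(&group->lock);
                total += group->cycles;
                group->cycles = 0;
        }
        xa_unlock(groups);

        return total;
}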

File tree

1 file changed: +12 −14 lines changed


drivers/gpu/drm/panthor/panthor_sched.c

Lines changed: 12 additions & 14 deletions
@@ -9,6 +9,7 @@
 #include <drm/panthor_drm.h>
 
 #include <linux/build_bug.h>
+#include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -631,10 +632,10 @@ struct panthor_group {
 		struct panthor_gpu_usage data;
 
 		/**
-		 * @lock: Mutex to govern concurrent access from drm file's fdinfo callback
-		 * and job post-completion processing function
+		 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
+		 * callback and job post-completion processing function
 		 */
-		struct mutex lock;
+		spinlock_t lock;
 
 		/** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
 		size_t kbo_sizes;
@@ -910,8 +911,6 @@ static void group_release_work(struct work_struct *work)
 						  release_work);
 	u32 i;
 
-	mutex_destroy(&group->fdinfo.lock);
-
 	for (i = 0; i < group->queue_count; i++)
 		group_free_queue(group, group->queues[i]);
 
@@ -2861,12 +2860,12 @@ static void update_fdinfo_stats(struct panthor_job *job)
 	struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
 	struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
 
-	mutex_lock(&group->fdinfo.lock);
-	if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
-		fdinfo->cycles += data->cycles.after - data->cycles.before;
-	if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
-		fdinfo->time += data->time.after - data->time.before;
-	mutex_unlock(&group->fdinfo.lock);
+	scoped_guard(spinlock, &group->fdinfo.lock) {
+		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+			fdinfo->cycles += data->cycles.after - data->cycles.before;
+		if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+			fdinfo->time += data->time.after - data->time.before;
+	}
 }
 
 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
@@ -2880,12 +2879,11 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
 
 	xa_lock(&gpool->xa);
 	xa_for_each(&gpool->xa, i, group) {
-		mutex_lock(&group->fdinfo.lock);
+		guard(spinlock)(&group->fdinfo.lock);
 		pfile->stats.cycles += group->fdinfo.data.cycles;
 		pfile->stats.time += group->fdinfo.data.time;
 		group->fdinfo.data.cycles = 0;
 		group->fdinfo.data.time = 0;
-		mutex_unlock(&group->fdinfo.lock);
 	}
 	xa_unlock(&gpool->xa);
 }
@@ -3537,7 +3535,7 @@ int panthor_group_create(struct panthor_file *pfile,
 	mutex_unlock(&sched->reset.lock);
 
 	add_group_kbo_sizes(group->ptdev, group);
-	mutex_init(&group->fdinfo.lock);
+	spin_lock_init(&group->fdinfo.lock);
 
 	return gid;
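The guard(spinlock) and scoped_guard(spinlock, ...) helpers used above come from <linux/cleanup.h>, with the spinlock guard class generated by DEFINE_LOCK_GUARD_1() in <linux/spinlock.h>. Conceptually they work roughly as in the simplified model below — a sketch of the mechanism, not the exact kernel macro expansion; demo_guard_t and its helpers are hypothetical names.

/* Simplified model of the spinlock guard; not the real kernel
 * definitions, which are macro-generated.
 */
#include <linux/spinlock.h>

typedef struct {
        spinlock_t *lock;
} demo_guard_t;

static inline demo_guard_t demo_guard_ctor(spinlock_t *lock)
{
        demo_guard_t g = { .lock = lock };

        spin_lock(g.lock);
        return g;
}

static inline void demo_guard_dtor(demo_guard_t *g)
{
        spin_unlock(g->lock);
}

/*
 * guard(spinlock)(&l) then amounts to declaring something like
 *
 *      demo_guard_t __g __attribute__((cleanup(demo_guard_dtor))) =
 *              demo_guard_ctor(&l);
 *
 * so the unlock runs automatically when __g goes out of scope: at the
 * end of each xa_for_each() iteration in
 * panthor_fdinfo_gather_group_samples(), and at the closing brace of
 * the scoped_guard() block in update_fdinfo_stats(). This is why the
 * explicit mutex_unlock() calls (and the mutex_destroy() in
 * group_release_work()) disappear from the patched code.
 */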