Skip to content

Commit 4312b60

Browse files
Lijo Lazar authored and gregkh committed
drm/amdkfd: Use device based logging for errors
[ Upstream commit 62ec7d3 ]

Convert some pr_* to some dev_* APIs to identify the device.

Signed-off-by: Lijo Lazar <[email protected]>
Reviewed-by: Alex Deucher <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
Stable-dep-of: 438b39a ("drm/amdkfd: pause autosuspend when creating pdd")
Signed-off-by: Sasha Levin <[email protected]>
1 parent 9f7042f commit 4312b60

File tree

5 files changed

+74
-45
lines changed

5 files changed

+74
-45
lines changed

drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -373,7 +373,8 @@ int kfd_init_apertures(struct kfd_process *process)
373373

374374
pdd = kfd_create_process_device_data(dev, process);
375375
if (!pdd) {
376-
pr_err("Failed to create process device data\n");
376+
dev_err(dev->adev->dev,
377+
"Failed to create process device data\n");
377378
return -ENOMEM;
378379
}
379380
/*

drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c

Lines changed: 11 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -68,7 +68,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
6868
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
6969
break;
7070
default:
71-
pr_err("Invalid queue type %d\n", type);
71+
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
7272
return false;
7373
}
7474

@@ -78,13 +78,14 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
7878
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);
7979

8080
if (!prop.doorbell_ptr) {
81-
pr_err("Failed to initialize doorbell");
81+
dev_err(dev->adev->dev, "Failed to initialize doorbell");
8282
goto err_get_kernel_doorbell;
8383
}
8484

8585
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
8686
if (retval != 0) {
87-
pr_err("Failed to init pq queues size %d\n", queue_size);
87+
dev_err(dev->adev->dev, "Failed to init pq queues size %d\n",
88+
queue_size);
8889
goto err_pq_allocate_vidmem;
8990
}
9091

@@ -332,7 +333,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
332333
if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
333334
return kq;
334335

335-
pr_err("Failed to init kernel queue\n");
336+
dev_err(dev->adev->dev, "Failed to init kernel queue\n");
336337

337338
kfree(kq);
338339
return NULL;
@@ -351,26 +352,26 @@ static __attribute__((unused)) void test_kq(struct kfd_node *dev)
351352
uint32_t *buffer, i;
352353
int retval;
353354

354-
pr_err("Starting kernel queue test\n");
355+
dev_err(dev->adev->dev, "Starting kernel queue test\n");
355356

356357
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
357358
if (unlikely(!kq)) {
358-
pr_err(" Failed to initialize HIQ\n");
359-
pr_err("Kernel queue test failed\n");
359+
dev_err(dev->adev->dev, " Failed to initialize HIQ\n");
360+
dev_err(dev->adev->dev, "Kernel queue test failed\n");
360361
return;
361362
}
362363

363364
retval = kq_acquire_packet_buffer(kq, 5, &buffer);
364365
if (unlikely(retval != 0)) {
365-
pr_err(" Failed to acquire packet buffer\n");
366-
pr_err("Kernel queue test failed\n");
366+
dev_err(dev->adev->dev, " Failed to acquire packet buffer\n");
367+
dev_err(dev->adev->dev, "Kernel queue test failed\n");
367368
return;
368369
}
369370
for (i = 0; i < 5; i++)
370371
buffer[i] = kq->nop_packet;
371372
kq_submit_packet(kq);
372373

373-
pr_err("Ending kernel queue test\n");
374+
dev_err(dev->adev->dev, "Ending kernel queue test\n");
374375
}
375376

376377

drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -118,12 +118,14 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
118118
* attention grabbing.
119119
*/
120120
if (gfx_info->max_shader_engines > KFD_MAX_NUM_SE) {
121-
pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
122-
gfx_info->max_shader_engines);
121+
dev_err(mm->dev->adev->dev,
122+
"Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
123+
gfx_info->max_shader_engines);
123124
return;
124125
}
125126
if (gfx_info->max_sh_per_se > KFD_MAX_NUM_SH_PER_SE) {
126-
pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
127+
dev_err(mm->dev->adev->dev,
128+
"Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
127129
gfx_info->max_sh_per_se * gfx_info->max_shader_engines);
128130
return;
129131
}

drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c

Lines changed: 41 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
4545
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
4646
unsigned int map_queue_size;
4747
unsigned int max_proc_per_quantum = 1;
48-
struct kfd_node *dev = pm->dqm->dev;
48+
struct kfd_node *node = pm->dqm->dev;
49+
struct device *dev = node->adev->dev;
4950

5051
process_count = pm->dqm->processes_count;
5152
queue_count = pm->dqm->active_queue_count;
@@ -59,14 +60,14 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
5960
*/
6061
*over_subscription = false;
6162

62-
if (dev->max_proc_per_quantum > 1)
63-
max_proc_per_quantum = dev->max_proc_per_quantum;
63+
if (node->max_proc_per_quantum > 1)
64+
max_proc_per_quantum = node->max_proc_per_quantum;
6465

6566
if ((process_count > max_proc_per_quantum) ||
6667
compute_queue_count > get_cp_queues_num(pm->dqm) ||
6768
gws_queue_count > 1) {
6869
*over_subscription = true;
69-
pr_debug("Over subscribed runlist\n");
70+
dev_dbg(dev, "Over subscribed runlist\n");
7071
}
7172

7273
map_queue_size = pm->pmf->map_queues_size;
@@ -81,7 +82,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
8182
if (*over_subscription)
8283
*rlib_size += pm->pmf->runlist_size;
8384

84-
pr_debug("runlist ib size %d\n", *rlib_size);
85+
dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
8586
}
8687

8788
static int pm_allocate_runlist_ib(struct packet_manager *pm,
@@ -90,6 +91,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
9091
unsigned int *rl_buffer_size,
9192
bool *is_over_subscription)
9293
{
94+
struct kfd_node *node = pm->dqm->dev;
95+
struct device *dev = node->adev->dev;
9396
int retval;
9497

9598
if (WARN_ON(pm->allocated))
@@ -99,11 +102,10 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
99102

100103
mutex_lock(&pm->lock);
101104

102-
retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
103-
&pm->ib_buffer_obj);
105+
retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
104106

105107
if (retval) {
106-
pr_err("Failed to allocate runlist IB\n");
108+
dev_err(dev, "Failed to allocate runlist IB\n");
107109
goto out;
108110
}
109111

@@ -125,6 +127,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
125127
{
126128
unsigned int alloc_size_bytes;
127129
unsigned int *rl_buffer, rl_wptr, i;
130+
struct kfd_node *node = pm->dqm->dev;
131+
struct device *dev = node->adev->dev;
128132
int retval, processes_mapped;
129133
struct device_process_node *cur;
130134
struct qcm_process_device *qpd;
@@ -142,15 +146,15 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
142146
*rl_size_bytes = alloc_size_bytes;
143147
pm->ib_size_bytes = alloc_size_bytes;
144148

145-
pr_debug("Building runlist ib process count: %d queues count %d\n",
149+
dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
146150
pm->dqm->processes_count, pm->dqm->active_queue_count);
147151

148152
/* build the run list ib packet */
149153
list_for_each_entry(cur, queues, list) {
150154
qpd = cur->qpd;
151155
/* build map process packet */
152156
if (processes_mapped >= pm->dqm->processes_count) {
153-
pr_debug("Not enough space left in runlist IB\n");
157+
dev_dbg(dev, "Not enough space left in runlist IB\n");
154158
pm_release_ib(pm);
155159
return -ENOMEM;
156160
}
@@ -167,7 +171,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
167171
if (!kq->queue->properties.is_active)
168172
continue;
169173

170-
pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
174+
dev_dbg(dev,
175+
"static_queue, mapping kernel q %d, is debug status %d\n",
171176
kq->queue->queue, qpd->is_debug);
172177

173178
retval = pm->pmf->map_queues(pm,
@@ -186,7 +191,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
186191
if (!q->properties.is_active)
187192
continue;
188193

189-
pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
194+
dev_dbg(dev,
195+
"static_queue, mapping user queue %d, is debug status %d\n",
190196
q->queue, qpd->is_debug);
191197

192198
retval = pm->pmf->map_queues(pm,
@@ -203,11 +209,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
203209
}
204210
}
205211

206-
pr_debug("Finished map process and queues to runlist\n");
212+
dev_dbg(dev, "Finished map process and queues to runlist\n");
207213

208214
if (is_over_subscription) {
209215
if (!pm->is_over_subscription)
210-
pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
216+
dev_warn(
217+
dev,
218+
"Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
211219
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
212220
*rl_gpu_addr,
213221
alloc_size_bytes / sizeof(uint32_t),
@@ -272,6 +280,8 @@ void pm_uninit(struct packet_manager *pm, bool hanging)
272280
int pm_send_set_resources(struct packet_manager *pm,
273281
struct scheduling_resources *res)
274282
{
283+
struct kfd_node *node = pm->dqm->dev;
284+
struct device *dev = node->adev->dev;
275285
uint32_t *buffer, size;
276286
int retval = 0;
277287

@@ -281,7 +291,7 @@ int pm_send_set_resources(struct packet_manager *pm,
281291
size / sizeof(uint32_t),
282292
(unsigned int **)&buffer);
283293
if (!buffer) {
284-
pr_err("Failed to allocate buffer on kernel queue\n");
294+
dev_err(dev, "Failed to allocate buffer on kernel queue\n");
285295
retval = -ENOMEM;
286296
goto out;
287297
}
@@ -343,6 +353,8 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
343353
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
344354
uint64_t fence_value)
345355
{
356+
struct kfd_node *node = pm->dqm->dev;
357+
struct device *dev = node->adev->dev;
346358
uint32_t *buffer, size;
347359
int retval = 0;
348360

@@ -354,7 +366,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
354366
kq_acquire_packet_buffer(pm->priv_queue,
355367
size / sizeof(uint32_t), (unsigned int **)&buffer);
356368
if (!buffer) {
357-
pr_err("Failed to allocate buffer on kernel queue\n");
369+
dev_err(dev, "Failed to allocate buffer on kernel queue\n");
358370
retval = -ENOMEM;
359371
goto out;
360372
}
@@ -372,6 +384,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
372384

373385
int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
374386
{
387+
struct kfd_node *node = pm->dqm->dev;
388+
struct device *dev = node->adev->dev;
375389
int retval = 0;
376390
uint32_t *buffer, size;
377391

@@ -385,7 +399,8 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
385399
(unsigned int **)&buffer);
386400

387401
if (!buffer) {
388-
pr_err("Failed to allocate buffer on kernel queue\n");
402+
dev_err(dev,
403+
"Failed to allocate buffer on kernel queue\n");
389404
retval = -ENOMEM;
390405
goto out;
391406
}
@@ -406,6 +421,8 @@ int pm_send_unmap_queue(struct packet_manager *pm,
406421
enum kfd_unmap_queues_filter filter,
407422
uint32_t filter_param, bool reset)
408423
{
424+
struct kfd_node *node = pm->dqm->dev;
425+
struct device *dev = node->adev->dev;
409426
uint32_t *buffer, size;
410427
int retval = 0;
411428

@@ -414,7 +431,7 @@ int pm_send_unmap_queue(struct packet_manager *pm,
414431
kq_acquire_packet_buffer(pm->priv_queue,
415432
size / sizeof(uint32_t), (unsigned int **)&buffer);
416433
if (!buffer) {
417-
pr_err("Failed to allocate buffer on kernel queue\n");
434+
dev_err(dev, "Failed to allocate buffer on kernel queue\n");
418435
retval = -ENOMEM;
419436
goto out;
420437
}
@@ -463,6 +480,8 @@ int pm_debugfs_runlist(struct seq_file *m, void *data)
463480

464481
int pm_debugfs_hang_hws(struct packet_manager *pm)
465482
{
483+
struct kfd_node *node = pm->dqm->dev;
484+
struct device *dev = node->adev->dev;
466485
uint32_t *buffer, size;
467486
int r = 0;
468487

@@ -474,16 +493,16 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
474493
kq_acquire_packet_buffer(pm->priv_queue,
475494
size / sizeof(uint32_t), (unsigned int **)&buffer);
476495
if (!buffer) {
477-
pr_err("Failed to allocate buffer on kernel queue\n");
496+
dev_err(dev, "Failed to allocate buffer on kernel queue\n");
478497
r = -ENOMEM;
479498
goto out;
480499
}
481500
memset(buffer, 0x55, size);
482501
kq_submit_packet(pm->priv_queue);
483502

484-
pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
485-
buffer[0], buffer[1], buffer[2], buffer[3],
486-
buffer[4], buffer[5], buffer[6]);
503+
dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
504+
buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
505+
buffer[5], buffer[6]);
487506
out:
488507
mutex_unlock(&pm->lock);
489508
return r;

0 commit comments

Comments (0)