@@ -45,7 +45,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
 	unsigned int map_queue_size;
 	unsigned int max_proc_per_quantum = 1;
-	struct kfd_node *dev = pm->dqm->dev;
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 
 	process_count = pm->dqm->processes_count;
 	queue_count = pm->dqm->active_queue_count;
@@ -59,14 +60,14 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	 */
 	*over_subscription = false;
 
-	if (dev->max_proc_per_quantum > 1)
-		max_proc_per_quantum = dev->max_proc_per_quantum;
+	if (node->max_proc_per_quantum > 1)
+		max_proc_per_quantum = node->max_proc_per_quantum;
 
 	if ((process_count > max_proc_per_quantum) ||
 	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
 	    gws_queue_count > 1) {
 		*over_subscription = true;
-		pr_debug("Over subscribed runlist\n");
+		dev_dbg(dev, "Over subscribed runlist\n");
 	}
 
 	map_queue_size = pm->pmf->map_queues_size;
@@ -81,7 +82,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	if (*over_subscription)
 		*rlib_size += pm->pmf->runlist_size;
 
-	pr_debug("runlist ib size %d\n", *rlib_size);
+	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
 }
 
 static int pm_allocate_runlist_ib(struct packet_manager *pm,
@@ -90,6 +91,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 				  unsigned int *rl_buffer_size,
 				  bool *is_over_subscription)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	int retval;
 
 	if (WARN_ON(pm->allocated))
@@ -99,11 +102,10 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 
 	mutex_lock(&pm->lock);
 
-	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
-					&pm->ib_buffer_obj);
+	retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
 
 	if (retval) {
-		pr_err("Failed to allocate runlist IB\n");
+		dev_err(dev, "Failed to allocate runlist IB\n");
 		goto out;
 	}
@@ -125,6 +127,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 {
 	unsigned int alloc_size_bytes;
 	unsigned int *rl_buffer, rl_wptr, i;
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	int retval, processes_mapped;
 	struct device_process_node *cur;
 	struct qcm_process_device *qpd;
@@ -142,15 +146,15 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	*rl_size_bytes = alloc_size_bytes;
 	pm->ib_size_bytes = alloc_size_bytes;
 
-	pr_debug("Building runlist ib process count: %d queues count %d\n",
+	dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
 		pm->dqm->processes_count, pm->dqm->active_queue_count);
 
 	/* build the run list ib packet */
 	list_for_each_entry(cur, queues, list) {
 		qpd = cur->qpd;
 		/* build map process packet */
 		if (processes_mapped >= pm->dqm->processes_count) {
-			pr_debug("Not enough space left in runlist IB\n");
+			dev_dbg(dev, "Not enough space left in runlist IB\n");
 			pm_release_ib(pm);
 			return -ENOMEM;
 		}
@@ -167,7 +171,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			if (!kq->queue->properties.is_active)
 				continue;
 
-			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
+			dev_dbg(dev,
+				"static_queue, mapping kernel q %d, is debug status %d\n",
 				kq->queue->queue, qpd->is_debug);
 
 			retval = pm->pmf->map_queues(pm,
@@ -186,7 +191,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			if (!q->properties.is_active)
 				continue;
 
-			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
+			dev_dbg(dev,
+				"static_queue, mapping user queue %d, is debug status %d\n",
 				q->queue, qpd->is_debug);
 
 			retval = pm->pmf->map_queues(pm,
@@ -203,11 +209,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		}
 	}
 
-	pr_debug("Finished map process and queues to runlist\n");
+	dev_dbg(dev, "Finished map process and queues to runlist\n");
 
 	if (is_over_subscription) {
 		if (!pm->is_over_subscription)
-			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
+			dev_warn(
+				dev,
+				"Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
 		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
 					*rl_gpu_addr,
 					alloc_size_bytes / sizeof(uint32_t),
@@ -272,6 +280,8 @@ void pm_uninit(struct packet_manager *pm, bool hanging)
 int pm_send_set_resources(struct packet_manager *pm,
 			struct scheduling_resources *res)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int retval = 0;
 
@@ -281,7 +291,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 			size / sizeof(uint32_t),
 			(unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -343,6 +353,8 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 			uint64_t fence_value)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int retval = 0;
 
@@ -354,7 +366,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -372,6 +384,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 
 int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	int retval = 0;
 	uint32_t *buffer, size;
 
@@ -385,7 +399,8 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
 				(unsigned int **)&buffer);
 
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev,
+			"Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -406,6 +421,8 @@ int pm_send_unmap_queue(struct packet_manager *pm,
 			enum kfd_unmap_queues_filter filter,
 			uint32_t filter_param, bool reset)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int retval = 0;
 
@@ -414,7 +431,7 @@ int pm_send_unmap_queue(struct packet_manager *pm,
 	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -463,6 +480,8 @@ int pm_debugfs_runlist(struct seq_file *m, void *data)
 
 int pm_debugfs_hang_hws(struct packet_manager *pm)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int r = 0;
 
@@ -474,16 +493,16 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
 	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		r = -ENOMEM;
 		goto out;
 	}
 	memset(buffer, 0x55, size);
 	kq_submit_packet(pm->priv_queue);
 
-	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
-		buffer[0], buffer[1], buffer[2], buffer[3],
-		buffer[4], buffer[5], buffer[6]);
+	dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+		 buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
+		 buffer[5], buffer[6]);
 out:
 	mutex_unlock(&pm->lock);
 	return r;
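
The conversion above is mechanical but has a concrete payoff: the dev_* helpers prefix each message with the emitting device, so on a multi-GPU system a log line can be attributed to a specific GPU, which a bare pr_err() cannot do. Below is a minimal sketch of the derivation pattern this patch repeats at the top of each function; log_example() is a hypothetical stand-in for the real callers, while the pm->dqm->dev and node->adev->dev chain is taken directly from the hunks above.

	/* Sketch only: mirrors the derivation used throughout this patch. */
	static void log_example(struct packet_manager *pm)
	{
		struct kfd_node *node = pm->dqm->dev;	/* KFD node owning this packet manager */
		struct device *dev = node->adev->dev;	/* generic device of the backing amdgpu */

		/* dev_dbg() prepends the driver and device name to the message,
		 * e.g. "amdgpu 0000:03:00.0: ...", unlike the anonymous pr_debug().
		 */
		dev_dbg(dev, "packet manager ready\n");
	}

Caching both pointers as locals also keeps the call sites short and lets later hunks (such as the kfd_gtt_sa_allocate() call) reuse node instead of re-deriving pm->dqm->dev.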