Commit 4d95a12

Merge tag 'drm-xe-fixes-2024-10-24-1' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
Driver Changes:
- Increase invalidation timeout to avoid errors in some hosts (Shuicheng)
- Flush worker on timeout (Badal)
- Better handling for force wake failure (Shuicheng)
- Improve argument check on user fence creation (Nirmoy)
- Don't restart parallel queues multiple times on GT reset (Nirmoy)

Signed-off-by: Dave Airlie <[email protected]>
From: Lucas De Marchi <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/trlkoiewtc4x2cyhsxmj3atayyq4zwto4iryea5pvya2ymc3yp@fdx5nhwmiyem
2 parents: e3e1cfe + cdc2102

5 files changed: +42, -7 lines

drivers/gpu/drm/xe/xe_device.c

Lines changed: 1 addition & 1 deletion
@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
 	spin_lock(&gt->global_invl_lock);
 	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);

-	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
 		xe_gt_err_once(gt, "Global invalidation timeout\n");
 	spin_unlock(&gt->global_invl_lock);

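For context, xe_mmio_wait32() here polls XE2_GLOBAL_INVAL until the invalidation bit clears, and the 150/500 argument is the timeout budget in microseconds; the fix simply raises that budget so slower hosts stop tripping the "Global invalidation timeout" message. Below is a minimal sketch (not the xe implementation) of the poll-until-match pattern such a helper follows; demo_read_reg() and demo_wait_reg32() are hypothetical names.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

extern u32 demo_read_reg(u32 addr);	/* hypothetical MMIO read accessor */

static int demo_wait_reg32(u32 addr, u32 mask, u32 want, unsigned int timeout_us)
{
	unsigned int waited_us = 0;

	for (;;) {
		if ((demo_read_reg(addr) & mask) == want)
			return 0;		/* condition met within the budget */
		if (waited_us >= timeout_us)
			return -ETIMEDOUT;	/* caller logs the timeout */
		udelay(10);
		waited_us += 10;
	}
}
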
drivers/gpu/drm/xe/xe_force_wake.c

Lines changed: 9 additions & 3 deletions
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
 			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
 			     &value, true);
 	if (ret)
-		xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
-			     domain->id, str_wake_sleep(wake), ERR_PTR(ret),
-			     domain->reg_ack.addr, value);
+		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+			  domain->reg_ack.addr, value);
+	if (value == ~0) {
+		xe_gt_err(gt,
+			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+			  domain->id, str_wake_sleep(wake));
+		ret = -EIO;
+	}

 	return ret;
 }

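The new branch treats an ack register that reads back as all ones as a sign that MMIO itself is unreliable (a device that has fallen off the bus or lost power typically returns 0xFFFFFFFF for every read), and escalates from a log message to a hard -EIO. A minimal sketch of that pattern follows; demo_poll_ack() and demo_log_err() are hypothetical helpers, not xe APIs.

#include <linux/errno.h>
#include <linux/types.h>

extern int demo_poll_ack(u32 reg, u32 *value);	/* hypothetical ack poll */
extern void demo_log_err(const char *msg);	/* hypothetical logger */

static int demo_domain_wait_ack(u32 ack_reg)
{
	u32 value = 0;
	int ret = demo_poll_ack(ack_reg, &value);

	if (ret)
		demo_log_err("force wake ack timed out");

	if (value == ~0u) {
		/* All-ones readback: the register space is not responding. */
		demo_log_err("MMIO unreliable (register reads 0xFFFFFFFF)");
		ret = -EIO;	/* device failure, not a plain timeout */
	}

	return ret;
}
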
drivers/gpu/drm/xe/xe_guc_ct.c

Lines changed: 18 additions & 0 deletions
@@ -897,6 +897,24 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,

 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);

+	/*
+	 * Occasionally it is seen that the G2H worker starts running after a delay of more than
+	 * a second even after being queued and activated by the Linux workqueue subsystem. This
+	 * leads to a G2H timeout error. The root cause lies with the scheduling latency of the
+	 * Lunarlake hybrid CPU. The issue disappears if the Lunarlake atom cores are disabled in
+	 * the BIOS, which is beyond the control of the xe KMD.
+	 *
+	 * TODO: Drop this change once the workqueue scheduling delay is fixed on LNL hybrid CPUs.
+	 */
+	if (!ret) {
+		flush_work(&ct->g2h_worker);
+		if (g2h_fence.done) {
+			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+				   g2h_fence.seqno, action[0]);
+			ret = 1;
+		}
+	}
+
 	/*
 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
 	 * the stack, since we have no clue if it will fire after the timeout before we can erase

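The workaround keeps the one-second wait_event_timeout() but, when it expires, synchronously flushes the worker that would have completed the fence and re-checks, so a worker that was merely scheduled late is not reported as a real G2H timeout. A minimal sketch of the same pattern in generic kernel style; struct demo_ctx and demo_wait_response() are hypothetical, and initialization of the work item and waitqueue is elided.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct work_struct worker;	/* processes the response and sets done */
	wait_queue_head_t wq;
	bool done;
};

static int demo_wait_response(struct demo_ctx *ctx)
{
	long ret = wait_event_timeout(ctx->wq, ctx->done, HZ);

	if (!ret) {
		/* Worker may simply not have run yet: run it now and re-check. */
		flush_work(&ctx->worker);
		if (ctx->done)
			ret = 1;	/* late completion, not a timeout */
	}

	return ret ? 0 : -ETIMEDOUT;
}
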
drivers/gpu/drm/xe/xe_guc_submit.c

Lines changed: 12 additions & 2 deletions
@@ -1726,8 +1726,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)

 	mutex_lock(&guc->submission_state.lock);

-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to stop parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_stop(guc, q);
+	}

 	mutex_unlock(&guc->submission_state.lock);

@@ -1765,8 +1770,13 @@ int xe_guc_submit_start(struct xe_guc *guc)

 	mutex_lock(&guc->submission_state.lock);
 	atomic_dec(&guc->submission_state.stopped);
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to start parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_start(q);
+	}
 	mutex_unlock(&guc->submission_state.lock);

 	wake_up_all(&guc->ct.wq);

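The guard works because a parallel exec queue is registered in the lookup xarray under more than one index while q->guc->id holds only its primary id, so iterating the xarray would otherwise stop or start the same queue several times during a GT reset. A minimal sketch of the skip-aliased-entries pattern; struct demo_queue, demo_stop() and demo_stop_all() are hypothetical names.

#include <linux/types.h>
#include <linux/xarray.h>

struct demo_queue {
	u32 primary_id;		/* the one index this queue "owns" */
};

extern void demo_stop(struct demo_queue *q);	/* hypothetical per-queue action */

static void demo_stop_all(struct xarray *lookup)
{
	struct demo_queue *q;
	unsigned long index;

	xa_for_each(lookup, index, q) {
		/* Skip entries aliased under secondary indices. */
		if (q->primary_id != index)
			continue;

		demo_stop(q);
	}
}
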
drivers/gpu/drm/xe/xe_sync.c

Lines changed: 2 additions & 1 deletion
@@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 {
 	struct xe_user_fence *ufence;
 	u64 __user *ptr = u64_to_user_ptr(addr);
+	u64 __maybe_unused prefetch_val;

-	if (!access_ok(ptr, sizeof(*ptr)))
+	if (get_user(prefetch_val, ptr))
 		return ERR_PTR(-EFAULT);

 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);

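access_ok() only checks that the pointer lies within the user address range, while get_user() actually performs the read, so a user fence address that is in range but not mapped readable now fails with -EFAULT when the fence is created rather than surfacing later. A minimal sketch of the same validation in isolation; demo_validate_user_u64() is a hypothetical helper.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int demo_validate_user_u64(u64 addr)
{
	u64 __user *ptr = u64_to_user_ptr(addr);
	u64 __maybe_unused val;

	/* Touches the page, so a bogus-but-in-range address is caught here. */
	if (get_user(val, ptr))
		return -EFAULT;

	return 0;
}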