
Commit 9d7a1cb

matt-auld authored and rodrigovivi committed
drm/xe/migrate: prevent infinite recursion
If the buf + offset is not aligned to XE_CACHELINE_BYTES we fall back to
using a bounce buffer. However, the bounce buffer here is allocated on the
stack, and the only alignment requirement is that it's naturally aligned
to u8, not XE_CACHELINE_BYTES. If the bounce buffer is also misaligned we
recurse back into the function, but the new bounce buffer might also be
misaligned, and might never be aligned until we eventually blow through
the stack as we keep recursing. Instead of using the stack, use kmalloc,
which should respect the power-of-two alignment request here. Fixes a
kernel panic when triggering this path through eudebug.

v2 (Stuart):
- Add build bug check for power-of-two restriction
- s/EINVAL/ENOMEM/

Fixes: 270172f ("drm/xe: Update xe_ttm_access_memory to use GPU for non-visible access")
Signed-off-by: Matthew Auld <[email protected]>
Cc: Maciej Patelczyk <[email protected]>
Cc: Stuart Summers <[email protected]>
Cc: Matthew Brost <[email protected]>
Reviewed-by: Stuart Summers <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
(cherry picked from commit 38b34e9)
Signed-off-by: Rodrigo Vivi <[email protected]>
1 parent 8f5ae30 · commit 9d7a1cb
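As context for the diff below, here is a minimal userspace sketch of the bug and the fix (illustrative only: CACHELINE, access_broken() and access_fixed() are stand-ins, not driver symbols). The broken shape recurses with a stack bounce buffer that carries no cacheline alignment guarantee of its own, so the alignment check may never pass; the fixed shape allocates a power-of-two-sized block on the heap, mirroring the natural-alignment guarantee kmalloc() gives for power-of-two sizes, so the recursion bottoms out after a single step.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHELINE 64 /* stand-in for XE_CACHELINE_BYTES */

/*
 * Broken shape: the bounce buffer lives on the stack, so it is only
 * guaranteed u8 alignment. If it happens to be misaligned too, we just
 * recurse with another equally unconstrained stack buffer.
 */
static int access_broken(unsigned char *buf, size_t len, int depth)
{
        (void)len; /* demo skips the actual copy */
        if ((uintptr_t)buf % CACHELINE) {
                unsigned char bounce[CACHELINE]; /* only u8-aligned */

                if (depth > 1000)
                        return -1; /* in the kernel: stack blown, panic */
                return access_broken(bounce, sizeof(bounce), depth + 1);
        }
        return 0; /* aligned fast path */
}

/*
 * Fixed shape: a naturally aligned power-of-two heap allocation.
 * aligned_alloc() is the userspace analogue of the kmalloc() guarantee
 * the commit relies on; the recursive call now always hits the aligned
 * fast path.
 */
static int access_fixed(unsigned char *buf, size_t len)
{
        (void)len; /* demo skips the actual copy */
        if ((uintptr_t)buf % CACHELINE) {
                unsigned char *bounce = aligned_alloc(CACHELINE, CACHELINE);
                int err;

                if (!bounce)
                        return -1; /* -ENOMEM in the driver */
                err = access_fixed(bounce, CACHELINE); /* terminates */
                free(bounce);
                return err;
        }
        return 0; /* aligned fast path */
}

int main(void)
{
        unsigned char raw[2 * CACHELINE];
        unsigned char *misaligned = raw + 1; /* odd, never 64-aligned */

        printf("fixed:  %d\n", access_fixed(misaligned, CACHELINE));
        printf("broken: %d\n", access_broken(misaligned, CACHELINE, 0));
        return 0;
}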

File tree

1 file changed: +17 additions, -12 deletions

drivers/gpu/drm/xe/xe_migrate.c

Lines changed: 17 additions & 12 deletions
@@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
         if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
             !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
                 int buf_offset = 0;
+                void *bounce;
+                int err;
+
+                BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+                bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+                if (!bounce)
+                        return -ENOMEM;
 
                 /*
                  * Less than ideal for large unaligned access but this should be
                  * fairly rare, can fixup if this becomes common.
                  */
                 do {
-                        u8 bounce[XE_CACHELINE_BYTES];
-                        void *ptr = (void *)bounce;
-                        int err;
                         int copy_bytes = min_t(int, bytes_left,
                                                XE_CACHELINE_BYTES -
                                                (offset & XE_CACHELINE_MASK));
@@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
                         err = xe_migrate_access_memory(m, bo,
                                                        offset &
                                                        ~XE_CACHELINE_MASK,
-                                                       (void *)ptr,
-                                                       sizeof(bounce), 0);
+                                                       bounce,
+                                                       XE_CACHELINE_BYTES, 0);
                         if (err)
-                                return err;
+                                break;
 
                         if (write) {
-                                memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+                                memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
 
                                 err = xe_migrate_access_memory(m, bo,
                                                                offset & ~XE_CACHELINE_MASK,
-                                                               (void *)ptr,
-                                                               sizeof(bounce), write);
+                                                               bounce,
+                                                               XE_CACHELINE_BYTES, write);
                                 if (err)
-                                        return err;
+                                        break;
                         } else {
-                                memcpy(buf + buf_offset, ptr + ptr_offset,
+                                memcpy(buf + buf_offset, bounce + ptr_offset,
                                        copy_bytes);
                         }
 
@@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
                         offset += copy_bytes;
                 } while (bytes_left);
 
-                return 0;
+                kfree(bounce);
+                return err;
         }
 
         dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
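A detail worth calling out in the hunks above: the early "return err" statements inside the loop become "break", so every exit path now funnels through the new kfree(bounce). A minimal kernel-style sketch of that cleanup pattern (CHUNK_BYTES and copy_one_chunk() are hypothetical stand-ins, not driver symbols):

#include <linux/errno.h>
#include <linux/slab.h>

#define CHUNK_BYTES 64                  /* hypothetical chunk size */

int copy_one_chunk(void *bounce);       /* hypothetical per-chunk copy */

static int copy_in_chunks(size_t bytes_left)
{
        void *bounce = kmalloc(CHUNK_BYTES, GFP_KERNEL);
        int err;

        if (!bounce)
                return -ENOMEM;

        do {
                err = copy_one_chunk(bounce);
                if (err)
                        break;  /* not "return err": bounce must be freed */
                bytes_left -= bytes_left < CHUNK_BYTES ?
                              bytes_left : CHUNK_BYTES;
        } while (bytes_left);

        kfree(bounce);  /* reached on both the success and error paths */
        return err;     /* 0 when the loop ran to completion */
}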
