@@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
18201820 if (!IS_ALIGNED (len , XE_CACHELINE_BYTES ) ||
18211821 !IS_ALIGNED ((unsigned long )buf + offset , XE_CACHELINE_BYTES )) {
18221822 int buf_offset = 0 ;
1823+ void * bounce ;
1824+ int err ;
1825+
1826+ BUILD_BUG_ON (!is_power_of_2 (XE_CACHELINE_BYTES ));
1827+ bounce = kmalloc (XE_CACHELINE_BYTES , GFP_KERNEL );
1828+ if (!bounce )
1829+ return - ENOMEM ;
18231830
18241831 /*
18251832 * Less than ideal for large unaligned access but this should be
18261833 * fairly rare, can fixup if this becomes common.
18271834 */
18281835 do {
1829- u8 bounce [XE_CACHELINE_BYTES ];
1830- void * ptr = (void * )bounce ;
1831- int err ;
18321836 int copy_bytes = min_t (int , bytes_left ,
18331837 XE_CACHELINE_BYTES -
18341838 (offset & XE_CACHELINE_MASK ));
@@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
18371841 err = xe_migrate_access_memory (m , bo ,
18381842 offset &
18391843 ~XE_CACHELINE_MASK ,
1840- ( void * ) ptr ,
1841- sizeof ( bounce ) , 0 );
1844+ bounce ,
1845+ XE_CACHELINE_BYTES , 0 );
18421846 if (err )
1843- return err ;
1847+ break ;
18441848
18451849 if (write ) {
1846- memcpy (ptr + ptr_offset , buf + buf_offset , copy_bytes );
1850+ memcpy (bounce + ptr_offset , buf + buf_offset , copy_bytes );
18471851
18481852 err = xe_migrate_access_memory (m , bo ,
18491853 offset & ~XE_CACHELINE_MASK ,
1850- ( void * ) ptr ,
1851- sizeof ( bounce ) , write );
1854+ bounce ,
1855+ XE_CACHELINE_BYTES , write );
18521856 if (err )
1853- return err ;
1857+ break ;
18541858 } else {
1855- memcpy (buf + buf_offset , ptr + ptr_offset ,
1859+ memcpy (buf + buf_offset , bounce + ptr_offset ,
18561860 copy_bytes );
18571861 }
18581862
@@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
18611865 offset += copy_bytes ;
18621866 } while (bytes_left );
18631867
1864- return 0 ;
1868+ kfree (bounce );
1869+ return err ;
18651870 }
18661871
18671872 dma_addr = xe_migrate_dma_map (xe , buf , len + page_offset , write );
@@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
18821887 else
18831888 current_bytes = min_t (int , bytes_left , cursor .size );
18841889
1885- if (fence )
1886- dma_fence_put (fence );
1890+ if (current_bytes & ~PAGE_MASK ) {
1891+ int pitch = 4 ;
1892+
1893+ current_bytes = min_t (int , current_bytes , S16_MAX * pitch );
1894+ }
18871895
18881896 __fence = xe_migrate_vram (m , current_bytes ,
18891897 (unsigned long )buf & ~PAGE_MASK ,
@@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
18921900 XE_MIGRATE_COPY_TO_VRAM :
18931901 XE_MIGRATE_COPY_TO_SRAM );
18941902 if (IS_ERR (__fence )) {
1895- if (fence )
1903+ if (fence ) {
18961904 dma_fence_wait (fence , false);
1905+ dma_fence_put (fence );
1906+ }
18971907 fence = __fence ;
18981908 goto out_err ;
18991909 }
1910+
1911+ dma_fence_put (fence );
19001912 fence = __fence ;
19011913
19021914 buf += current_bytes ;