@@ -26,6 +26,7 @@
 
 #include <linux/scatterlist.h>
 
+#include <drm/drm_pagemap.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_range_manager.h>
 #include <drm/ttm/ttm_resource.h>
@@ -34,17 +35,38 @@
 #include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_macros.h"
+#include "xe_svm.h"
 #include "xe_ttm_vram_mgr.h"
 
-/* state back for walking over vram_mgr, stolen_mgr, and gtt_mgr allocations */
+/**
+ * struct xe_res_cursor - state for walking over dma mapping, vram_mgr,
+ * stolen_mgr, and gtt_mgr allocations
+ */
 struct xe_res_cursor {
+	/** @start: Start of cursor */
 	u64 start;
+	/** @size: Size of the current segment. */
 	u64 size;
+	/** @remaining: Remaining bytes in cursor */
 	u64 remaining;
+	/** @node: Opaque pointer to the current node */
 	void *node;
+	/** @mem_type: Memory type */
 	u32 mem_type;
+	/** @sgl: Scatterlist for cursor */
 	struct scatterlist *sgl;
+	/** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
+	const struct drm_pagemap_device_addr *dma_addr;
+	/** @mm: Buddy allocator for VRAM cursor */
 	struct drm_buddy *mm;
+	/**
+	 * @dma_start: DMA start address for the current segment.
+	 * This may differ from @dma_addr.addr since elements in
+	 * the array may be coalesced into a single segment.
+	 */
+	u64 dma_start;
+	/** @dma_seg_size: Size of the current DMA segment. */
+	u64 dma_seg_size;
 };
 
 static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
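The cursor is meant to be driven in a simple for-loop: initialize it with one of the xe_res_first_*() helpers in this header, consume cur.size contiguous bytes per iteration, and advance until cur.remaining reaches zero. A minimal sketch of that pattern, assuming a TTM resource res; total_size and the loop body are hypothetical:

	struct xe_res_cursor cur;

	for (xe_res_first(res, 0, total_size, &cur); cur.remaining;
	     xe_res_next(&cur, cur.size)) {
		u64 addr = xe_res_dma(&cur);	/* address of this contiguous segment */
		u64 len = cur.size;		/* bytes available at addr */

		/* ... program PTEs or issue a copy for [addr, addr + len) ... */
	}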
@@ -70,6 +92,7 @@ static inline void xe_res_first(struct ttm_resource *res,
 				struct xe_res_cursor *cur)
 {
 	cur->sgl = NULL;
+	cur->dma_addr = NULL;
 	if (!res)
 		goto fallback;
 
@@ -141,6 +164,36 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
 	cur->sgl = sgl;
 }
 
+/**
+ * __xe_res_dma_next() - Advance the cursor when end-of-segment is reached
+ * @cur: The cursor
+ */
+static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
+{
+	const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+	u64 start = cur->start;
+
+	while (start >= cur->dma_seg_size) {
+		start -= cur->dma_seg_size;
+		addr++;
+		cur->dma_seg_size = PAGE_SIZE << addr->order;
+	}
+	cur->dma_start = addr->addr;
+
+	/* Coalesce array elements */
+	while (cur->dma_seg_size - start < cur->remaining) {
+		if (cur->dma_start + cur->dma_seg_size != addr[1].addr ||
+		    addr->proto != addr[1].proto)
+			break;
+		addr++;
+		cur->dma_seg_size += PAGE_SIZE << addr->order;
+	}
+
+	cur->dma_addr = addr;
+	cur->start = start;
+	cur->size = cur->dma_seg_size - start;
+}
+
 /**
  * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
  *
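A worked illustration of the coalescing above, with made-up numbers: given four order-0 entries (4 KiB each, assuming PAGE_SIZE == 4 KiB) whose device addresses are 0x10000, 0x11000, 0x12000 and 0x13000 with identical proto, and at least 16 KiB remaining in the cursor, the second loop folds all four entries into a single segment, leaving cur->dma_start == 0x10000 and cur->dma_seg_size == cur->size == 0x4000. Coalescing stops at the first discontiguous device address, at a proto change, or once the segment already covers the remaining bytes of the walk.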
@@ -160,11 +213,42 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
 	cur->start = start;
 	cur->remaining = size;
 	cur->size = 0;
+	cur->dma_addr = NULL;
 	cur->sgl = sg->sgl;
 	cur->mem_type = XE_PL_TT;
 	__xe_res_sg_next(cur);
 }
 
+/**
+ * xe_res_first_dma - initialize a xe_res_cursor with a dma_addr array
+ *
+ * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations between @start and @size.
+ */
+static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+				    u64 start, u64 size,
+				    struct xe_res_cursor *cur)
+{
+	XE_WARN_ON(!dma_addr);
+	XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
+		   !IS_ALIGNED(size, PAGE_SIZE));
+
+	cur->node = NULL;
+	cur->start = start;
+	cur->remaining = size;
+	cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
+	cur->dma_start = 0;
+	cur->size = 0;
+	cur->dma_addr = dma_addr;
+	__xe_res_dma_next(cur);
+	cur->sgl = NULL;
+	cur->mem_type = XE_PL_TT;
+}
+
 /**
  * xe_res_next - advance the cursor
  *
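A usage sketch for the new initializer; dma_addr is assumed to describe a mapped range of npages pages (both names hypothetical). Note that @start is a byte offset into the range the array covers, not a device address, and both @start and @size must be page aligned:

	struct xe_res_cursor cur;

	for (xe_res_first_dma(dma_addr, 0, npages << PAGE_SHIFT, &cur);
	     cur.remaining; xe_res_next(&cur, cur.size)) {
		/* Each iteration sees one coalesced, contiguous segment. */
		consume(xe_res_dma(&cur), cur.size);	/* hypothetical consumer */
	}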
@@ -191,6 +275,12 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
 		return;
 	}
 
+	if (cur->dma_addr) {
+		cur->start += size;
+		__xe_res_dma_next(cur);
+		return;
+	}
+
 	if (cur->sgl) {
 		cur->start += size;
 		__xe_res_sg_next(cur);
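For a cursor initialized from a drm_pagemap_device_addr array, advancing just bumps cur->start and defers to __xe_res_dma_next(), which skips whole array entries and re-coalesces from the new position; this mirrors how the adjacent scatterlist branch defers to __xe_res_sg_next().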
@@ -232,6 +322,35 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
  */
 static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
 {
-	return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
+	if (cur->dma_addr)
+		return cur->dma_start + cur->start;
+	else if (cur->sgl)
+		return sg_dma_address(cur->sgl) + cur->start;
+	else
+		return cur->start;
+}
+
+/**
+ * xe_res_is_vram() - Whether the cursor's current DMA address points to
+ * same-device VRAM
+ * @cur: The cursor.
+ *
+ * Return: true iff the address returned by xe_res_dma() points to internal VRAM.
+ */
+static inline bool xe_res_is_vram(const struct xe_res_cursor *cur)
+{
+	if (cur->dma_addr)
+		return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;
+
+	switch (cur->mem_type) {
+	case XE_PL_STOLEN:
+	case XE_PL_VRAM0:
+	case XE_PL_VRAM1:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
 }
 #endif
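Taken together, xe_res_dma() and xe_res_is_vram() let a caller choose an address encoding per coalesced segment; for DMA-array cursors the VRAM check reduces to comparing the element's proto against XE_INTERCONNECT_VRAM, so a range mixing system memory and VRAM can be walked in one loop. A hedged fragment for a single iteration (the emit_*() helpers are hypothetical, not part of this patch):

		u64 addr = xe_res_dma(&cur);

		if (xe_res_is_vram(&cur))
			emit_vram_pte(addr, cur.size);		/* hypothetical */
		else
			emit_sysmem_pte(addr, cur.size);	/* hypothetical */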