@@ -42,6 +42,8 @@
 #include <linux/uaccess.h>
 #include <linux/mem_encrypt.h>
 
+#define TTM_BO_VM_NUM_PREFAULT 16
+
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                        struct vm_fault *vmf)
 {
@@ -104,30 +106,25 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                 + page_offset;
 }
 
-/**
- * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
- * @bo: The buffer object
- * @vmf: The fault structure handed to the callback
- *
- * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
- * during long waits, and after the wait the callback will be restarted. This
- * is to allow other threads using the same virtual memory space concurrent
- * access to map(), unmap() completely unrelated buffer objects. TTM buffer
- * object reservations sometimes wait for GPU and should therefore be
- * considered long waits. This function reserves the buffer object interruptibly
- * taking this into account. Starvation is avoided by the vm system not
- * allowing too many repeated restarts.
- * This function is intended to be used in customized fault() and _mkwrite()
- * handlers.
- *
- * Return:
- *    0 on success and the bo was reserved.
- *    VM_FAULT_RETRY if blocking wait.
- *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
- */
-vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
-                             struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
+        struct vm_area_struct *vma = vmf->vma;
+        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+            vma->vm_private_data;
+        struct ttm_bo_device *bdev = bo->bdev;
+        unsigned long page_offset;
+        unsigned long page_last;
+        unsigned long pfn;
+        struct ttm_tt *ttm = NULL;
+        struct page *page;
+        int err;
+        int i;
+        vm_fault_t ret = VM_FAULT_NOPAGE;
+        unsigned long address = vmf->address;
+        struct ttm_mem_type_manager *man =
+            &bdev->man[bo->mem.mem_type];
+        struct vm_area_struct cvma;
+
         /*
          * Work around locking order reversal in fault / nopfn
          * between mmap_sem and bo_reserve: Perform a trylock operation
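
The kernel-doc deleted in the hunk above documented the retryable-reservation contract that customized fault() and _mkwrite() handlers relied on: a TTM reservation may wait on the GPU, so the handler is allowed to drop mmap_sem during the wait and let the mm core restart the fault, while the trylock mentioned in the surviving comment keeps the common non-blocking path cheap. The following is only a minimal sketch of that pattern, not the removed ttm_bo_vm_reserve() body; it assumes the reservation_object trylock/interruptible-lock helpers of this kernel generation and the headers already included by ttm_bo_vm.c, and demo_reserve_or_retry() is a made-up name.

/*
 * Sketch only: the trylock-or-retry pattern described by the removed
 * kernel-doc. Not the exact body of the removed ttm_bo_vm_reserve().
 */
static vm_fault_t demo_reserve_or_retry(struct ttm_buffer_object *bo,
                                        struct vm_fault *vmf)
{
        /* Try to reserve without blocking while mmap_sem is held. */
        if (reservation_object_trylock(bo->resv))
                return 0;       /* reserved; the caller may fault in pages */

        /*
         * A blocking wait is needed. If the mm core allows a retry,
         * drop mmap_sem for the duration of the wait and ask for the
         * fault to be restarted afterwards.
         */
        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                ttm_bo_get(bo);         /* keep the bo alive across the wait */
                up_read(&vmf->vma->vm_mm->mmap_sem);
                if (!reservation_object_lock_interruptible(bo->resv, NULL))
                        reservation_object_unlock(bo->resv);
                ttm_bo_put(bo);
                return VM_FAULT_RETRY;
        }

        /* Retrying was not allowed: make the core re-run the fault. */
        return VM_FAULT_NOPAGE;
}
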
@@ -154,55 +151,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
                 return VM_FAULT_NOPAGE;
         }
 
-        return 0;
-}
-EXPORT_SYMBOL(ttm_bo_vm_reserve);
-
-/**
- * ttm_bo_vm_fault_reserved - TTM fault helper
- * @vmf: The struct vm_fault given as argument to the fault callback
- * @prot: The page protection to be used for this memory area.
- * @num_prefault: Maximum number of prefault pages. The caller may want to
- * specify this based on madvice settings and the size of the GPU object
- * backed by the memory.
- *
- * This function inserts one or more page table entries pointing to the
- * memory backing the buffer object, and then returns a return code
- * instructing the caller to retry the page access.
- *
- * Return:
- *   VM_FAULT_NOPAGE on success or pending signal
- *   VM_FAULT_SIGBUS on unspecified error
- *   VM_FAULT_OOM on out-of-memory
- *   VM_FAULT_RETRY if retryable wait
- */
-vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
-                                    pgprot_t prot,
-                                    pgoff_t num_prefault)
-{
-        struct vm_area_struct *vma = vmf->vma;
-        struct vm_area_struct cvma = *vma;
-        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
-            vma->vm_private_data;
-        struct ttm_bo_device *bdev = bo->bdev;
-        unsigned long page_offset;
-        unsigned long page_last;
-        unsigned long pfn;
-        struct ttm_tt *ttm = NULL;
-        struct page *page;
-        int err;
-        pgoff_t i;
-        vm_fault_t ret = VM_FAULT_NOPAGE;
-        unsigned long address = vmf->address;
-        struct ttm_mem_type_manager *man =
-            &bdev->man[bo->mem.mem_type];
-
         /*
          * Refuse to fault imported pages. This should be handled
          * (if at all) by redirecting mmap to the exporter.
          */
-        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-                return VM_FAULT_SIGBUS;
+        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+                ret = VM_FAULT_SIGBUS;
+                goto out_unlock;
+        }
 
         if (bdev->driver->fault_reserve_notify) {
                 struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -213,9 +169,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                         break;
                 case -EBUSY:
                 case -ERESTARTSYS:
-                        return VM_FAULT_NOPAGE;
+                        ret = VM_FAULT_NOPAGE;
+                        goto out_unlock;
                 default:
-                        return VM_FAULT_SIGBUS;
+                        ret = VM_FAULT_SIGBUS;
+                        goto out_unlock;
                 }
 
                 if (bo->moving != moving) {
@@ -231,12 +189,21 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
          * move.
          */
         ret = ttm_bo_vm_fault_idle(bo, vmf);
-        if (unlikely(ret != 0))
-                return ret;
+        if (unlikely(ret != 0)) {
+                if (ret == VM_FAULT_RETRY &&
+                    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                        /* The BO has already been unreserved. */
+                        return ret;
+                }
+
+                goto out_unlock;
+        }
 
         err = ttm_mem_io_lock(man, true);
-        if (unlikely(err != 0))
-                return VM_FAULT_NOPAGE;
+        if (unlikely(err != 0)) {
+                ret = VM_FAULT_NOPAGE;
+                goto out_unlock;
+        }
         err = ttm_mem_io_reserve_vm(bo);
         if (unlikely(err != 0)) {
                 ret = VM_FAULT_SIGBUS;
@@ -253,8 +220,18 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                 goto out_io_unlock;
         }
 
-        cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
-        if (!bo->mem.bus.is_iomem) {
+        /*
+         * Make a local vma copy to modify the page_prot member
+         * and vm_flags if necessary. The vma parameter is protected
+         * by mmap_sem in write mode.
+         */
+        cvma = *vma;
+        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
+        if (bo->mem.bus.is_iomem) {
+                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                cvma.vm_page_prot);
+        } else {
                 struct ttm_operation_ctx ctx = {
                         .interruptible = false,
                         .no_wait_gpu = false,
@@ -263,21 +240,24 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                 };
 
                 ttm = bo->ttm;
-                if (ttm_tt_populate(bo->ttm, &ctx)) {
+                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+                                                cvma.vm_page_prot);
+
+                /* Allocate all page at once, most common usage */
+                if (ttm_tt_populate(ttm, &ctx)) {
                         ret = VM_FAULT_OOM;
                         goto out_io_unlock;
                 }
-        } else {
-                /* Iomem should not be marked encrypted */
-                cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
         }
 
         /*
          * Speculatively prefault a number of pages. Only error on
          * first page.
          */
-        for (i = 0; i < num_prefault; ++i) {
+        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                 if (bo->mem.bus.is_iomem) {
+                        /* Iomem should not be marked encrypted */
+                        cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
                         pfn = ttm_bo_io_mem_pfn(bo, page_offset);
                 } else {
                         page = ttm->pages[page_offset];
@@ -315,26 +295,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
         ret = VM_FAULT_NOPAGE;
 out_io_unlock:
         ttm_mem_io_unlock(man);
-        return ret;
-}
-EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
-
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
-{
-        struct vm_area_struct *vma = vmf->vma;
-        pgprot_t prot;
-        struct ttm_buffer_object *bo = vma->vm_private_data;
-        vm_fault_t ret;
-
-        ret = ttm_bo_vm_reserve(bo, vmf);
-        if (ret)
-                return ret;
-
-        prot = vm_get_page_prot(vma->vm_flags);
-        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
-        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-                return ret;
-
+out_unlock:
         reservation_object_unlock(bo->resv);
         return ret;
 }
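
With the split helpers gone, the restored ttm_bo_vm_fault() derives the page protection itself instead of taking a prot argument: it copies the VMA into cvma, seeds vm_page_prot from vm_get_page_prot(cvma.vm_flags), folds in the caching attributes for the buffer's current placement via ttm_io_prot(), and strips the memory-encryption bit with pgprot_decrypted() on the iomem path. A condensed sketch of that derivation follows, using only calls that appear in the hunks above; demo_fault_prot() is a name made up for illustration, not a function in this file.

/* Sketch: how the restored fault path computes the PTE protection. */
static pgprot_t demo_fault_prot(struct ttm_buffer_object *bo,
                                struct vm_area_struct *cvma)
{
        /* Base protection comes from the VMA flags, not from a caller. */
        pgprot_t prot = vm_get_page_prot(cvma->vm_flags);

        /* Adjust caching attributes (cached/WC/UC) for the placement. */
        prot = ttm_io_prot(bo->mem.placement, prot);

        /* IO memory must not be mapped with the encryption bit set. */
        if (bo->mem.bus.is_iomem)
                prot = pgprot_decrypted(prot);

        return prot;
}
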
@@ -434,7 +395,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
         return ret;
 }
 
-const struct vm_operations_struct ttm_bo_vm_ops = {
+static const struct vm_operations_struct ttm_bo_vm_ops = {
         .fault = ttm_bo_vm_fault,
         .open = ttm_bo_vm_open,
         .close = ttm_bo_vm_close,
@@ -487,7 +448,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
         if (unlikely(ret != 0))
                 goto out_unref;
 
-        vma->vm_ops = bdev->vm_ops;
+        vma->vm_ops = &ttm_bo_vm_ops;
 
         /*
          * Note: We're transferring the bo reference to
@@ -519,7 +480,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 
         ttm_bo_get(bo);
 
-        vma->vm_ops = bo->bdev->vm_ops;
+        vma->vm_ops = &ttm_bo_vm_ops;
         vma->vm_private_data = bo;
         vma->vm_flags |= VM_MIXEDMAP;
         vma->vm_flags |= VM_IO | VM_DONTEXPAND;
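
For reference, the wrapper deleted just before out_unlock: shows what this revert takes away from drivers that wanted a customized fault handler: reserve the object with ttm_bo_vm_reserve(), let ttm_bo_vm_fault_reserved() insert the PTEs with a caller-chosen protection and prefault count, then unlock unless a retry was requested. The sketch below is reassembled from those removed lines; demo_driver_fault() is only a placeholder name for such a driver handler, and the two helpers it calls no longer exist once this revert is applied.

/*
 * Reassembled from the removed hunks above: the generic wrapper a
 * driver-specific fault handler could mirror before this revert.
 */
static vm_fault_t demo_driver_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        pgprot_t prot;
        vm_fault_t ret;

        /* Reserve the bo, possibly dropping mmap_sem and retrying. */
        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        /* A driver could pick prot / prefault count from its own policy. */
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;     /* already unreserved on this path */

        reservation_object_unlock(bo->resv);
        return ret;
}

After the revert, the same sequence lives inline in ttm_bo_vm_fault(), with the out_unlock label replacing the explicit unlock-after-call step.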