Skip to content

Commit 3729fe2

Browse files
committed
Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
This reverts commit 031e610, reversing changes made to 52d2d44. The mm changes in there were premature and not fully acked or reviewed by core mm folks. I dropped the ball by merging them via this tree, so let's take them all back out. Signed-off-by: Dave Airlie <[email protected]>
1 parent 7e4b4df commit 3729fe2

30 files changed

+483
-2136
lines changed

MAINTAINERS

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5191,7 +5191,6 @@ T: git git://people.freedesktop.org/~thomash/linux
51915191
S: Supported
51925192
F: drivers/gpu/drm/vmwgfx/
51935193
F: include/uapi/drm/vmwgfx_drm.h
5194-
F: mm/as_dirty_helpers.c
51955194

51965195
DRM DRIVERS
51975196
M: David Airlie <[email protected]>

drivers/gpu/drm/ttm/ttm_bo.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1739,7 +1739,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
17391739
mutex_lock(&ttm_global_mutex);
17401740
list_add_tail(&bdev->device_list, &glob->device_list);
17411741
mutex_unlock(&ttm_global_mutex);
1742-
bdev->vm_ops = &ttm_bo_vm_ops;
17431742

17441743
return 0;
17451744
out_no_sys:

drivers/gpu/drm/ttm/ttm_bo_vm.c

Lines changed: 65 additions & 104 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,8 @@
4242
#include <linux/uaccess.h>
4343
#include <linux/mem_encrypt.h>
4444

45+
#define TTM_BO_VM_NUM_PREFAULT 16
46+
4547
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
4648
struct vm_fault *vmf)
4749
{
@@ -104,30 +106,25 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
104106
+ page_offset;
105107
}
106108

107-
/**
108-
* ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
109-
* @bo: The buffer object
110-
* @vmf: The fault structure handed to the callback
111-
*
112-
* vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
113-
* during long waits, and after the wait the callback will be restarted. This
114-
* is to allow other threads using the same virtual memory space concurrent
115-
* access to map(), unmap() completely unrelated buffer objects. TTM buffer
116-
* object reservations sometimes wait for GPU and should therefore be
117-
* considered long waits. This function reserves the buffer object interruptibly
118-
* taking this into account. Starvation is avoided by the vm system not
119-
* allowing too many repeated restarts.
120-
* This function is intended to be used in customized fault() and _mkwrite()
121-
* handlers.
122-
*
123-
* Return:
124-
* 0 on success and the bo was reserved.
125-
* VM_FAULT_RETRY if blocking wait.
126-
* VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
127-
*/
128-
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
129-
struct vm_fault *vmf)
109+
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
130110
{
111+
struct vm_area_struct *vma = vmf->vma;
112+
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
113+
vma->vm_private_data;
114+
struct ttm_bo_device *bdev = bo->bdev;
115+
unsigned long page_offset;
116+
unsigned long page_last;
117+
unsigned long pfn;
118+
struct ttm_tt *ttm = NULL;
119+
struct page *page;
120+
int err;
121+
int i;
122+
vm_fault_t ret = VM_FAULT_NOPAGE;
123+
unsigned long address = vmf->address;
124+
struct ttm_mem_type_manager *man =
125+
&bdev->man[bo->mem.mem_type];
126+
struct vm_area_struct cvma;
127+
131128
/*
132129
* Work around locking order reversal in fault / nopfn
133130
* between mmap_sem and bo_reserve: Perform a trylock operation
@@ -154,55 +151,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
154151
return VM_FAULT_NOPAGE;
155152
}
156153

157-
return 0;
158-
}
159-
EXPORT_SYMBOL(ttm_bo_vm_reserve);
160-
161-
/**
162-
* ttm_bo_vm_fault_reserved - TTM fault helper
163-
* @vmf: The struct vm_fault given as argument to the fault callback
164-
* @prot: The page protection to be used for this memory area.
165-
* @num_prefault: Maximum number of prefault pages. The caller may want to
166-
* specify this based on madvice settings and the size of the GPU object
167-
* backed by the memory.
168-
*
169-
* This function inserts one or more page table entries pointing to the
170-
* memory backing the buffer object, and then returns a return code
171-
* instructing the caller to retry the page access.
172-
*
173-
* Return:
174-
* VM_FAULT_NOPAGE on success or pending signal
175-
* VM_FAULT_SIGBUS on unspecified error
176-
* VM_FAULT_OOM on out-of-memory
177-
* VM_FAULT_RETRY if retryable wait
178-
*/
179-
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
180-
pgprot_t prot,
181-
pgoff_t num_prefault)
182-
{
183-
struct vm_area_struct *vma = vmf->vma;
184-
struct vm_area_struct cvma = *vma;
185-
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
186-
vma->vm_private_data;
187-
struct ttm_bo_device *bdev = bo->bdev;
188-
unsigned long page_offset;
189-
unsigned long page_last;
190-
unsigned long pfn;
191-
struct ttm_tt *ttm = NULL;
192-
struct page *page;
193-
int err;
194-
pgoff_t i;
195-
vm_fault_t ret = VM_FAULT_NOPAGE;
196-
unsigned long address = vmf->address;
197-
struct ttm_mem_type_manager *man =
198-
&bdev->man[bo->mem.mem_type];
199-
200154
/*
201155
* Refuse to fault imported pages. This should be handled
202156
* (if at all) by redirecting mmap to the exporter.
203157
*/
204-
if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
205-
return VM_FAULT_SIGBUS;
158+
if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
159+
ret = VM_FAULT_SIGBUS;
160+
goto out_unlock;
161+
}
206162

207163
if (bdev->driver->fault_reserve_notify) {
208164
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -213,9 +169,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
213169
break;
214170
case -EBUSY:
215171
case -ERESTARTSYS:
216-
return VM_FAULT_NOPAGE;
172+
ret = VM_FAULT_NOPAGE;
173+
goto out_unlock;
217174
default:
218-
return VM_FAULT_SIGBUS;
175+
ret = VM_FAULT_SIGBUS;
176+
goto out_unlock;
219177
}
220178

221179
if (bo->moving != moving) {
@@ -231,12 +189,21 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
231189
* move.
232190
*/
233191
ret = ttm_bo_vm_fault_idle(bo, vmf);
234-
if (unlikely(ret != 0))
235-
return ret;
192+
if (unlikely(ret != 0)) {
193+
if (ret == VM_FAULT_RETRY &&
194+
!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
195+
/* The BO has already been unreserved. */
196+
return ret;
197+
}
198+
199+
goto out_unlock;
200+
}
236201

237202
err = ttm_mem_io_lock(man, true);
238-
if (unlikely(err != 0))
239-
return VM_FAULT_NOPAGE;
203+
if (unlikely(err != 0)) {
204+
ret = VM_FAULT_NOPAGE;
205+
goto out_unlock;
206+
}
240207
err = ttm_mem_io_reserve_vm(bo);
241208
if (unlikely(err != 0)) {
242209
ret = VM_FAULT_SIGBUS;
@@ -253,8 +220,18 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
253220
goto out_io_unlock;
254221
}
255222

256-
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
257-
if (!bo->mem.bus.is_iomem) {
223+
/*
224+
* Make a local vma copy to modify the page_prot member
225+
* and vm_flags if necessary. The vma parameter is protected
226+
* by mmap_sem in write mode.
227+
*/
228+
cvma = *vma;
229+
cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
230+
231+
if (bo->mem.bus.is_iomem) {
232+
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
233+
cvma.vm_page_prot);
234+
} else {
258235
struct ttm_operation_ctx ctx = {
259236
.interruptible = false,
260237
.no_wait_gpu = false,
@@ -263,21 +240,24 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
263240
};
264241

265242
ttm = bo->ttm;
266-
if (ttm_tt_populate(bo->ttm, &ctx)) {
243+
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
244+
cvma.vm_page_prot);
245+
246+
/* Allocate all page at once, most common usage */
247+
if (ttm_tt_populate(ttm, &ctx)) {
267248
ret = VM_FAULT_OOM;
268249
goto out_io_unlock;
269250
}
270-
} else {
271-
/* Iomem should not be marked encrypted */
272-
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
273251
}
274252

275253
/*
276254
* Speculatively prefault a number of pages. Only error on
277255
* first page.
278256
*/
279-
for (i = 0; i < num_prefault; ++i) {
257+
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
280258
if (bo->mem.bus.is_iomem) {
259+
/* Iomem should not be marked encrypted */
260+
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
281261
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
282262
} else {
283263
page = ttm->pages[page_offset];
@@ -315,26 +295,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
315295
ret = VM_FAULT_NOPAGE;
316296
out_io_unlock:
317297
ttm_mem_io_unlock(man);
318-
return ret;
319-
}
320-
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
321-
322-
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
323-
{
324-
struct vm_area_struct *vma = vmf->vma;
325-
pgprot_t prot;
326-
struct ttm_buffer_object *bo = vma->vm_private_data;
327-
vm_fault_t ret;
328-
329-
ret = ttm_bo_vm_reserve(bo, vmf);
330-
if (ret)
331-
return ret;
332-
333-
prot = vm_get_page_prot(vma->vm_flags);
334-
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
335-
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
336-
return ret;
337-
298+
out_unlock:
338299
reservation_object_unlock(bo->resv);
339300
return ret;
340301
}
@@ -434,7 +395,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
434395
return ret;
435396
}
436397

437-
const struct vm_operations_struct ttm_bo_vm_ops = {
398+
static const struct vm_operations_struct ttm_bo_vm_ops = {
438399
.fault = ttm_bo_vm_fault,
439400
.open = ttm_bo_vm_open,
440401
.close = ttm_bo_vm_close,
@@ -487,7 +448,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
487448
if (unlikely(ret != 0))
488449
goto out_unref;
489450

490-
vma->vm_ops = bdev->vm_ops;
451+
vma->vm_ops = &ttm_bo_vm_ops;
491452

492453
/*
493454
* Note: We're transferring the bo reference to
@@ -519,7 +480,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
519480

520481
ttm_bo_get(bo);
521482

522-
vma->vm_ops = bo->bdev->vm_ops;
483+
vma->vm_ops = &ttm_bo_vm_ops;
523484
vma->vm_private_data = bo;
524485
vma->vm_flags |= VM_MIXEDMAP;
525486
vma->vm_flags |= VM_IO | VM_DONTEXPAND;

drivers/gpu/drm/vmwgfx/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ config DRM_VMWGFX
88
select FB_CFB_IMAGEBLIT
99
select DRM_TTM
1010
select FB
11-
select AS_DIRTY_HELPERS
1211
# Only needed for the transitional use of drm_crtc_init - can be removed
1312
# again once vmwgfx sets up the primary plane itself.
1413
select DRM_KMS_HELPER

drivers/gpu/drm/vmwgfx/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
88
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
99
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
1010
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
11-
vmwgfx_validation.o vmwgfx_page_dirty.o \
11+
vmwgfx_validation.o \
1212
ttm_object.o ttm_lock.o
1313

1414
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o

0 commit comments

Comments
 (0)