@@ -252,6 +252,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
         struct vm_area_struct *area = vmf->vma;
         struct i915_mmap_offset *mmo = area->vm_private_data;
         struct drm_i915_gem_object *obj = mmo->obj;
+        unsigned long obj_offset;
         resource_size_t iomap;
         int err;
 
@@ -273,10 +274,11 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
                 iomap -= obj->mm.region->region.start;
         }
 
+        obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
         /* PTEs are revoked in obj->ops->put_pages() */
         err = remap_io_sg(area,
                           area->vm_start, area->vm_end - area->vm_start,
-                          obj->mm.pages->sgl, iomap);
+                          obj->mm.pages->sgl, obj_offset, iomap);
 
         if (area->vm_flags & VM_WRITE) {
                 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
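
In vm_fault_cpu() the new obj_offset term strips the fake offset that the DRM VMA
offset manager encodes into vm_pgoff, leaving only the page offset into the object
that userspace actually asked for; that offset is then handed to remap_io_sg() so
the mapping can start at the right page of the object. A minimal userspace sketch
of the subtraction, with invented values standing in for drm_vma_node_start() and
area->vm_pgoff:

    #include <stdio.h>

    int main(void)
    {
            /* Invented values: the vma node starts at fake page 0x100000 and
             * userspace mmap()ed the object starting at its 4th page. */
            unsigned long node_start = 0x100000;     /* drm_vma_node_start() */
            unsigned long vm_pgoff   = 0x100000 + 4; /* area->vm_pgoff */

            /* Same subtraction as in vm_fault_cpu(): drop the fake offset. */
            unsigned long obj_offset = vm_pgoff - node_start;

            printf("obj_offset = %lu pages into the object\n", obj_offset);
            return 0;
    }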
@@ -290,6 +292,47 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
         return i915_error_to_vmf_fault(err);
 }
 
+static void set_address_limits(struct vm_area_struct *area,
+                               struct i915_vma *vma,
+                               unsigned long obj_offset,
+                               resource_size_t gmadr_start,
+                               unsigned long *start_vaddr,
+                               unsigned long *end_vaddr,
+                               unsigned long *pfn)
+{
+        unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+        long start, end; /* memory boundaries */
+
+        /*
+         * Let's move into the ">> PAGE_SHIFT"
+         * domain to be sure not to lose bits
+         */
+        vm_start = area->vm_start >> PAGE_SHIFT;
+        vm_end = area->vm_end >> PAGE_SHIFT;
+        vma_size = vma->size >> PAGE_SHIFT;
+
+        /*
+         * Calculate the memory boundaries by considering the offset
+         * provided by the user during memory mapping and the offset
+         * provided for the partial mapping.
+         */
+        start = vm_start;
+        start -= obj_offset;
+        start += vma->gtt_view.partial.offset;
+        end = start + vma_size;
+
+        start = max_t(long, start, vm_start);
+        end = min_t(long, end, vm_end);
+
+        /* Let's move back into the "<< PAGE_SHIFT" domain */
+        *start_vaddr = (unsigned long)start << PAGE_SHIFT;
+        *end_vaddr = (unsigned long)end << PAGE_SHIFT;
+
+        *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+        *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
+        *pfn += obj_offset - vma->gtt_view.partial.offset;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
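
set_address_limits() works in page units to avoid losing bits, clamps the range to
be remapped to the part of the user's VMA that the bound partial GTT view can
actually back, and derives the PFN of the first page to remap from the aperture
base plus the GGTT offset. A worked userspace example of the same arithmetic;
every value below is invented, and gmadr_ggtt_pfn stands in for
(ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* Invented example: a 16-page mmap that starts at object page 8,
             * while the bound partial GTT view covers object pages 4..23. */
            unsigned long vm_start = 0x7f0000000000UL;              /* area->vm_start */
            unsigned long vm_end = vm_start + (16UL << PAGE_SHIFT); /* area->vm_end */
            unsigned long obj_offset = 8;     /* vm_pgoff - drm_vma_node_start() */
            unsigned long partial_offset = 4; /* vma->gtt_view.partial.offset */
            unsigned long vma_size = 20UL << PAGE_SHIFT;            /* vma->size */
            unsigned long gmadr_ggtt_pfn = 0x80000;                 /* view's first pfn */

            /* Same steps as set_address_limits(), in the ">> PAGE_SHIFT" domain. */
            long vm_start_pg = vm_start >> PAGE_SHIFT;
            long vm_end_pg = vm_end >> PAGE_SHIFT;
            long start = vm_start_pg - (long)obj_offset + (long)partial_offset;
            long end = start + (long)(vma_size >> PAGE_SHIFT);

            if (start < vm_start_pg)          /* max_t(long, start, vm_start) */
                    start = vm_start_pg;
            if (end > vm_end_pg)              /* min_t(long, end, vm_end) */
                    end = vm_end_pg;

            unsigned long start_vaddr = (unsigned long)start << PAGE_SHIFT;
            unsigned long end_vaddr = (unsigned long)end << PAGE_SHIFT;
            unsigned long pfn = gmadr_ggtt_pfn;

            pfn += (start_vaddr - vm_start) >> PAGE_SHIFT;
            pfn += obj_offset - partial_offset;

            /* Prints: remap 16 pages starting 4 pages into the GTT view */
            printf("remap %lu pages starting %lu pages into the GTT view\n",
                   (end_vaddr - start_vaddr) >> PAGE_SHIFT, pfn - gmadr_ggtt_pfn);
            return 0;
    }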
@@ -302,14 +345,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
         struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
         bool write = area->vm_flags & VM_WRITE;
         struct i915_gem_ww_ctx ww;
+        unsigned long obj_offset;
+        unsigned long start, end; /* memory boundaries */
         intel_wakeref_t wakeref;
         struct i915_vma *vma;
         pgoff_t page_offset;
+        unsigned long pfn;
         int srcu;
         int ret;
 
-        /* We don't use vmf->pgoff since that has the fake offset */
+        obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+        page_offset += obj_offset;
 
         trace_i915_gem_object_fault(obj, page_offset, true, write);
 
@@ -402,12 +449,16 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
         if (ret)
                 goto err_unpin;
 
+        /*
+         * Dump all the necessary parameters in this function to perform the
+         * arithmetic calculation for the virtual address start and end and
+         * the PFN (Page Frame Number).
+         */
+        set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
+                           &start, &end, &pfn);
+
         /* Finally, remap it using the new GTT offset */
-        ret = remap_io_mapping(area,
-                               area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-                               (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-                               min_t(u64, vma->size, area->vm_end - area->vm_start),
-                               &ggtt->iomap);
+        ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
         if (ret)
                 goto err_fence;
 
@@ -1030,9 +1081,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
         rcu_read_lock();
         drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-                                                  vma->vm_pgoff,
-                                                  vma_pages(vma));
+        node = drm_vma_offset_lookup_locked(dev->vma_offset_manager,
+                                            vma->vm_pgoff,
+                                            vma_pages(vma));
         if (node && drm_vma_node_is_allowed(node, priv)) {
                 /*
                  * Skip 0-refcnted objects as it is in the process of being
@@ -1084,6 +1135,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
                 mmo = mmap_offset_attach(obj, mmap_type, NULL);
                 if (IS_ERR(mmo))
                         return PTR_ERR(mmo);
+
+                vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
         }
 
         /*
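
The last two hunks keep the lookup and the offset bookkeeping consistent once
vm_pgoff may point into the middle of an object: drm_vma_offset_lookup_locked(),
unlike the exact variant, also matches when vm_pgoff lies inside the node rather
than exactly at its start, and i915_gem_fb_mmap() now biases vm_pgoff by the node
start so that the vm_pgoff - drm_vma_node_start() subtraction in the fault
handlers also comes out right for kernel-internal framebuffer mappings. A toy
round trip of that arithmetic, with an invented node start:

    #include <stdio.h>

    int main(void)
    {
            unsigned long node_start = 0x200000; /* drm_vma_node_start(), invented */
            unsigned long vm_pgoff = 0;          /* fb mapping starts at object page 0 */

            vm_pgoff += node_start;                           /* bias added in i915_gem_fb_mmap() */
            unsigned long obj_offset = vm_pgoff - node_start; /* subtraction in the fault handlers */

            printf("obj_offset = %lu\n", obj_offset); /* 0: map from the object's first page */
            return 0;
    }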