Skip to content

Commit 0b72a25

Browse files
ickle
authored and rodrigovivi committed
drm/i915/gt: Fill all the unused space in the GGTT
When we allocate space in the GGTT we may have to allocate a larger region than will be populated by the object to accommodate fencing. Make sure that this space beyond the end of the buffer points safely into scratch space, in case the HW tries to access it anyway (e.g. fenced access to the last tile row). v2: Preemptively / conservatively guard gen6 ggtt as well. Reported-by: Imre Deak <[email protected]> References: https://gitlab.freedesktop.org/drm/intel/-/issues/1554 Signed-off-by: Chris Wilson <[email protected]> Cc: Matthew Auld <[email protected]> Cc: Imre Deak <[email protected]> Cc: [email protected] Reviewed-by: Matthew Auld <[email protected]> Reviewed-by: Imre Deak <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] (cherry picked from commit 4d6c185) Signed-off-by: Rodrigo Vivi <[email protected]>
1 parent 8262b49 commit 0b72a25

File tree

1 file changed

+27
-10
lines changed

1 file changed

+27
-10
lines changed

drivers/gpu/drm/i915/gt/intel_ggtt.c

Lines changed: 27 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -191,21 +191,29 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
191191
enum i915_cache_level level,
192192
u32 flags)
193193
{
194-
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
195-
struct sgt_iter sgt_iter;
196-
gen8_pte_t __iomem *gtt_entries;
197194
const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
195+
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
196+
gen8_pte_t __iomem *gte;
197+
gen8_pte_t __iomem *end;
198+
struct sgt_iter iter;
198199
dma_addr_t addr;
199200

200201
/*
201202
* Note that we ignore PTE_READ_ONLY here. The caller must be careful
202203
* not to allow the user to override access to a read only page.
203204
*/
204205

205-
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
206-
gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
207-
for_each_sgt_daddr(addr, sgt_iter, vma->pages)
208-
gen8_set_pte(gtt_entries++, pte_encode | addr);
206+
gte = (gen8_pte_t __iomem *)ggtt->gsm;
207+
gte += vma->node.start / I915_GTT_PAGE_SIZE;
208+
end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
209+
210+
for_each_sgt_daddr(addr, iter, vma->pages)
211+
gen8_set_pte(gte++, pte_encode | addr);
212+
GEM_BUG_ON(gte > end);
213+
214+
/* Fill the allocated but "unused" space beyond the end of the buffer */
215+
while (gte < end)
216+
gen8_set_pte(gte++, vm->scratch[0].encode);
209217

210218
/*
211219
* We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
241249
u32 flags)
242250
{
243251
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
244-
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
245-
unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
252+
gen6_pte_t __iomem *gte;
253+
gen6_pte_t __iomem *end;
246254
struct sgt_iter iter;
247255
dma_addr_t addr;
248256

257+
gte = (gen6_pte_t __iomem *)ggtt->gsm;
258+
gte += vma->node.start / I915_GTT_PAGE_SIZE;
259+
end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
260+
249261
for_each_sgt_daddr(addr, iter, vma->pages)
250-
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
262+
iowrite32(vm->pte_encode(addr, level, flags), gte++);
263+
GEM_BUG_ON(gte > end);
264+
265+
/* Fill the allocated but "unused" space beyond the end of the buffer */
266+
while (gte < end)
267+
iowrite32(vm->scratch[0].encode, gte++);
251268

252269
/*
253270
* We want to flush the TLBs only after we're certain all the PTE

0 commit comments

Comments (0)