@@ -191,21 +191,29 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
-	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	struct sgt_iter sgt_iter;
-	gen8_pte_t __iomem *gtt_entries;
 	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	gen8_pte_t __iomem *gte;
+	gen8_pte_t __iomem *end;
+	struct sgt_iter iter;
 	dma_addr_t addr;
 
 	/*
 	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
 	 * not to allow the user to override access to a read only page.
 	 */
 
-	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
-	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
-	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
-		gen8_set_pte(gtt_entries++, pte_encode | addr);
+	gte = (gen8_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+	for_each_sgt_daddr(addr, iter, vma->pages)
+		gen8_set_pte(gte++, pte_encode | addr);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		gen8_set_pte(gte++, vm->scratch[0].encode);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     u32 flags)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
-	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+	gen6_pte_t __iomem *gte;
+	gen6_pte_t __iomem *end;
 	struct sgt_iter iter;
 	dma_addr_t addr;
 
+	gte = (gen6_pte_t __iomem *)ggtt->gsm;
+	gte += vma->node.start / I915_GTT_PAGE_SIZE;
+	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
 	for_each_sgt_daddr(addr, iter, vma->pages)
-		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+		iowrite32(vm->pte_encode(addr, level, flags), gte++);
+	GEM_BUG_ON(gte > end);
+
+	/* Fill the allocated but "unused" space beyond the end of the buffer */
+	while (gte < end)
+		iowrite32(vm->scratch[0].encode, gte++);
 
 	/*
 	 * We want to flush the TLBs only after we're certain all the PTE
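Both hunks make the same change: instead of only writing PTEs for the buffer's backing pages, the insert path now derives the end of the reserved PTE window from vma->node.size, asserts the writes stayed inside it, and pads the tail of the window with the scratch-page encoding so no stale translations survive past the end of the buffer. A minimal standalone sketch of that write-then-pad pattern follows; the names and types here are illustrative stand-ins, not the driver's actual API:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t pte_t;

/*
 * Illustrative only: write one PTE per backing page, then fill the
 * remainder of the reserved window with the scratch encoding, the
 * same shape as the gen6/gen8 insert paths above.
 */
static void insert_entries(pte_t *window, size_t window_len,
			   const uint64_t *page_addrs, size_t nr_pages,
			   pte_t encode_flags, pte_t scratch_encode)
{
	pte_t *gte = window;
	pte_t *end = window + window_len;
	size_t i;

	for (i = 0; i < nr_pages; i++)
		*gte++ = encode_flags | page_addrs[i];
	assert(gte <= end);	/* stands in for GEM_BUG_ON(gte > end) */

	/* Pad the allocated but unused tail of the window. */
	while (gte < end)
		*gte++ = scratch_encode;
}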