 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -60,10 +61,11 @@ module_param(limit, uint, 0644);
 MODULE_PARM_DESC(limit,
 	"Maximum number of grants that may be mapped by one mapping request");
 
+/* True in PV mode, false otherwise */
 static int use_ptemod;
 
-static int unmap_grant_pages(struct gntdev_grant_map *map,
-			     int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+			      int offset, int pages);
 
 static struct miscdevice gntdev_miscdev;
 
@@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map)
 	kvfree(map->unmap_ops);
 	kvfree(map->kmap_ops);
 	kvfree(map->kunmap_ops);
+	kvfree(map->being_removed);
 	kfree(map);
 }
 
@@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
 	add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
 					GFP_KERNEL);
 	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+	add->being_removed =
+		kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
 	if (NULL == add->grants    ||
 	    NULL == add->map_ops   ||
 	    NULL == add->unmap_ops ||
-	    NULL == add->pages)
+	    NULL == add->pages     ||
+	    NULL == add->being_removed)
 		goto err;
 	if (use_ptemod) {
 		add->kmap_ops   = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
@@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 	if (!refcount_dec_and_test(&map->users))
 		return;
 
-	if (map->pages && !use_ptemod)
+	if (map->pages && !use_ptemod) {
+		/*
+		 * Increment the reference count. This ensures that the
+		 * subsequent call to unmap_grant_pages() will not wind up
+		 * re-entering itself. It *can* wind up calling
+		 * gntdev_put_map() recursively, but such calls will be with a
+		 * reference count greater than 1, so they will return before
+		 * this code is reached. The recursion depth is thus limited to
+		 * 1. Do NOT use refcount_inc() here, as it will detect that
+		 * the reference count is zero and WARN().
+		 */
+		refcount_set(&map->users, 1);
+
+		/*
+		 * Unmap the grants. This may or may not be asynchronous, so it
+		 * is possible that the reference count is 1 on return, but it
+		 * could also be greater than 1.
+		 */
 		unmap_grant_pages(map, 0, map->count);
 
+		/* Check if the memory now needs to be freed */
+		if (!refcount_dec_and_test(&map->users))
+			return;
+
+		/*
+		 * All pages have been returned to the hypervisor, so free the
+		 * map.
+		 */
+	}
+
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
@@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 
 int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 {
+	size_t alloced = 0;
 	int i, err = 0;
 
 	if (!use_ptemod) {
@@ -331,97 +365,116 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
 			map->count);
 
 	for (i = 0; i < map->count; i++) {
-		if (map->map_ops[i].status == GNTST_okay)
+		if (map->map_ops[i].status == GNTST_okay) {
 			map->unmap_ops[i].handle = map->map_ops[i].handle;
-		else if (!err)
+			if (!use_ptemod)
+				alloced++;
+		} else if (!err)
 			err = -EINVAL;
 
 		if (map->flags & GNTMAP_device_map)
 			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
 
 		if (use_ptemod) {
-			if (map->kmap_ops[i].status == GNTST_okay)
+			if (map->kmap_ops[i].status == GNTST_okay) {
+				if (map->map_ops[i].status == GNTST_okay)
+					alloced++;
 				map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
-			else if (!err)
+			} else if (!err)
 				err = -EINVAL;
 		}
 	}
+	atomic_add(alloced, &map->live_grants);
 	return err;
 }
 
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
-		int pages)
+static void __unmap_grant_pages_done(int result,
+		struct gntab_unmap_queue_data *data)
 {
-	int i, err = 0;
-	struct gntab_unmap_queue_data unmap_data;
-
-	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
-		int pgno = (map->notify.addr >> PAGE_SHIFT);
-		if (pgno >= offset && pgno < offset + pages) {
-			/* No need for kmap, pages are in lowmem */
-			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
-			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
-			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
-		}
-	}
-
-	unmap_data.unmap_ops = map->unmap_ops + offset;
-	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
-	unmap_data.pages = map->pages + offset;
-	unmap_data.count = pages;
-
-	err = gnttab_unmap_refs_sync(&unmap_data);
-	if (err)
-		return err;
+	unsigned int i;
+	struct gntdev_grant_map *map = data->data;
+	unsigned int offset = data->unmap_ops - map->unmap_ops;
 
-	for (i = 0; i < pages; i++) {
-		if (map->unmap_ops[offset+i].status)
-			err = -EINVAL;
+	for (i = 0; i < data->count; i++) {
+		WARN_ON(map->unmap_ops[offset+i].status);
 		pr_debug("unmap handle=%d st=%d\n",
 			map->unmap_ops[offset+i].handle,
 			map->unmap_ops[offset+i].status);
 		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 		if (use_ptemod) {
-			if (map->kunmap_ops[offset+i].status)
-				err = -EINVAL;
+			WARN_ON(map->kunmap_ops[offset+i].status);
 			pr_debug("kunmap handle=%u st=%d\n",
 				map->kunmap_ops[offset+i].handle,
 				map->kunmap_ops[offset+i].status);
 			map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
 		}
 	}
-	return err;
+	/*
+	 * Decrease the live-grant counter. This must happen after the loop to
+	 * prevent premature reuse of the grants by gnttab_mmap().
+	 */
+	atomic_sub(data->count, &map->live_grants);
+
+	/* Release reference taken by __unmap_grant_pages */
+	gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+		int pages)
+{
+	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+		int pgno = (map->notify.addr >> PAGE_SHIFT);
+
+		if (pgno >= offset && pgno < offset + pages) {
+			/* No need for kmap, pages are in lowmem */
+			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
+			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+		}
+	}
+
+	map->unmap_data.unmap_ops = map->unmap_ops + offset;
+	map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+	map->unmap_data.pages = map->pages + offset;
+	map->unmap_data.count = pages;
+	map->unmap_data.done = __unmap_grant_pages_done;
+	map->unmap_data.data = map;
+	refcount_inc(&map->users); /* to keep map alive during async call below */
+
+	gnttab_unmap_refs_async(&map->unmap_data);
 }
 
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
-		int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+		int pages)
 {
-	int range, err = 0;
+	int range;
+
+	if (atomic_read(&map->live_grants) == 0)
+		return; /* Nothing to do */
 
 	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 
 	/* It is possible the requested range will have a "hole" where we
 	 * already unmapped some of the grants. Only unmap valid ranges.
 	 */
-	while (pages && !err) {
-		while (pages &&
-		       map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
+	while (pages) {
+		while (pages && map->being_removed[offset]) {
 			offset++;
 			pages--;
 		}
 		range = 0;
 		while (range < pages) {
-			if (map->unmap_ops[offset + range].handle ==
-			    INVALID_GRANT_HANDLE)
+			if (map->being_removed[offset + range])
 				break;
+			map->being_removed[offset + range] = true;
 			range++;
 		}
-		err = __unmap_grant_pages(map, offset, range);
+		if (range)
+			__unmap_grant_pages(map, offset, range);
 		offset += range;
 		pages -= range;
 	}
-
-	return err;
 }
 
 /* ------------------------------------------------------------------ */
@@ -473,7 +526,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 	struct gntdev_grant_map *map =
 		container_of(mn, struct gntdev_grant_map, notifier);
 	unsigned long mstart, mend;
-	int err;
 
 	if (!mmu_notifier_range_blockable(range))
 		return false;
@@ -494,10 +546,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
 			map->index, map->count,
 			map->vma->vm_start, map->vma->vm_end,
 			range->start, range->end, mstart, mend);
-	err = unmap_grant_pages(map,
+	unmap_grant_pages(map,
 				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 				(mend - mstart) >> PAGE_SHIFT);
-	WARN_ON(err);
 
 	return true;
 }
@@ -985,6 +1036,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		goto unlock_out;
 	if (use_ptemod && map->vma)
 		goto unlock_out;
+	if (atomic_read(&map->live_grants)) {
+		err = -EAGAIN;
+		goto unlock_out;
+	}
 	refcount_inc(&map->users);
 
 	vma->vm_ops = &gntdev_vmops;
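
The subtlest part of this patch is the reference-count handling in gntdev_put_map(): after the count drops to zero, it is re-armed to 1 so that the now-asynchronous unmap path can take and drop its own reference without re-entering the teardown, bounding the recursion depth at 1. Below is a minimal userspace sketch of that pattern, not the kernel code itself: `struct map`, `put_map()`, and `unmap_async()` are hypothetical stand-ins, and C11 `atomic_int` models the kernel's `refcount_t`.

```c
/* Userspace model of the refcount re-arm pattern in gntdev_put_map().
 * All names are hypothetical stand-ins; atomic_int plays refcount_t.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct map {
	atomic_int users;
	bool needs_unmap;
};

static void put_map(struct map *map);

/* Stands in for __unmap_grant_pages() + its completion handler: takes a
 * reference for the duration of the (possibly asynchronous) unmap, which
 * the completion path drops via put_map(). Because the caller re-armed
 * users to 1, the nested put_map() sees a count greater than 1 and
 * returns early, so the recursion depth never exceeds 1.
 */
static void unmap_async(struct map *map)
{
	atomic_fetch_add(&map->users, 1);	/* keep map alive */
	printf("unmapping grants...\n");	/* completion runs here */
	put_map(map);				/* drop completion ref */
}

static void put_map(struct map *map)
{
	if (atomic_fetch_sub(&map->users, 1) != 1)
		return;			/* other references remain */

	if (map->needs_unmap) {
		/* Re-arm the count so the unmap path holds a live
		 * reference. The kernel must use refcount_set() here,
		 * since refcount_inc() would WARN() on a zero count.
		 */
		atomic_store(&map->users, 1);
		map->needs_unmap = false;
		unmap_async(map);
		/* The unmap may still be in flight; free only if we
		 * hold the last reference on return.
		 */
		if (atomic_fetch_sub(&map->users, 1) != 1)
			return;
	}
	printf("freeing map\n");
	free(map);
}

int main(void)
{
	struct map *map = calloc(1, sizeof(*map));

	atomic_init(&map->users, 1);
	map->needs_unmap = true;
	put_map(map);		/* drops the last user reference */
	return 0;
}
```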
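The reworked unmap_grant_pages() keys its hole-skipping walk off the new per-grant being_removed flags rather than INVALID_GRANT_HANDLE, and marks each grant as claimed before submitting the asynchronous unmap so the same grant cannot be queued twice. Here is a self-contained sketch of that range walk; `submit_unmap()` is a hypothetical stand-in for `__unmap_grant_pages()`.

```c
/* Sketch of the hole-skipping range walk in unmap_grant_pages().
 * submit_unmap() is a hypothetical stand-in for __unmap_grant_pages().
 */
#include <stdbool.h>
#include <stdio.h>

static void submit_unmap(int offset, int count)
{
	printf("unmap [%d, %d)\n", offset, offset + count);
}

static void unmap_ranges(bool *being_removed, int offset, int pages)
{
	while (pages) {
		int range = 0;

		/* Skip grants whose removal is already in progress. */
		while (pages && being_removed[offset]) {
			offset++;
			pages--;
		}
		/* Claim a maximal contiguous run that still needs
		 * unmapping, marking each grant before submission so
		 * a later caller cannot claim it again.
		 */
		while (range < pages && !being_removed[offset + range]) {
			being_removed[offset + range] = true;
			range++;
		}
		if (range)
			submit_unmap(offset, range);
		offset += range;
		pages -= range;
	}
}

int main(void)
{
	/* Grants 1 and 4 are already being removed. */
	bool flags[8] = { false, true, false, false,
			  true, false, false, false };

	unmap_ranges(flags, 0, 8);	/* prints [0,1), [2,4), [5,8) */
	return 0;
}
```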