@@ -69,11 +69,27 @@ struct xenbus_map_node {
 	unsigned int   nr_handles;
 };
 
+struct map_ring_valloc {
+	struct xenbus_map_node *node;
+
+	/* Why do we need two arrays? See comment of __xenbus_map_ring */
+	union {
+		unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+		pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+	};
+	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+	unsigned int idx;	/* HVM only. */
+};
+
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-	int (*map)(struct xenbus_device *dev,
+	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
 		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
 		   void **vaddr);
 	int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -440,21 +456,33 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
  * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
  * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
 int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
 			   unsigned int nr_grefs, void **vaddr)
 {
 	int err;
+	struct map_ring_valloc *info;
+
+	*vaddr = NULL;
+
+	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+		return -EINVAL;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
 
-	err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
-	/* Some hypervisors are buggy and can return 1. */
-	if (err > 0)
-		err = GNTST_general_error;
+	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+	if (!info->node)
+		err = -ENOMEM;
+	else
+		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
 
+	kfree(info->node);
+	kfree(info);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
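
Not part of the commit: a minimal caller-side sketch of the interface whose contract is tightened above. After this change xenbus_map_ring_valloc() reports only 0 or -errno, never raw GNTST_* values. The names struct my_backend, my_backend_map_ring and my_backend_unmap_ring below are hypothetical; only xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree() are real xenbus calls.

#include <xen/xenbus.h>
#include <xen/grant_table.h>

/* Hypothetical per-device state used only for this sketch. */
struct my_backend {
	grant_ref_t ring_ref;	/* grant reference handed over by the frontend */
	void *ring;		/* local mapping of the shared ring page */
};

static int my_backend_map_ring(struct xenbus_device *dev, struct my_backend *be)
{
	int err;

	/*
	 * Map one granted page; on failure xenbus_map_ring_valloc() has
	 * already switched the device to XenbusStateClosing.
	 */
	err = xenbus_map_ring_valloc(dev, &be->ring_ref, 1, &be->ring);
	if (err)
		return err;	/* 0 or -errno after this commit */

	return 0;
}

static void my_backend_unmap_ring(struct xenbus_device *dev, struct my_backend *be)
{
	if (be->ring) {
		xenbus_unmap_ring_vfree(dev, be->ring);
		be->ring = NULL;
	}
}
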
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
 			     grant_ref_t *gnt_refs,
 			     unsigned int nr_grefs,
 			     grant_handle_t *handles,
-			     phys_addr_t *addrs,
+			     struct map_ring_valloc *info,
 			     unsigned int flags,
 			     bool *leaked)
 {
-	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
 	int i, j;
-	int err = GNTST_okay;
 
 	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
 		return -EINVAL;
 
 	for (i = 0; i < nr_grefs; i++) {
-		memset(&map[i], 0, sizeof(map[i]));
-		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
-				  dev->otherend_id);
+		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+				  gnt_refs[i], dev->otherend_id);
 		handles[i] = INVALID_GRANT_HANDLE;
 	}
 
-	gnttab_batch_map(map, i);
+	gnttab_batch_map(info->map, i);
 
 	for (i = 0; i < nr_grefs; i++) {
-		if (map[i].status != GNTST_okay) {
-			err = map[i].status;
-			xenbus_dev_fatal(dev, map[i].status,
+		if (info->map[i].status != GNTST_okay) {
+			xenbus_dev_fatal(dev, info->map[i].status,
 					 "mapping in shared page %d from domain %d",
 					 gnt_refs[i], dev->otherend_id);
 			goto fail;
 		} else
-			handles[i] = map[i].handle;
+			handles[i] = info->map[i].handle;
 	}
 
-	return GNTST_okay;
+	return 0;
 
  fail:
 	for (i = j = 0; i < nr_grefs; i++) {
 		if (handles[i] != INVALID_GRANT_HANDLE) {
-			memset(&unmap[j], 0, sizeof(unmap[j]));
-			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+			gnttab_set_unmap_op(&info->unmap[j],
+					    info->phys_addrs[i],
 					    GNTMAP_host_map, handles[i]);
 			j++;
 		}
 	}
 
-	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
 		BUG();
 
 	*leaked = false;
 	for (i = 0; i < j; i++) {
-		if (unmap[i].status != GNTST_okay) {
+		if (info->unmap[i].status != GNTST_okay) {
 			*leaked = true;
 			break;
 		}
 	}
 
-	return err;
+	return -ENOENT;
 }
 
 /**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
 	return err;
 }
 
-struct map_ring_valloc_hvm
-{
-	unsigned int idx;
-
-	/* Why do we need two arrays? See comment of __xenbus_map_ring */
-	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
 					    unsigned int goffset,
 					    unsigned int len,
 					    void *data)
 {
-	struct map_ring_valloc_hvm *info = data;
+	struct map_ring_valloc *info = data;
 	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
 
 	info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
 	info->idx++;
 }
 
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-				      grant_ref_t *gnt_ref,
-				      unsigned int nr_grefs,
-				      void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+			       struct map_ring_valloc *info,
+			       grant_ref_t *gnt_ref,
+			       unsigned int nr_grefs,
+			       void **vaddr)
 {
-	struct xenbus_map_node *node;
+	struct xenbus_map_node *node = info->node;
 	int err;
 	void *addr;
 	bool leaked = false;
-	struct map_ring_valloc_hvm info = {
-		.idx = 0,
-	};
 	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-		return -EINVAL;
-
-	*vaddr = NULL;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-
 	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
 	if (err)
 		goto out_err;
 
 	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
 			     xenbus_map_ring_setup_grant_hvm,
-			     &info);
+			     info);
 
 	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
-				info.phys_addrs, GNTMAP_host_map, &leaked);
+				info, GNTMAP_host_map, &leaked);
 	node->nr_handles = nr_grefs;
 
 	if (err)
@@ -641,19 +644,20 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 	spin_unlock(&xenbus_valloc_lock);
 
 	*vaddr = addr;
+	info->node = NULL;
+
 	return 0;
 
  out_xenbus_unmap_ring:
 	if (!leaked)
-		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
 	else
 		pr_alert("leaking %p size %u page(s)",
 			 addr, nr_pages);
 out_free_ballooned_pages:
 	if (!leaked)
 		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
-	kfree(node);
 	return err;
 }
 
@@ -676,40 +680,30 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
 #ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-				     grant_ref_t *gnt_refs,
-				     unsigned int nr_grefs,
-				     void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+			      struct map_ring_valloc *info,
+			      grant_ref_t *gnt_refs,
+			      unsigned int nr_grefs,
+			      void **vaddr)
 {
-	struct xenbus_map_node *node;
+	struct xenbus_map_node *node = info->node;
 	struct vm_struct *area;
-	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
 	int err = GNTST_okay;
 	int i;
 	bool leaked;
 
-	*vaddr = NULL;
-
-	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-		return -EINVAL;
-
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (!node)
-		return -ENOMEM;
-
-	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
 	if (!area) {
 		kfree(node);
 		return -ENOMEM;
 	}
 
 	for (i = 0; i < nr_grefs; i++)
-		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+		info->phys_addrs[i] =
+			arbitrary_virt_to_machine(info->ptes[i]).maddr;
 
 	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-				phys_addrs,
-				GNTMAP_host_map | GNTMAP_contains_pte,
+				info, GNTMAP_host_map | GNTMAP_contains_pte,
 				&leaked);
 	if (err)
 		goto failed;
@@ -722,6 +716,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 	spin_unlock(&xenbus_valloc_lock);
 
 	*vaddr = area->addr;
+	info->node = NULL;
+
 	return 0;
 
 failed:
@@ -730,11 +726,10 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 	else
 		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
 
-	kfree(node);
 	return err;
 }
 
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
 {
 	struct xenbus_map_node *node;
 	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +793,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 }
 
 static const struct xenbus_ring_ops ring_ops_pv = {
-	.map = xenbus_map_ring_valloc_pv,
-	.unmap = xenbus_unmap_ring_vfree_pv,
+	.map = xenbus_map_ring_pv,
+	.unmap = xenbus_unmap_ring_pv,
 };
 #endif
 
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
 {
 	unsigned int idx;
 	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +809,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
 					      unsigned int len,
 					      void *data)
 {
-	struct unmap_ring_vfree_hvm *info = data;
+	struct unmap_ring_hvm *info = data;
 
 	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
 
 	info->idx++;
 }
 
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 {
 	int rv;
 	struct xenbus_map_node *node;
 	void *addr;
-	struct unmap_ring_vfree_hvm info = {
+	struct unmap_ring_hvm info = {
 		.idx = 0,
 	};
 	unsigned int nr_pages;
@@ -887,8 +882,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
 static const struct xenbus_ring_ops ring_ops_hvm = {
-	.map = xenbus_map_ring_valloc_hvm,
-	.unmap = xenbus_unmap_ring_vfree_hvm,
+	.map = xenbus_map_ring_hvm,
+	.unmap = xenbus_unmap_ring_hvm,
 };
 
 void __init xenbus_ring_ops_init(void)
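
For orientation only: the hunks above switch both ring_ops tables over to the renamed helpers, and xenbus_ring_ops_init() (whose body is not part of this diff) decides which table the file-scope ring_ops pointer refers to. A rough paraphrase is sketched below; the exact predicate is an assumption here, not taken from the diff.

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	/* PV domains manage their own PTEs and need the PV mapping path. */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}
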