@@ -69,11 +69,27 @@ struct xenbus_map_node {
69
69
unsigned int nr_handles ;
70
70
};
71
71
72
/*
 * Per-call working state for mapping a ring from another domain.
 * Allocated with kzalloc() in xenbus_map_ring_valloc() so that the large
 * per-grant arrays live on the heap rather than on the kernel stack.
 */
struct map_ring_valloc {
	/*
	 * Tracking node for the mapped ring; ownership passes to
	 * xenbus_valloc_pages on success (the map op sets this to NULL),
	 * otherwise it is freed by the caller.
	 */
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	union {
		unsigned long addrs[XENBUS_MAX_RING_GRANTS];	/* HVM path */
		pte_t *ptes[XENBUS_MAX_RING_GRANTS];		/* PV path (alloc_vm_area) */
	};
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	/* Scratch batches handed to the grant-table hypercalls. */
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;	/* HVM only. */
};
72
88
static DEFINE_SPINLOCK (xenbus_valloc_lock );
73
89
static LIST_HEAD (xenbus_valloc_pages );
74
90
75
91
struct xenbus_ring_ops {
76
- int (* map )(struct xenbus_device * dev ,
92
+ int (* map )(struct xenbus_device * dev , struct map_ring_valloc * info ,
77
93
grant_ref_t * gnt_refs , unsigned int nr_grefs ,
78
94
void * * vaddr );
79
95
int (* unmap )(struct xenbus_device * dev , void * vaddr );
/*
 * xenbus_map_ring_valloc - map @nr_grefs grant references from @dev's
 * other end into a contiguous virtual address range, reporting it via
 * *@vaddr.
 *
 * Allocates the heap working state (struct map_ring_valloc) plus the
 * tracking node, then dispatches to the PV or HVM map implementation
 * through ring_ops.  Returns 0 on success; on failure returns a negative
 * errno or a GNTST_* error value and leaves *@vaddr NULL.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node) {
		err = -ENOMEM;
		goto out;
	}

	err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	/* Some hypervisors are buggy and can return 1. */
	if (err > 0)
		err = GNTST_general_error;

 out:
	/*
	 * On success the map op took ownership of info->node and set it to
	 * NULL, so this kfree() is a no-op; on failure it frees the node.
	 */
	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,56 +502,53 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
466
502
grant_ref_t * gnt_refs ,
467
503
unsigned int nr_grefs ,
468
504
grant_handle_t * handles ,
469
- phys_addr_t * addrs ,
505
+ struct map_ring_valloc * info ,
470
506
unsigned int flags ,
471
507
bool * leaked )
472
508
{
473
- struct gnttab_map_grant_ref map [XENBUS_MAX_RING_GRANTS ];
474
- struct gnttab_unmap_grant_ref unmap [XENBUS_MAX_RING_GRANTS ];
475
509
int i , j ;
476
510
int err = GNTST_okay ;
477
511
478
512
if (nr_grefs > XENBUS_MAX_RING_GRANTS )
479
513
return - EINVAL ;
480
514
481
515
for (i = 0 ; i < nr_grefs ; i ++ ) {
482
- memset (& map [i ], 0 , sizeof (map [i ]));
483
- gnttab_set_map_op (& map [i ], addrs [i ], flags , gnt_refs [i ],
484
- dev -> otherend_id );
516
+ gnttab_set_map_op (& info -> map [i ], info -> phys_addrs [i ], flags ,
517
+ gnt_refs [i ], dev -> otherend_id );
485
518
handles [i ] = INVALID_GRANT_HANDLE ;
486
519
}
487
520
488
- gnttab_batch_map (map , i );
521
+ gnttab_batch_map (info -> map , i );
489
522
490
523
for (i = 0 ; i < nr_grefs ; i ++ ) {
491
- if (map [i ].status != GNTST_okay ) {
492
- err = map [i ].status ;
493
- xenbus_dev_fatal (dev , map [i ].status ,
524
+ if (info -> map [i ].status != GNTST_okay ) {
525
+ err = info -> map [i ].status ;
526
+ xenbus_dev_fatal (dev , info -> map [i ].status ,
494
527
"mapping in shared page %d from domain %d" ,
495
528
gnt_refs [i ], dev -> otherend_id );
496
529
goto fail ;
497
530
} else
498
- handles [i ] = map [i ].handle ;
531
+ handles [i ] = info -> map [i ].handle ;
499
532
}
500
533
501
534
return GNTST_okay ;
502
535
503
536
fail :
504
537
for (i = j = 0 ; i < nr_grefs ; i ++ ) {
505
538
if (handles [i ] != INVALID_GRANT_HANDLE ) {
506
- memset ( & unmap [j ], 0 , sizeof ( unmap [ j ]));
507
- gnttab_set_unmap_op ( & unmap [ j ], ( phys_addr_t ) addrs [i ],
539
+ gnttab_set_unmap_op ( & info -> unmap [j ],
540
+ info -> phys_addrs [i ],
508
541
GNTMAP_host_map , handles [i ]);
509
542
j ++ ;
510
543
}
511
544
}
512
545
513
- if (HYPERVISOR_grant_table_op (GNTTABOP_unmap_grant_ref , unmap , j ))
546
+ if (HYPERVISOR_grant_table_op (GNTTABOP_unmap_grant_ref , info -> unmap , j ))
514
547
BUG ();
515
548
516
549
* leaked = false;
517
550
for (i = 0 ; i < j ; i ++ ) {
518
- if (unmap [i ].status != GNTST_okay ) {
551
+ if (info -> unmap [i ].status != GNTST_okay ) {
519
552
* leaked = true;
520
553
break ;
521
554
}
@@ -566,21 +599,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
566
599
return err ;
567
600
}
568
601
569
- struct map_ring_valloc_hvm
570
- {
571
- unsigned int idx ;
572
-
573
- /* Why do we need two arrays? See comment of __xenbus_map_ring */
574
- phys_addr_t phys_addrs [XENBUS_MAX_RING_GRANTS ];
575
- unsigned long addrs [XENBUS_MAX_RING_GRANTS ];
576
- };
577
-
578
602
static void xenbus_map_ring_setup_grant_hvm (unsigned long gfn ,
579
603
unsigned int goffset ,
580
604
unsigned int len ,
581
605
void * data )
582
606
{
583
- struct map_ring_valloc_hvm * info = data ;
607
+ struct map_ring_valloc * info = data ;
584
608
unsigned long vaddr = (unsigned long )gfn_to_virt (gfn );
585
609
586
610
info -> phys_addrs [info -> idx ] = vaddr ;
@@ -589,39 +613,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
589
613
info -> idx ++ ;
590
614
}
591
615
592
- static int xenbus_map_ring_valloc_hvm (struct xenbus_device * dev ,
593
- grant_ref_t * gnt_ref ,
594
- unsigned int nr_grefs ,
595
- void * * vaddr )
616
+ static int xenbus_map_ring_hvm (struct xenbus_device * dev ,
617
+ struct map_ring_valloc * info ,
618
+ grant_ref_t * gnt_ref ,
619
+ unsigned int nr_grefs ,
620
+ void * * vaddr )
596
621
{
597
- struct xenbus_map_node * node ;
622
+ struct xenbus_map_node * node = info -> node ;
598
623
int err ;
599
624
void * addr ;
600
625
bool leaked = false;
601
- struct map_ring_valloc_hvm info = {
602
- .idx = 0 ,
603
- };
604
626
unsigned int nr_pages = XENBUS_PAGES (nr_grefs );
605
627
606
- if (nr_grefs > XENBUS_MAX_RING_GRANTS )
607
- return - EINVAL ;
608
-
609
- * vaddr = NULL ;
610
-
611
- node = kzalloc (sizeof (* node ), GFP_KERNEL );
612
- if (!node )
613
- return - ENOMEM ;
614
-
615
628
err = alloc_xenballooned_pages (nr_pages , node -> hvm .pages );
616
629
if (err )
617
630
goto out_err ;
618
631
619
632
gnttab_foreach_grant (node -> hvm .pages , nr_grefs ,
620
633
xenbus_map_ring_setup_grant_hvm ,
621
- & info );
634
+ info );
622
635
623
636
err = __xenbus_map_ring (dev , gnt_ref , nr_grefs , node -> handles ,
624
- info . phys_addrs , GNTMAP_host_map , & leaked );
637
+ info , GNTMAP_host_map , & leaked );
625
638
node -> nr_handles = nr_grefs ;
626
639
627
640
if (err )
@@ -641,19 +654,20 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
641
654
spin_unlock (& xenbus_valloc_lock );
642
655
643
656
* vaddr = addr ;
657
+ info -> node = NULL ;
658
+
644
659
return 0 ;
645
660
646
661
out_xenbus_unmap_ring :
647
662
if (!leaked )
648
- xenbus_unmap_ring (dev , node -> handles , nr_grefs , info . addrs );
663
+ xenbus_unmap_ring (dev , node -> handles , nr_grefs , info -> addrs );
649
664
else
650
665
pr_alert ("leaking %p size %u page(s)" ,
651
666
addr , nr_pages );
652
667
out_free_ballooned_pages :
653
668
if (!leaked )
654
669
free_xenballooned_pages (nr_pages , node -> hvm .pages );
655
670
out_err :
656
- kfree (node );
657
671
return err ;
658
672
}
659
673
@@ -676,40 +690,30 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
676
690
EXPORT_SYMBOL_GPL (xenbus_unmap_ring_vfree );
677
691
678
692
#ifdef CONFIG_XEN_PV
679
- static int xenbus_map_ring_valloc_pv (struct xenbus_device * dev ,
680
- grant_ref_t * gnt_refs ,
681
- unsigned int nr_grefs ,
682
- void * * vaddr )
693
+ static int xenbus_map_ring_pv (struct xenbus_device * dev ,
694
+ struct map_ring_valloc * info ,
695
+ grant_ref_t * gnt_refs ,
696
+ unsigned int nr_grefs ,
697
+ void * * vaddr )
683
698
{
684
- struct xenbus_map_node * node ;
699
+ struct xenbus_map_node * node = info -> node ;
685
700
struct vm_struct * area ;
686
- pte_t * ptes [XENBUS_MAX_RING_GRANTS ];
687
- phys_addr_t phys_addrs [XENBUS_MAX_RING_GRANTS ];
688
701
int err = GNTST_okay ;
689
702
int i ;
690
703
bool leaked ;
691
704
692
- * vaddr = NULL ;
693
-
694
- if (nr_grefs > XENBUS_MAX_RING_GRANTS )
695
- return - EINVAL ;
696
-
697
- node = kzalloc (sizeof (* node ), GFP_KERNEL );
698
- if (!node )
699
- return - ENOMEM ;
700
-
701
- area = alloc_vm_area (XEN_PAGE_SIZE * nr_grefs , ptes );
705
+ area = alloc_vm_area (XEN_PAGE_SIZE * nr_grefs , info -> ptes );
702
706
if (!area ) {
703
707
kfree (node );
704
708
return - ENOMEM ;
705
709
}
706
710
707
711
for (i = 0 ; i < nr_grefs ; i ++ )
708
- phys_addrs [i ] = arbitrary_virt_to_machine (ptes [i ]).maddr ;
712
+ info -> phys_addrs [i ] =
713
+ arbitrary_virt_to_machine (info -> ptes [i ]).maddr ;
709
714
710
715
err = __xenbus_map_ring (dev , gnt_refs , nr_grefs , node -> handles ,
711
- phys_addrs ,
712
- GNTMAP_host_map | GNTMAP_contains_pte ,
716
+ info , GNTMAP_host_map | GNTMAP_contains_pte ,
713
717
& leaked );
714
718
if (err )
715
719
goto failed ;
@@ -722,6 +726,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
722
726
spin_unlock (& xenbus_valloc_lock );
723
727
724
728
* vaddr = area -> addr ;
729
+ info -> node = NULL ;
730
+
725
731
return 0 ;
726
732
727
733
failed :
@@ -730,11 +736,10 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
730
736
else
731
737
pr_alert ("leaking VM area %p size %u page(s)" , area , nr_grefs );
732
738
733
- kfree (node );
734
739
return err ;
735
740
}
736
741
737
- static int xenbus_unmap_ring_vfree_pv (struct xenbus_device * dev , void * vaddr )
742
+ static int xenbus_unmap_ring_pv (struct xenbus_device * dev , void * vaddr )
738
743
{
739
744
struct xenbus_map_node * node ;
740
745
struct gnttab_unmap_grant_ref unmap [XENBUS_MAX_RING_GRANTS ];
@@ -798,12 +803,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
798
803
}
799
804
800
805
/* Ring map/unmap operations used when running as a PV guest. */
static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif
805
810
806
- struct unmap_ring_vfree_hvm
811
+ struct unmap_ring_hvm
807
812
{
808
813
unsigned int idx ;
809
814
unsigned long addrs [XENBUS_MAX_RING_GRANTS ];
@@ -814,19 +819,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
814
819
unsigned int len ,
815
820
void * data )
816
821
{
817
- struct unmap_ring_vfree_hvm * info = data ;
822
+ struct unmap_ring_hvm * info = data ;
818
823
819
824
info -> addrs [info -> idx ] = (unsigned long )gfn_to_virt (gfn );
820
825
821
826
info -> idx ++ ;
822
827
}
823
828
824
- static int xenbus_unmap_ring_vfree_hvm (struct xenbus_device * dev , void * vaddr )
829
+ static int xenbus_unmap_ring_hvm (struct xenbus_device * dev , void * vaddr )
825
830
{
826
831
int rv ;
827
832
struct xenbus_map_node * node ;
828
833
void * addr ;
829
- struct unmap_ring_vfree_hvm info = {
834
+ struct unmap_ring_hvm info = {
830
835
.idx = 0 ,
831
836
};
832
837
unsigned int nr_pages ;
@@ -887,8 +892,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
887
892
EXPORT_SYMBOL_GPL (xenbus_read_driver_state );
888
893
889
894
/* Ring map/unmap operations used when running as an HVM guest. */
static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};
893
898
894
899
void __init xenbus_ring_ops_init (void )
0 commit comments