@@ -463,13 +463,15 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host,
                                       size_t size, size_t max_size)
 {
     QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
+    Error *local_err = NULL;
     int ret;
 
     trace_qemu_vfio_ram_block_added(s, host, max_size);
-    ret = qemu_vfio_dma_map(s, host, max_size, false, NULL);
+    ret = qemu_vfio_dma_map(s, host, max_size, false, NULL, &local_err);
     if (ret) {
-        error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size,
-                     strerror(-ret));
+        error_reportf_err(local_err,
+                          "qemu_vfio_dma_map(%p, %zu) failed: ",
+                          host, max_size);
     }
 }
 
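Note on the hunk above: with the extra Error ** parameter, qemu_vfio_dma_map() no longer formats its own message; the caller collects the detailed error in a local Error * and prepends its own context with error_reportf_err(), which also frees the object. A minimal stand-alone sketch of that caller-side pattern follows; do_thing() is a hypothetical callee used only for illustration, not part of this patch.

#include "qemu/osdep.h"
#include "qapi/error.h"

/* Hypothetical callee following QEMU's errp convention. */
static int do_thing(int fd, Error **errp)
{
    if (fd < 0) {
        error_setg(errp, "bad file descriptor");
        return -EINVAL;
    }
    return 0;
}

static void caller(int fd)
{
    Error *local_err = NULL;

    if (do_thing(fd, &local_err) < 0) {
        /* Prepends context, prints the message, and frees local_err. */
        error_reportf_err(local_err, "do_thing(%d) failed: ", fd);
    }
}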
@@ -608,7 +610,7 @@ static IOVAMapping *qemu_vfio_add_mapping(QEMUVFIOState *s,
 
 /* Do the DMA mapping with VFIO. */
 static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size,
-                                uint64_t iova)
+                                uint64_t iova, Error **errp)
 {
     struct vfio_iommu_type1_dma_map dma_map = {
         .argsz = sizeof(dma_map),
@@ -620,7 +622,7 @@ static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size,
     trace_qemu_vfio_do_mapping(s, host, iova, size);
 
     if (ioctl(s->container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
-        error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
+        error_setg_errno(errp, errno, "VFIO_MAP_DMA failed");
         return -errno;
     }
     return 0;
@@ -660,13 +662,13 @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
     if (QEMU_VFIO_DEBUG) {
         for (i = 0; i < s->nr_mappings - 1; ++i) {
             if (!(s->mappings[i].host < s->mappings[i + 1].host)) {
-                fprintf(stderr, "item %d not sorted!\n", i);
+                error_report("item %d not sorted!", i);
                 qemu_vfio_dump_mappings(s);
                 return false;
             }
             if (!(s->mappings[i].host + s->mappings[i].size <=
                   s->mappings[i + 1].host)) {
-                fprintf(stderr, "item %d overlap with next!\n", i);
+                error_report("item %d overlap with next!", i);
                 qemu_vfio_dump_mappings(s);
                 return false;
             }
@@ -675,8 +677,8 @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
     return true;
 }
 
-static int
-qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
+static bool qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size,
+                                      uint64_t *iova, Error **errp)
 {
     int i;
 
@@ -691,14 +693,16 @@ qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
             s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) {
             *iova = s->low_water_mark;
             s->low_water_mark += size;
-            return 0;
+            return true;
         }
     }
-    return -ENOMEM;
+    error_setg(errp, "fixed iova range not found");
+
+    return false;
 }
 
-static int
-qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
+static bool qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size,
+                                     uint64_t *iova, Error **errp)
 {
     int i;
 
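The two IOVA allocators above are converted to the common QEMU shape of returning bool for success while setting *errp on failure, rather than encoding the reason in a bare -ENOMEM. A hedged sketch of that bool-plus-errp idiom, with an invented find_free_range() helper standing in for the real allocators:

#include "qemu/osdep.h"
#include "qapi/error.h"

/* Invented example: fills *result and returns true, or sets *errp and returns false. */
static bool find_free_range(uint64_t low, uint64_t high, size_t size,
                            uint64_t *result, Error **errp)
{
    if (high - low + 1 < size) {
        error_setg(errp, "range exhausted");
        return false;
    }
    *result = low;
    return true;
}

static int use_range(uint64_t *iova, Error **errp)
{
    /* The caller just checks the bool and forwards errp unchanged. */
    if (!find_free_range(0x10000, 0xfffff, 0x1000, iova, errp)) {
        return -ENOMEM;
    }
    return 0;
}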
@@ -713,10 +717,27 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
             s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) {
             *iova = s->high_water_mark - size;
             s->high_water_mark = *iova;
-            return 0;
+            return true;
         }
     }
-    return -ENOMEM;
+    error_setg(errp, "temporary iova range not found");
+
+    return false;
+}
+
+/**
+ * qemu_vfio_water_mark_reached:
+ *
+ * Returns %true if high watermark has been reached, %false otherwise.
+ */
+static bool qemu_vfio_water_mark_reached(QEMUVFIOState *s, size_t size,
+                                         Error **errp)
+{
+    if (s->high_water_mark - s->low_water_mark + 1 < size) {
+        error_setg(errp, "iova exhausted (water mark reached)");
+        return true;
+    }
+    return false;
 }
 
 /* Map [host, host + size) area into a contiguous IOVA address space, and store
@@ -725,61 +746,53 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
  * mapping status within this area is not allowed).
  */
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
-                      bool temporary, uint64_t *iova)
+                      bool temporary, uint64_t *iova, Error **errp)
 {
-    int ret = 0;
     int index;
     IOVAMapping *mapping;
     uint64_t iova0;
 
     assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size));
     assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size));
     trace_qemu_vfio_dma_map(s, host, size, temporary, iova);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     mapping = qemu_vfio_find_mapping(s, host, &index);
     if (mapping) {
         iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host);
     } else {
-        if (s->high_water_mark - s->low_water_mark + 1 < size) {
-            ret = -ENOMEM;
-            goto out;
+        int ret;
+
+        if (qemu_vfio_water_mark_reached(s, size, errp)) {
+            return -ENOMEM;
         }
         if (!temporary) {
-            if (qemu_vfio_find_fixed_iova(s, size, &iova0)) {
-                ret = -ENOMEM;
-                goto out;
+            if (!qemu_vfio_find_fixed_iova(s, size, &iova0, errp)) {
+                return -ENOMEM;
             }
 
             mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
-            if (!mapping) {
-                ret = -ENOMEM;
-                goto out;
-            }
             assert(qemu_vfio_verify_mappings(s));
-            ret = qemu_vfio_do_mapping(s, host, size, iova0);
-            if (ret) {
+            ret = qemu_vfio_do_mapping(s, host, size, iova0, errp);
+            if (ret < 0) {
                 qemu_vfio_undo_mapping(s, mapping, NULL);
-                goto out;
+                return ret;
             }
             qemu_vfio_dump_mappings(s);
         } else {
-            if (qemu_vfio_find_temp_iova(s, size, &iova0)) {
-                ret = -ENOMEM;
-                goto out;
+            if (!qemu_vfio_find_temp_iova(s, size, &iova0, errp)) {
+                return -ENOMEM;
             }
-            ret = qemu_vfio_do_mapping(s, host, size, iova0);
-            if (ret) {
-                goto out;
+            ret = qemu_vfio_do_mapping(s, host, size, iova0, errp);
+            if (ret < 0) {
+                return ret;
             }
         }
     }
     trace_qemu_vfio_dma_mapped(s, host, iova0, size);
     if (iova) {
         *iova = iova0;
     }
-out:
-    qemu_mutex_unlock(&s->lock);
-    return ret;
+    return 0;
 }
 
 /* Reset the high watermark and free all "temporary" mappings. */
@@ -813,14 +826,12 @@ void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host)
     }
 
     trace_qemu_vfio_dma_unmap(s, host);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     m = qemu_vfio_find_mapping(s, host, &index);
     if (!m) {
-        goto out;
+        return;
     }
     qemu_vfio_undo_mapping(s, m, NULL);
-out:
-    qemu_mutex_unlock(&s->lock);
 }
 
 static void qemu_vfio_reset(QEMUVFIOState *s)
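One more note, as a general observation rather than part of the patch: the goto out / qemu_mutex_unlock() exit paths in qemu_vfio_dma_map() and qemu_vfio_dma_unmap() can be dropped because QEMU_LOCK_GUARD() (include/qemu/lockable.h) acquires the mutex and releases it automatically when the enclosing scope is left, so every early return already unlocks. A small illustrative sketch with a made-up Counter type:

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/lockable.h"

typedef struct {
    QemuMutex lock;
    int value;
} Counter;   /* made-up type for illustration */

static int counter_fetch(Counter *c, bool fail)
{
    QEMU_LOCK_GUARD(&c->lock);   /* unlocked automatically at scope exit */

    if (fail) {
        return -EINVAL;          /* no explicit unlock on the error path */
    }
    return c->value;
}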