@@ -372,6 +372,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->last_avail_idx = 0;
+	vq->next_avail_head = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->signalled_used = 0;
@@ -501,6 +502,8 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 	vq->log = NULL;
 	kfree(vq->heads);
 	vq->heads = NULL;
+	kfree(vq->nheads);
+	vq->nheads = NULL;
 }
 
 /* Helper to allocate iovec buffers for all vqs. */
@@ -518,7 +521,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 					GFP_KERNEL);
 		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 					  GFP_KERNEL);
-		if (!vq->indirect || !vq->log || !vq->heads)
+		vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
+					   GFP_KERNEL);
+		if (!vq->indirect || !vq->log || !vq->heads || !vq->nheads)
 			goto err_nomem;
 	}
 	return 0;
@@ -2159,14 +2164,15 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			break;
 		}
 		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
-			vq->last_avail_idx = s.num & 0xffff;
+			vq->next_avail_head = vq->last_avail_idx =
+					      s.num & 0xffff;
 			vq->last_used_idx = (s.num >> 16) & 0xffff;
 		} else {
 			if (s.num > 0xffff) {
 				r = -EINVAL;
 				break;
 			}
-			vq->last_avail_idx = s.num;
+			vq->next_avail_head = vq->last_avail_idx = s.num;
 		}
 		/* Forget the cached index value. */
 		vq->avail_idx = vq->last_avail_idx;
@@ -2798,11 +2804,12 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 		      unsigned int *out_num, unsigned int *in_num,
 		      struct vhost_log *log, unsigned int *log_num)
 {
+	bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
 	struct vring_desc desc;
 	unsigned int i, head, found = 0;
 	u16 last_avail_idx = vq->last_avail_idx;
 	__virtio16 ring_head;
-	int ret, access;
+	int ret, access, c = 0;
 
 	if (vq->avail_idx == vq->last_avail_idx) {
 		ret = vhost_get_avail_idx(vq);
@@ -2813,17 +2820,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 		return vq->num;
 	}
 
-	/* Grab the next descriptor number they're advertising, and increment
-	 * the index we've seen. */
-	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
-		vq_err(vq, "Failed to read head: idx %d address %p\n",
-		       last_avail_idx,
-		       &vq->avail->ring[last_avail_idx % vq->num]);
-		return -EFAULT;
+	if (in_order)
+		head = vq->next_avail_head & (vq->num - 1);
+	else {
+		/* Grab the next descriptor number they're
+		 * advertising, and increment the index we've seen. */
+		if (unlikely(vhost_get_avail_head(vq, &ring_head,
+						  last_avail_idx))) {
+			vq_err(vq, "Failed to read head: idx %d address %p\n",
+			       last_avail_idx,
+			       &vq->avail->ring[last_avail_idx % vq->num]);
+			return -EFAULT;
+		}
+		head = vhost16_to_cpu(vq, ring_head);
 	}
 
-	head = vhost16_to_cpu(vq, ring_head);
-
 	/* If their number is silly, that's an error. */
 	if (unlikely(head >= vq->num)) {
 		vq_err(vq, "Guest says index %u > %u is available",
@@ -2866,6 +2877,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 				       "in indirect descriptor at idx %d\n", i);
 				return ret;
 			}
+			++c;
 			continue;
 		}
 
@@ -2901,10 +2913,12 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			}
 			*out_num += ret;
 		}
+		++c;
 	} while ((i = next_desc(vq, &desc)) != -1);
 
 	/* On success, increment avail index. */
 	vq->last_avail_idx++;
+	vq->next_avail_head += c;
 
 	/* Assume notifications from guest are disabled at this point,
 	 * if they aren't we would need to update avail_event index. */
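
The in-order path above never reads the avail ring for the head: next_avail_head is a running descriptor count, and because a split virtqueue's size is a power of two, masking it with vq->num - 1 yields the next chain's head directly; the ++c increments then advance the counter by however many descriptors the chain consumed. A small standalone sketch of that bookkeeping (toy types and helper names, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Toy model of the in-order head prediction added above; not kernel code.
 * Assumption: the ring size is a power of two, as the split-ring layout
 * requires, so masking with (num - 1) is a cheap modulo. */
struct toy_vq {
	uint16_t num;			/* ring size, power of two */
	uint16_t next_avail_head;	/* descriptors consumed since reset */
};

/* Head of the next chain when VIRTIO_F_IN_ORDER is negotiated. */
static uint16_t in_order_next_head(const struct toy_vq *vq)
{
	return vq->next_avail_head & (vq->num - 1);
}

/* Mirror of "vq->next_avail_head += c" after a chain of c descriptors. */
static void in_order_consume(struct toy_vq *vq, uint16_t c)
{
	vq->next_avail_head += c;
}

int main(void)
{
	struct toy_vq vq = { .num = 8, .next_avail_head = 0 };

	in_order_consume(&vq, 3);		/* chain of 3 descriptors */
	assert(in_order_next_head(&vq) == 3);
	in_order_consume(&vq, 6);		/* wraps past the ring end */
	assert(in_order_next_head(&vq) == 1);	/* (3 + 6) & 7 */
	return 0;
}
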
@@ -2928,8 +2942,9 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 		cpu_to_vhost32(vq, head),
 		cpu_to_vhost32(vq, len)
 	};
+	u16 nheads = 1;
 
-	return vhost_add_used_n(vq, &heads, 1);
+	return vhost_add_used_n(vq, &heads, &nheads, 1);
 }
 EXPORT_SYMBOL_GPL(vhost_add_used);
 
@@ -2965,10 +2980,9 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 	return 0;
 }
 
-/* After we've used one of their buffers, we tell them about it. We'll then
- * want to notify the guest, using eventfd. */
-int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
-		     unsigned count)
+static int vhost_add_used_n_ooo(struct vhost_virtqueue *vq,
+				struct vring_used_elem *heads,
+				unsigned count)
 {
 	int start, n, r;
 
@@ -2981,7 +2995,69 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 		heads += n;
 		count -= n;
 	}
-	r = __vhost_add_used_n(vq, heads, count);
+	return __vhost_add_used_n(vq, heads, count);
+}
+
+static int vhost_add_used_n_in_order(struct vhost_virtqueue *vq,
+				     struct vring_used_elem *heads,
+				     const u16 *nheads,
+				     unsigned count)
+{
+	vring_used_elem_t __user *used;
+	u16 old, new = vq->last_used_idx;
+	int start, i;
+
+	if (!nheads)
+		return -EINVAL;
+
+	start = vq->last_used_idx & (vq->num - 1);
+	used = vq->used->ring + start;
+
+	for (i = 0; i < count; i++) {
+		if (vhost_put_used(vq, &heads[i], start, 1)) {
+			vq_err(vq, "Failed to write used");
+			return -EFAULT;
+		}
+		start += nheads[i];
+		new += nheads[i];
+		if (start >= vq->num)
+			start -= vq->num;
+	}
+
+	if (unlikely(vq->log_used)) {
+		/* Make sure data is seen before log. */
+		smp_wmb();
+		/* Log used ring entry write. */
+		log_used(vq, ((void __user *)used - (void __user *)vq->used),
+			 (vq->num - start) * sizeof *used);
+		if (start + count > vq->num)
+			log_used(vq, 0,
+				 (start + count - vq->num) * sizeof *used);
+	}
+
+	old = vq->last_used_idx;
+	vq->last_used_idx = new;
+	/* If the driver never bothers to signal in a very long while,
+	 * used index might wrap around. If that happens, invalidate
+	 * signalled_used index we stored. TODO: make sure driver
+	 * signals at least once in 2^16 and remove this. */
+	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+		vq->signalled_used_valid = false;
+	return 0;
+}
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
+		     u16 *nheads, unsigned count)
+{
+	bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
+	int r;
+
+	if (!in_order || !nheads)
+		r = vhost_add_used_n_ooo(vq, heads, count);
+	else
+		r = vhost_add_used_n_in_order(vq, heads, nheads, count);
 
 	if (r < 0)
 		return r;
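
Note how vhost_add_used_n_in_order() writes one used element per completed batch but advances both the ring slot and last_used_idx by that batch's full descriptor count, so the index can move faster than the number of entries written. A toy walk-through of that arithmetic (illustrative values only, power-of-two ring assumed as in the kernel code):

#include <stdio.h>

/* Toy re-run of the index math in vhost_add_used_n_in_order(); not kernel
 * code. Two batches complete, covering 2 and 3 buffers respectively. */
int main(void)
{
	unsigned int num = 8, last_used_idx = 6;
	unsigned int nheads[] = { 2, 3 };
	unsigned int start = last_used_idx & (num - 1);
	unsigned int i;

	for (i = 0; i < 2; i++) {
		printf("used element %u written at slot %u\n", i, start);
		start += nheads[i];
		last_used_idx += nheads[i];
		if (start >= num)
			start -= num;
	}
	/* Only two entries were written, yet the index advanced by five. */
	printf("last_used_idx is now %u\n", last_used_idx);	/* prints 11 */
	return 0;
}
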
@@ -3064,9 +3140,11 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
 /* multi-buffer version of vhost_add_used_and_signal */
 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
 				 struct vhost_virtqueue *vq,
-				 struct vring_used_elem *heads, unsigned count)
+				 struct vring_used_elem *heads,
+				 u16 *nheads,
+				 unsigned count)
 {
-	vhost_add_used_n(vq, heads, count);
+	vhost_add_used_n(vq, heads, nheads, count);
 	vhost_signal(dev, vq);
 }
 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
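
For callers, the visible change is the extra nheads argument: each entry says how many descriptors the matching used element covers, and it is only consulted when VIRTIO_F_IN_ORDER was negotiated (the out-of-order path ignores it). A hypothetical caller-side sketch, mirroring what vhost_add_used() itself does in this patch with a fixed nheads of 1; the helper name and batch bound are illustrative, not from the commit:

#include "vhost.h"	/* driver-local header, as used by drivers/vhost/net.c */

#define DONE_BATCH 64	/* illustrative batch bound */

/* Hypothetical completion-reporting helper built on the updated API.
 * Assumes done <= DONE_BATCH and one descriptor chain per completion. */
static void report_completions(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       const unsigned int *done_heads,
			       const int *done_lens,
			       unsigned int done)
{
	struct vring_used_elem heads[DONE_BATCH];
	u16 nheads[DONE_BATCH];
	unsigned int i;

	for (i = 0; i < done; i++) {
		heads[i].id = cpu_to_vhost32(vq, done_heads[i]);
		heads[i].len = cpu_to_vhost32(vq, done_lens[i]);
		nheads[i] = 1;	/* one descriptor chain per used element */
	}

	/* Takes the in-order path when VIRTIO_F_IN_ORDER is negotiated,
	 * otherwise falls back to the out-of-order path and ignores nheads. */
	vhost_add_used_and_signal_n(dev, vq, heads, nheads, done);
}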