@@ -503,6 +503,70 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
 }
 EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
 
+/**
+ * request_arr_init - Allocates memory for the requestor array. Each slot
+ * keeps track of the next available slot in the array. Initially, each
+ * slot points to the next one (as in a Linked List). The last slot
+ * does not point to anything, so its value is U64_MAX by default.
+ * @size The size of the array
+ */
+static u64 *request_arr_init(u32 size)
+{
+	int i;
+	u64 *req_arr;
+
+	req_arr = kcalloc(size, sizeof(u64), GFP_KERNEL);
+	if (!req_arr)
+		return NULL;
+
+	for (i = 0; i < size - 1; i++)
+		req_arr[i] = i + 1;
+
+	/* Last slot (no more available slots) */
+	req_arr[i] = U64_MAX;
+
+	return req_arr;
+}
+
+/*
+ * vmbus_alloc_requestor - Initializes @rqstor's fields.
+ * Index 0 is the first free slot
+ * @size: Size of the requestor array
+ */
+static int vmbus_alloc_requestor(struct vmbus_requestor *rqstor, u32 size)
+{
+	u64 *rqst_arr;
+	unsigned long *bitmap;
+
+	rqst_arr = request_arr_init(size);
+	if (!rqst_arr)
+		return -ENOMEM;
+
+	bitmap = bitmap_zalloc(size, GFP_KERNEL);
+	if (!bitmap) {
+		kfree(rqst_arr);
+		return -ENOMEM;
+	}
+
+	rqstor->req_arr = rqst_arr;
+	rqstor->req_bitmap = bitmap;
+	rqstor->size = size;
+	rqstor->next_request_id = 0;
+	spin_lock_init(&rqstor->req_lock);
+
+	return 0;
+}
+
+/*
+ * vmbus_free_requestor - Frees memory allocated for @rqstor
+ * @rqstor: Pointer to the requestor struct
+ */
+static void vmbus_free_requestor(struct vmbus_requestor *rqstor)
+{
+	kfree(rqstor->req_arr);
+	bitmap_free(rqstor->req_bitmap);
+}
+
 static int __vmbus_open(struct vmbus_channel *newchannel,
 		       void *userdata, u32 userdatalen,
 		       void (*onchannelcallback)(void *context), void *context)
@@ -523,6 +587,12 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
 	if (newchannel->state != CHANNEL_OPEN_STATE)
 		return -EINVAL;
 
+	/* Create and init requestor */
+	if (newchannel->rqstor_size) {
+		if (vmbus_alloc_requestor(&newchannel->requestor, newchannel->rqstor_size))
+			return -ENOMEM;
+	}
+
 	newchannel->state = CHANNEL_OPENING_STATE;
 	newchannel->onchannel_callback = onchannelcallback;
 	newchannel->channel_callback_context = context;
@@ -626,6 +696,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
 error_clean_ring:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
+	vmbus_free_requestor(&newchannel->requestor);
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
 }
@@ -808,6 +879,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 		channel->ringbuffer_gpadlhandle = 0;
 	}
 
+	if (!ret)
+		vmbus_free_requestor(&channel->requestor);
+
 	return ret;
 }
 
@@ -888,7 +962,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
 	/* in 8-bytes granularity */
 	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
 	desc.len8 = (u16)(packetlen_aligned >> 3);
-	desc.trans_id = requestid;
+	desc.trans_id = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
 
 	bufferlist[0].iov_base = &desc;
 	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
@@ -897,7 +971,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid);
 }
 EXPORT_SYMBOL(vmbus_sendpacket);
 
@@ -939,7 +1013,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
 	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
 	desc.length8 = (u16)(packetlen_aligned >> 3);
-	desc.transactionid = requestid;
+	desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
 	desc.reserved = 0;
 	desc.rangecount = pagecount;
 
@@ -956,7 +1030,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3);
+	return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
 
@@ -983,7 +1057,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
 	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
 	desc->length8 = (u16)(packetlen_aligned >> 3);
-	desc->transactionid = requestid;
+	desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
 	desc->reserved = 0;
 	desc->rangecount = 1;
 
@@ -994,7 +1068,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3);
+	return hv_ringbuffer_write(channel, bufferlist, 3, requestid);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -1042,3 +1116,91 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 				  buffer_actual_len, requestid, true);
 }
 EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
+
+/*
+ * vmbus_next_request_id - Returns a new request id. It is also
+ * the index at which the guest memory address is stored.
+ * Uses a spin lock to avoid race conditions.
+ * @rqstor: Pointer to the requestor struct
+ * @rqst_addr: Guest memory address to be stored in the array
+ */
+u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr)
+{
+	unsigned long flags;
+	u64 current_id;
+	const struct vmbus_channel *channel =
+		container_of(rqstor, const struct vmbus_channel, requestor);
+
+	/* Check rqstor has been initialized */
+	if (!channel->rqstor_size)
+		return VMBUS_NO_RQSTOR;
+
+	spin_lock_irqsave(&rqstor->req_lock, flags);
+	current_id = rqstor->next_request_id;
+
+	/* Requestor array is full */
+	if (current_id >= rqstor->size) {
+		spin_unlock_irqrestore(&rqstor->req_lock, flags);
+		return VMBUS_RQST_ERROR;
+	}
+
+	rqstor->next_request_id = rqstor->req_arr[current_id];
+	rqstor->req_arr[current_id] = rqst_addr;
+
+	/* The already held spin lock provides atomicity */
+	bitmap_set(rqstor->req_bitmap, current_id, 1);
+
+	spin_unlock_irqrestore(&rqstor->req_lock, flags);
+
+	/*
+	 * Cannot return an ID of 0, which is reserved for an unsolicited
+	 * message from Hyper-V.
+	 */
+	return current_id + 1;
+}
+EXPORT_SYMBOL_GPL(vmbus_next_request_id);
+
+/*
+ * vmbus_request_addr - Returns the memory address stored at @trans_id
+ * in @rqstor. Uses a spin lock to avoid race conditions.
+ * @rqstor: Pointer to the requestor struct
+ * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's
+ * next request id.
+ */
+u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id)
+{
+	unsigned long flags;
+	u64 req_addr;
+	const struct vmbus_channel *channel =
+		container_of(rqstor, const struct vmbus_channel, requestor);
+
+	/* Check rqstor has been initialized */
+	if (!channel->rqstor_size)
+		return VMBUS_NO_RQSTOR;
+
+	/* Hyper-V can send an unsolicited message with ID of 0 */
+	if (!trans_id)
+		return trans_id;
+
+	spin_lock_irqsave(&rqstor->req_lock, flags);
+
+	/* Data corresponding to trans_id is stored at trans_id - 1 */
+	trans_id--;
+
+	/* Invalid trans_id */
+	if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) {
+		spin_unlock_irqrestore(&rqstor->req_lock, flags);
+		return VMBUS_RQST_ERROR;
+	}
+
+	req_addr = rqstor->req_arr[trans_id];
+	rqstor->req_arr[trans_id] = rqstor->next_request_id;
+	rqstor->next_request_id = trans_id;
+
+	/* The already held spin lock provides atomicity */
+	bitmap_clear(rqstor->req_bitmap, trans_id, 1);
+
+	spin_unlock_irqrestore(&rqstor->req_lock, flags);
+	return req_addr;
+}
+EXPORT_SYMBOL_GPL(vmbus_request_addr);
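
For context on how the new API is meant to be consumed: a driver that sets rqstor_size passes the guest address of its own request state as the requestid argument to vmbus_sendpacket(); hv_ringbuffer_write() (changed in the companion ring_buffer.c hunks) swaps that value for a slot index obtained from vmbus_next_request_id() before the descriptor becomes visible to the host, and the completion path uses vmbus_request_addr() to translate the returned trans_id back into the original address. Below is a minimal driver-side sketch under those assumptions; my_request, my_send() and my_on_channel_callback() are hypothetical names and are not part of this patch.

```c
#include <linux/hyperv.h>
#include <linux/completion.h>

/* Hypothetical driver-side usage sketch (not part of this patch). */
struct my_request {
	struct completion done;
	int status;
};

static int my_send(struct vmbus_channel *chan, void *buf, u32 len,
		   struct my_request *req)
{
	/*
	 * The guest address of the driver's request is passed as requestid;
	 * hv_ringbuffer_write() replaces it with an index from
	 * vmbus_next_request_id() before the packet reaches the host.
	 */
	return vmbus_sendpacket(chan, buf, len, (unsigned long)req,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}

static void my_on_channel_callback(void *context)
{
	struct vmbus_channel *chan = context;
	struct vmpacket_descriptor *desc;

	foreach_vmbus_pkt(desc, chan) {
		/* Translate the host's trans_id back into the guest address. */
		u64 addr = vmbus_request_addr(&chan->requestor, desc->trans_id);
		struct my_request *req;

		/* 0 = unsolicited host message; the other values are sentinels. */
		if (!addr || addr == VMBUS_RQST_ERROR || addr == VMBUS_NO_RQSTOR)
			continue;

		req = (struct my_request *)(unsigned long)addr;
		req->status = 0;
		complete(&req->done);
	}
}
```

Design note: the requestor array doubles as its own free list (each unused slot stores the index of the next free slot), so handing out and releasing an ID in vmbus_next_request_id()/vmbus_request_addr() is O(1) under the spinlock, while the bitmap rejects a trans_id that the guest never handed out.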