@@ -94,6 +94,63 @@ static u32 virtio_transport_get_local_cid(void)
 	return ret;
 }
 
+/* The caller needs to hold vsock->tx_lock on the vq */
+static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq,
+				     struct virtio_vsock *vsock)
+{
+	int ret, in_sg = 0, out_sg = 0;
+	struct scatterlist **sgs;
+
+	sgs = vsock->out_sgs;
+	sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
+		    sizeof(*virtio_vsock_hdr(skb)));
+	out_sg++;
+
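+	/* The header always occupies the first out descriptor; the payload
+	 * follows, either from the linear buffer or from the page frags.
+	 */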
+	if (!skb_is_nonlinear(skb)) {
+		if (skb->len > 0) {
+			sg_init_one(sgs[out_sg], skb->data, skb->len);
+			out_sg++;
+		}
+	} else {
+		struct skb_shared_info *si;
+		int i;
+
+		/* If the skb is nonlinear, then its buffer must contain
+		 * only the header and nothing more. The data is stored
+		 * in the fragmented part.
+		 */
+		WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
+
+		si = skb_shinfo(skb);
+
+		for (i = 0; i < si->nr_frags; i++) {
+			skb_frag_t *skb_frag = &si->frags[i];
+			void *va;
+
+			/* We will use 'page_to_virt()' for the userspace page
+			 * here, because the virtio or dma-mapping layers will
+			 * call 'virt_to_phys()' later to fill the buffer
+			 * descriptor. We don't touch memory at the "virtual"
+			 * address of this page.
+			 */
+			va = page_to_virt(skb_frag_page(skb_frag));
+			sg_init_one(sgs[out_sg],
+				    va + skb_frag_off(skb_frag),
+				    skb_frag_size(skb_frag));
+			out_sg++;
+		}
+	}
+
+	ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
+	/* Usually this means that there is no more space available in
+	 * the vq.
+	 */
+	if (ret < 0)
+		return ret;
+
+	virtio_transport_deliver_tap_pkt(skb);
+	return 0;
+}
+
 static void
 virtio_transport_send_pkt_work(struct work_struct *work)
 {
@@ -111,66 +168,22 @@ virtio_transport_send_pkt_work(struct work_struct *work)
 	vq = vsock->vqs[VSOCK_VQ_TX];
 
 	for (;;) {
-		int ret, in_sg = 0, out_sg = 0;
-		struct scatterlist **sgs;
 		struct sk_buff *skb;
 		bool reply;
+		int ret;
 
 		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
 		if (!skb)
 			break;
 
 		reply = virtio_vsock_skb_reply(skb);
-		sgs = vsock->out_sgs;
-		sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
-			    sizeof(*virtio_vsock_hdr(skb)));
-		out_sg++;
-
-		if (!skb_is_nonlinear(skb)) {
-			if (skb->len > 0) {
-				sg_init_one(sgs[out_sg], skb->data, skb->len);
-				out_sg++;
-			}
-		} else {
-			struct skb_shared_info *si;
-			int i;
-
-			/* If the skb is nonlinear, then its buffer must contain
-			 * only the header and nothing more. The data is stored
-			 * in the fragmented part.
-			 */
-			WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
-
-			si = skb_shinfo(skb);
 
-			for (i = 0; i < si->nr_frags; i++) {
-				skb_frag_t *skb_frag = &si->frags[i];
-				void *va;
-
-				/* We will use 'page_to_virt()' for the userspace page
-				 * here, because the virtio or dma-mapping layers will
-				 * call 'virt_to_phys()' later to fill the buffer
-				 * descriptor. We don't touch memory at the "virtual"
-				 * address of this page.
-				 */
-				va = page_to_virt(skb_frag_page(skb_frag));
-				sg_init_one(sgs[out_sg],
-					    va + skb_frag_off(skb_frag),
-					    skb_frag_size(skb_frag));
-				out_sg++;
-			}
-		}
-
-		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
-		/* Usually this means that there is no more space available in
-		 * the vq.
-		 */
+		ret = virtio_transport_send_skb(skb, vq, vsock);
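+		/* A negative return usually means the TX vq is full: put
+		 * the skb back at the head of the queue and stop for now.
+		 */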
 		if (ret < 0) {
 			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
 			break;
 		}
 
-		virtio_transport_deliver_tap_pkt(skb);
-
 		if (reply) {
 			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
 			int val;
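Factoring the send path out into virtio_transport_send_skb() means callers other than the worker can push an skb onto the TX vq directly, provided they take vsock->tx_lock as the helper's comment requires. Below is a minimal sketch of such a caller; the name virtio_transport_send_skb_fast_path, the tx_run check, and the -ENODEV fallback are assumptions for illustration, not part of this commit.

static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock,
					       struct sk_buff *skb)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX];
	int ret;

	/* Honor the helper's contract: tx_lock must be held on the vq. */
	mutex_lock(&vsock->tx_lock);

	/* Hypothetical "device is up" check, mirroring the worker; this
	 * check is an assumption, not part of the hunks above.
	 */
	if (!vsock->tx_run) {
		ret = -ENODEV;
		goto out;
	}

	ret = virtio_transport_send_skb(skb, vq, vsock);
	if (ret == 0)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);
	return ret;
}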