@@ -1620,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1655,7 +1640,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1670,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
@@ -1688,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
 
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
+
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}