@@ -1620,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1655,7 +1640,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1670,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
@@ -1688,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
 
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
+
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}