 #define TB_TIMEOUT 100 /* ms */
 #define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
 
+/*
+ * How many times a bandwidth allocation request from the graphics driver
+ * is retried if the DP tunnel is still activating.
+ */
+#define TB_BW_ALLOC_RETRIES 3
+
 /*
  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
  * direction. This is 40G - 10% guard band bandwidth.
@@ -69,15 +75,20 @@ static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
 }
 
 struct tb_hotplug_event {
-	struct work_struct work;
+	struct delayed_work work;
 	struct tb *tb;
 	u64 route;
 	u8 port;
 	bool unplug;
+	int retry;
 };
 
 static void tb_scan_port(struct tb_port *port);
 static void tb_handle_hotplug(struct work_struct *work);
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+				       const char *reason);
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+					  int retry, unsigned long delay);
 
 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
 {
@@ -91,8 +102,8 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
 	ev->route = route;
 	ev->port = port;
 	ev->unplug = unplug;
-	INIT_WORK(&ev->work, tb_handle_hotplug);
-	queue_work(tb->wq, &ev->work);
+	INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
+	queue_delayed_work(tb->wq, &ev->work, 0);
 }
 
 /* enumeration & hot plug handling */
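
A minimal self-contained sketch (not part of the patch) of the work_struct to delayed_work conversion this hunk and the later container_of() changes rely on: INIT_DELAYED_WORK() plus queue_delayed_work() with a zero delay behaves like the old queue_work() path, and the handler recovers its container through the embedded work member. The struct, function, and workqueue names below are illustrative only.

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event {
	struct delayed_work work;	/* was: struct work_struct work */
	int payload;
};

static void my_handler(struct work_struct *work)
{
	/* delayed_work embeds a work_struct named "work", hence work.work */
	struct my_event *ev = container_of(work, struct my_event, work.work);

	pr_info("handling payload %d\n", ev->payload);
	kfree(ev);
}

static void my_queue(struct workqueue_struct *wq, int payload,
		     unsigned long delay)
{
	struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return;
	ev->payload = payload;
	INIT_DELAYED_WORK(&ev->work, my_handler);
	/* delay == 0 queues the work immediately, like queue_work() did */
	queue_delayed_work(wq, &ev->work, delay);
}
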
@@ -962,7 +973,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
 	return 0;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 err_reclaim:
 	if (tb_route(parent))
 		tb_reclaim_usb3_bandwidth(tb, down, up);
@@ -1726,7 +1737,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
 		break;
 	}
 
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 }
 
 /*
@@ -1863,12 +1874,76 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
 	return NULL;
 }
 
+static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
+{
+	struct tb_port *in = tunnel->src_port;
+	struct tb_port *out = tunnel->dst_port;
+	struct tb *tb = data;
+
+	mutex_lock(&tb->lock);
+	if (tb_tunnel_is_active(tunnel)) {
+		int consumed_up, consumed_down, ret;
+
+		tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
+
+		/* If reading the tunnel's consumed bandwidth fails, tear it down */
+		ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
+						   &consumed_down);
+		if (ret) {
+			tb_tunnel_warn(tunnel,
+				       "failed to read consumed bandwidth, tearing down\n");
+			tb_deactivate_and_free_tunnel(tunnel);
+		} else {
+			tb_reclaim_usb3_bandwidth(tb, in, out);
+			/*
+			 * Transition the links to asymmetric if the
+			 * consumption exceeds the threshold.
+			 */
+			tb_configure_asym(tb, in, out, consumed_up,
+					  consumed_down);
+			/*
+			 * Update the domain with the new bandwidth
+			 * estimation.
+			 */
+			tb_recalc_estimated_bandwidth(tb);
+			/*
+			 * In case a DP tunnel exists, change the host
+			 * router's 1st children TMU mode to HiFi for
+			 * CL0s to work.
+			 */
+			tb_increase_tmu_accuracy(tunnel);
+		}
+	} else {
+		struct tb_port *in = tunnel->src_port;
+
+		/*
+		 * This tunnel failed to establish. This means DPRX
+		 * negotiation most likely did not complete, which
+		 * happens either because there is no graphics driver
+		 * loaded or not all DP cables were connected to the
+		 * discrete router.
+		 *
+		 * In both cases we remove the DP IN adapter from the
+		 * available resources as it is not usable. This will
+		 * also tear down the tunnel and try to re-use the
+		 * released DP OUT.
+		 *
+		 * It will be added back only if there is a hotplug for
+		 * the DP IN again.
+		 */
+		tb_tunnel_warn(tunnel, "not active, tearing down\n");
+		tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed");
+	}
+	mutex_unlock(&tb->lock);
+
+	tb_domain_put(tb);
+}
+
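
The tb_domain_get()/tb_domain_put() pairing around this callback (the get appears in the allocation hunk below) follows the usual rule for asynchronous completion: take a reference when the callback is armed and drop it once the callback has run, so the object cannot disappear in between. A rough sketch of that pattern with a plain kref; my_ctx, my_start() and my_done() are illustrative names, not driver code.

#include <linux/kref.h>
#include <linux/slab.h>

struct my_ctx {
	struct kref kref;
	/* state the asynchronous operation needs */
};

static void my_ctx_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_ctx, kref));
}

/* Runs when the asynchronous operation finishes. */
static void my_done(void *data)
{
	struct my_ctx *ctx = data;

	/* consume the result here, then balance the get taken in my_start() */
	kref_put(&ctx->kref, my_ctx_release);
}

static void my_start(struct my_ctx *ctx)
{
	/* keep ctx alive until my_done() has run */
	kref_get(&ctx->kref);
	/* hand (my_done, ctx) to whatever completes asynchronously */
}
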
 static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
 			     struct tb_port *out)
 {
 	int available_up, available_down, ret, link_nr;
 	struct tb_cm *tcm = tb_priv(tb);
-	int consumed_up, consumed_down;
 	struct tb_tunnel *tunnel;
 
 	/*
@@ -1920,47 +1995,29 @@ static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
 		    available_up, available_down);
 
 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
-				    available_down);
+				    available_down, tb_dp_tunnel_active,
+				    tb_domain_get(tb));
 	if (!tunnel) {
 		tb_port_dbg(out, "could not allocate DP tunnel\n");
 		goto err_reclaim_usb;
 	}
 
-	if (tb_tunnel_activate(tunnel)) {
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+
+	ret = tb_tunnel_activate(tunnel);
+	if (ret && ret != -EINPROGRESS) {
 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
+		list_del(&tunnel->list);
 		goto err_free;
 	}
 
-	/* If fail reading tunnel's consumed bandwidth, tear it down */
-	ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
-	if (ret)
-		goto err_deactivate;
-
-	list_add_tail(&tunnel->list, &tcm->tunnel_list);
-
-	tb_reclaim_usb3_bandwidth(tb, in, out);
-	/*
-	 * Transition the links to asymmetric if the consumption exceeds
-	 * the threshold.
-	 */
-	tb_configure_asym(tb, in, out, consumed_up, consumed_down);
-
-	/* Update the domain with the new bandwidth estimation */
-	tb_recalc_estimated_bandwidth(tb);
-
-	/*
-	 * In case of DP tunnel exists, change host router's 1st children
-	 * TMU mode to HiFi for CL0s to work.
-	 */
-	tb_increase_tmu_accuracy(tunnel);
 	return;
 
-err_deactivate:
-	tb_tunnel_deactivate(tunnel);
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 err_reclaim_usb:
 	tb_reclaim_usb3_bandwidth(tb, in, out);
+	tb_domain_put(tb);
 err_detach_group:
 	tb_detach_bandwidth_group(in);
 err_dealloc_dp:
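
With the callback in place, tb_tunnel_activate() can legitimately return -EINPROGRESS: DPRX negotiation is still running and tb_dp_tunnel_active() will report the outcome later, so only other non-zero values are treated as failure. A condensed sketch of that caller-side convention, mirroring the error handling in this hunk; start_dp_tunnel() is a hypothetical wrapper, not a driver function.

/* Hypothetical wrapper mirroring the activation error handling above. */
static int start_dp_tunnel(struct tb_tunnel *tunnel)
{
	int ret = tb_tunnel_activate(tunnel);

	/*
	 * -EINPROGRESS is not a failure: the tunnel keeps activating in
	 * the background and the activation callback reports the result.
	 */
	if (ret && ret != -EINPROGRESS)
		return ret;	/* hard failure, caller cleans up */

	return 0;
}
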
@@ -2180,7 +2237,7 @@ static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
 
 	tb_tunnel_deactivate(tunnel);
 	list_del(&tunnel->list);
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 	return 0;
 }
 
@@ -2210,7 +2267,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 	if (tb_tunnel_activate(tunnel)) {
 		tb_port_info(up,
 			     "PCIe tunnel activation failed, aborting\n");
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 		return -EIO;
 	}
 
@@ -2269,7 +2326,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
 	return 0;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 err_clx:
 	tb_enable_clx(sw);
 	mutex_unlock(&tb->lock);
@@ -2332,7 +2389,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
  */
 static void tb_handle_hotplug(struct work_struct *work)
 {
-	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
 	struct tb *tb = ev->tb;
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
@@ -2637,7 +2694,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
 
 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 {
-	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
 	int requested_bw, requested_up, requested_down, ret;
 	struct tb_tunnel *tunnel;
 	struct tb *tb = ev->tb;
@@ -2664,7 +2721,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 		goto put_sw;
 	}
 
-	tb_port_dbg(in, "handling bandwidth allocation request\n");
+	tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", ev->retry);
 
 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
 	if (!tunnel) {
@@ -2717,12 +2774,33 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 
 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
 	if (ret) {
-		if (ret == -ENOBUFS)
+		if (ret == -ENOBUFS) {
 			tb_tunnel_warn(tunnel,
 				       "not enough bandwidth available\n");
-		else
+		} else if (ret == -ENOTCONN) {
+			tb_tunnel_dbg(tunnel, "not active yet\n");
+			/*
+			 * We got a bandwidth allocation request but the
+			 * tunnel is not yet active. This means that
+			 * tb_dp_tunnel_active() has not yet been called
+			 * for this tunnel. Allow it some time and retry
+			 * this request a couple of times.
+			 */
+			if (ev->retry < TB_BW_ALLOC_RETRIES) {
+				tb_tunnel_dbg(tunnel,
+					      "retrying bandwidth allocation request\n");
+				tb_queue_dp_bandwidth_request(tb, ev->route,
+							      ev->port,
+							      ev->retry + 1,
+							      msecs_to_jiffies(50));
+			} else {
+				tb_tunnel_dbg(tunnel,
+					      "run out of retries, failing the request");
+			}
+		} else {
 			tb_tunnel_warn(tunnel,
 				       "failed to change bandwidth allocation\n");
+		}
 	} else {
 		tb_tunnel_dbg(tunnel,
 			      "bandwidth allocation changed to %d/%d Mb/s\n",
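
In short, -ENOTCONN means the tunnel is still activating, so the request is re-queued with a 50 ms delay until TB_BW_ALLOC_RETRIES attempts are exhausted (roughly 150 ms of extra deferral in the worst case). A compact sketch of that bounded-retry idiom follows; unlike the driver, which allocates a fresh event per retry, the sketch re-queues the same object, and my_request/my_requeue are illustrative names.

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#define MY_MAX_RETRIES	3	/* plays the role of TB_BW_ALLOC_RETRIES */

struct my_request {
	struct delayed_work work;
	int retry;
};

static void my_requeue(struct workqueue_struct *wq, struct my_request *req)
{
	if (req->retry < MY_MAX_RETRIES) {
		req->retry++;
		/* not ready yet: try again a little later */
		queue_delayed_work(wq, &req->work, msecs_to_jiffies(50));
	} else {
		pr_warn("out of retries, failing the request\n");
	}
}
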
@@ -2743,7 +2821,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 	kfree(ev);
 }
 
-static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+					  int retry, unsigned long delay)
 {
 	struct tb_hotplug_event *ev;
 
@@ -2754,8 +2833,9 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
 	ev->tb = tb;
 	ev->route = route;
 	ev->port = port;
-	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
-	queue_work(tb->wq, &ev->work);
+	ev->retry = retry;
+	INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+	queue_delayed_work(tb->wq, &ev->work, delay);
 }
 
 static void tb_handle_notification(struct tb *tb, u64 route,
@@ -2775,7 +2855,7 @@ static void tb_handle_notification(struct tb *tb, u64 route,
 		if (tb_cfg_ack_notification(tb->ctl, route, error))
 			tb_warn(tb, "could not ack notification on %llx\n",
 				route);
-		tb_queue_dp_bandwidth_request(tb, route, error->port);
+		tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0);
 		break;
 
 	default:
@@ -2830,7 +2910,7 @@ static void tb_stop(struct tb *tb)
 		 */
 		if (tb_tunnel_is_dma(tunnel))
 			tb_tunnel_deactivate(tunnel);
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 	}
 	tb_switch_remove(tb->root_switch);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
@@ -3026,7 +3106,7 @@ static int tb_resume_noirq(struct tb *tb)
 		if (tb_tunnel_is_usb3(tunnel))
 			usb3_delay = 500;
 		tb_tunnel_deactivate(tunnel);
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 	}
 
 	/* Re-create our tunnels now */