 #define TB_TIMEOUT			100	/* ms */
 #define TB_RELEASE_BW_TIMEOUT		10000	/* ms */
 
+/*
+ * How many times a bandwidth allocation request from the graphics driver
+ * is retried if the DP tunnel is still activating.
+ */
+#define TB_BW_ALLOC_RETRIES		3
+
 /*
  * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
  * direction. This is 40G - 10% guard band bandwidth.
@@ -69,15 +75,20 @@ static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
 }
 
 struct tb_hotplug_event {
-	struct work_struct work;
+	struct delayed_work work;
 	struct tb *tb;
 	u64 route;
 	u8 port;
 	bool unplug;
+	int retry;
 };
 
 static void tb_scan_port(struct tb_port *port);
 static void tb_handle_hotplug(struct work_struct *work);
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+				       const char *reason);
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+					  int retry, unsigned long delay);
 
 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
 {
@@ -91,8 +102,8 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
 	ev->route = route;
 	ev->port = port;
 	ev->unplug = unplug;
-	INIT_WORK(&ev->work, tb_handle_hotplug);
-	queue_work(tb->wq, &ev->work);
+	INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
+	queue_delayed_work(tb->wq, &ev->work, 0);
 }
 
 /* enumeration & hot plug handling */
@@ -962,7 +973,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
 	return 0;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 err_reclaim:
 	if (tb_route(parent))
 		tb_reclaim_usb3_bandwidth(tb, down, up);
@@ -1726,7 +1737,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
 		break;
 	}
 
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 }
 
 /*
@@ -1863,12 +1874,76 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
 	return NULL;
 }
 
+static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
+{
+	struct tb_port *in = tunnel->src_port;
+	struct tb_port *out = tunnel->dst_port;
+	struct tb *tb = data;
+
+	mutex_lock(&tb->lock);
+	if (tb_tunnel_is_active(tunnel)) {
+		int consumed_up, consumed_down, ret;
+
+		tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
+
+		/* If fail reading tunnel's consumed bandwidth, tear it down */
+		ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
+						   &consumed_down);
+		if (ret) {
+			tb_tunnel_warn(tunnel,
+				       "failed to read consumed bandwidth, tearing down\n");
+			tb_deactivate_and_free_tunnel(tunnel);
+		} else {
+			tb_reclaim_usb3_bandwidth(tb, in, out);
+			/*
+			 * Transition the links to asymmetric if the
+			 * consumption exceeds the threshold.
+			 */
+			tb_configure_asym(tb, in, out, consumed_up,
+					  consumed_down);
+			/*
+			 * Update the domain with the new bandwidth
+			 * estimation.
+			 */
+			tb_recalc_estimated_bandwidth(tb);
+			/*
+			 * In case of DP tunnel exists, change host
+			 * router's 1st children TMU mode to HiFi for
+			 * CL0s to work.
+			 */
+			tb_increase_tmu_accuracy(tunnel);
+		}
+	} else {
+		struct tb_port *in = tunnel->src_port;
+
+		/*
+		 * This tunnel failed to establish. This means DPRX
+		 * negotiation most likely did not complete, which
+		 * happens either because there is no graphics driver
+		 * loaded or not all DP cables were connected to the
+		 * discrete router.
+		 *
+		 * In both cases we remove the DP IN adapter from the
+		 * available resources as it is not usable. This will
+		 * also tear down the tunnel and try to re-use the
+		 * released DP OUT.
+		 *
+		 * It will be added back only if there is hotplug for
+		 * the DP IN again.
+		 */
+		tb_tunnel_warn(tunnel, "not active, tearing down\n");
+		tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed");
+	}
+	mutex_unlock(&tb->lock);
+
+	tb_domain_put(tb);
+}
+
 static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
 			     struct tb_port *out)
 {
 	int available_up, available_down, ret, link_nr;
 	struct tb_cm *tcm = tb_priv(tb);
-	int consumed_up, consumed_down;
 	struct tb_tunnel *tunnel;
 
 	/*
@@ -1920,47 +1995,29 @@ static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
 		    available_up, available_down);
 
 	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
-				    available_down);
+				    available_down, tb_dp_tunnel_active,
+				    tb_domain_get(tb));
 	if (!tunnel) {
 		tb_port_dbg(out, "could not allocate DP tunnel\n");
 		goto err_reclaim_usb;
 	}
 
-	if (tb_tunnel_activate(tunnel)) {
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+
+	ret = tb_tunnel_activate(tunnel);
+	if (ret && ret != -EINPROGRESS) {
 		tb_port_info(out, "DP tunnel activation failed, aborting\n");
+		list_del(&tunnel->list);
 		goto err_free;
 	}
 
-	/* If fail reading tunnel's consumed bandwidth, tear it down */
-	ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
-	if (ret)
-		goto err_deactivate;
-
-	list_add_tail(&tunnel->list, &tcm->tunnel_list);
-
-	tb_reclaim_usb3_bandwidth(tb, in, out);
-	/*
-	 * Transition the links to asymmetric if the consumption exceeds
-	 * the threshold.
-	 */
-	tb_configure_asym(tb, in, out, consumed_up, consumed_down);
-
-	/* Update the domain with the new bandwidth estimation */
-	tb_recalc_estimated_bandwidth(tb);
-
-	/*
-	 * In case of DP tunnel exists, change host router's 1st children
-	 * TMU mode to HiFi for CL0s to work.
-	 */
-	tb_increase_tmu_accuracy(tunnel);
 	return;
 
-err_deactivate:
-	tb_tunnel_deactivate(tunnel);
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 err_reclaim_usb:
 	tb_reclaim_usb3_bandwidth(tb, in, out);
+	tb_domain_put(tb);
 err_detach_group:
 	tb_detach_bandwidth_group(in);
 err_dealloc_dp:
@@ -2180,7 +2237,7 @@ static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
 
 	tb_tunnel_deactivate(tunnel);
 	list_del(&tunnel->list);
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 	return 0;
 }
 
@@ -2210,7 +2267,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 	if (tb_tunnel_activate(tunnel)) {
 		tb_port_info(up,
 			     "PCIe tunnel activation failed, aborting\n");
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 		return -EIO;
 	}
 
@@ -2269,7 +2326,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
 	return 0;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 err_clx:
 	tb_enable_clx(sw);
 	mutex_unlock(&tb->lock);
@@ -2332,7 +2389,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
  */
 static void tb_handle_hotplug(struct work_struct *work)
 {
-	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
 	struct tb *tb = ev->tb;
 	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
@@ -2637,7 +2694,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
 
 static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 {
-	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
 	int requested_bw, requested_up, requested_down, ret;
 	struct tb_tunnel *tunnel;
 	struct tb *tb = ev->tb;
@@ -2664,7 +2721,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 		goto put_sw;
 	}
 
-	tb_port_dbg(in, "handling bandwidth allocation request\n");
+	tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", ev->retry);
 
 	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
 	if (!tunnel) {
@@ -2717,12 +2774,33 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 
 	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
 	if (ret) {
-		if (ret == -ENOBUFS)
+		if (ret == -ENOBUFS) {
 			tb_tunnel_warn(tunnel,
 				       "not enough bandwidth available\n");
-		else
+		} else if (ret == -ENOTCONN) {
+			tb_tunnel_dbg(tunnel, "not active yet\n");
+			/*
+			 * We got a bandwidth allocation request but the
+			 * tunnel is not yet active. This means that
+			 * tb_dp_tunnel_active() has not yet been called
+			 * for this tunnel. Allow it some time and retry
+			 * this request a couple of times.
+			 */
+			if (ev->retry < TB_BW_ALLOC_RETRIES) {
+				tb_tunnel_dbg(tunnel,
+					      "retrying bandwidth allocation request\n");
+				tb_queue_dp_bandwidth_request(tb, ev->route,
+							      ev->port,
+							      ev->retry + 1,
+							      msecs_to_jiffies(50));
+			} else {
+				tb_tunnel_dbg(tunnel,
+					      "run out of retries, failing the request");
+			}
+		} else {
 			tb_tunnel_warn(tunnel,
 				       "failed to change bandwidth allocation\n");
+		}
 	} else {
 		tb_tunnel_dbg(tunnel,
 			      "bandwidth allocation changed to %d/%d Mb/s\n",
@@ -2743,7 +2821,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
 	kfree(ev);
 }
 
-static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+					  int retry, unsigned long delay)
 {
 	struct tb_hotplug_event *ev;
 
@@ -2754,8 +2833,9 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
 	ev->tb = tb;
 	ev->route = route;
 	ev->port = port;
-	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
-	queue_work(tb->wq, &ev->work);
+	ev->retry = retry;
+	INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+	queue_delayed_work(tb->wq, &ev->work, delay);
 }
 
 static void tb_handle_notification(struct tb *tb, u64 route,
@@ -2775,7 +2855,7 @@ static void tb_handle_notification(struct tb *tb, u64 route,
 		if (tb_cfg_ack_notification(tb->ctl, route, error))
 			tb_warn(tb, "could not ack notification on %llx\n",
 				route);
-		tb_queue_dp_bandwidth_request(tb, route, error->port);
+		tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0);
 		break;
 
 	default:
@@ -2830,7 +2910,7 @@ static void tb_stop(struct tb *tb)
 		 */
 		if (tb_tunnel_is_dma(tunnel))
 			tb_tunnel_deactivate(tunnel);
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 	}
 	tb_switch_remove(tb->root_switch);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
@@ -3026,7 +3106,7 @@ static int tb_resume_noirq(struct tb *tb)
 		if (tb_tunnel_is_usb3(tunnel))
 			usb3_delay = 500;
 		tb_tunnel_deactivate(tunnel);
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 	}
 
 	/* Re-create our tunnels now */
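For context on the mechanism the diff switches to: struct tb_hotplug_event now embeds a delayed_work instead of a work_struct, so a bandwidth allocation request that arrives while the DP tunnel is still activating can be retried a bit later instead of failing immediately. The stand-alone sketch below illustrates that delayed-work retry pattern. The demo_* names are hypothetical and not part of the driver, and the driver itself allocates a fresh event for each retry via tb_queue_dp_bandwidth_request() rather than requeueing the same one; only the workqueue calls (INIT_DELAYED_WORK, queue_delayed_work, container_of on work.work, msecs_to_jiffies) mirror what the patch relies on.

/* Minimal sketch, assuming system_wq is an acceptable queue for the demo. */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_RETRIES	3

struct demo_event {
	struct delayed_work work;	/* embedded, so container_of() works */
	int retry;
};

/* Placeholder for "is the resource ready yet?" */
static bool demo_resource_ready(void)
{
	return false;
}

static void demo_handler(struct work_struct *work)
{
	/* @work points at ev->work.work, so recover the enclosing event */
	struct demo_event *ev = container_of(work, struct demo_event, work.work);

	if (!demo_resource_ready() && ev->retry < DEMO_MAX_RETRIES) {
		/* Not ready yet: requeue the same item with a short delay */
		ev->retry++;
		queue_delayed_work(system_wq, &ev->work, msecs_to_jiffies(50));
		return;
	}

	kfree(ev);
}

static int demo_queue_request(void)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ev->work, demo_handler);
	/* A zero delay behaves like the old queue_work() call did */
	queue_delayed_work(system_wq, &ev->work, 0);
	return 0;
}

Queueing with a zero delay keeps the original hotplug behaviour unchanged, which is why tb_queue_hotplug() and the notification path above can switch to queue_delayed_work() without altering their timing.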