@@ -42,6 +42,7 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
 #define BTINTEL_PCIE_HCI_TYPE_LEN	4
+#define BTINTEL_PCIE_HCI_CMD_PKT	0x00000001
 #define BTINTEL_PCIE_HCI_ACL_PKT	0x00000002
 #define BTINTEL_PCIE_HCI_SCO_PKT	0x00000003
 #define BTINTEL_PCIE_HCI_EVT_PKT	0x00000004
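Annotation (not part of the patch): the 32-bit values numerically mirror
the 1-byte BT SIG packet type IDs, only widened to the 4 bytes the device
firmware expects:

	BTINTEL_PCIE_HCI_CMD_PKT == (u32)HCI_COMMAND_PKT	/* 0x01 */
	BTINTEL_PCIE_HCI_ACL_PKT == (u32)HCI_ACLDATA_PKT	/* 0x02 */
	BTINTEL_PCIE_HCI_SCO_PKT == (u32)HCI_SCODATA_PKT	/* 0x03 */
	BTINTEL_PCIE_HCI_EVT_PKT == (u32)HCI_EVENT_PKT		/* 0x04 */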
@@ -88,6 +89,75 @@ static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
 	return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
 }
 
+/* Set the doorbell for TXQ to notify the device that @index (actually index-1)
+ * of the TFD is updated and ready to transmit.
+ */
+static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
+{
+	u32 val;
+
+	val = index;
+	val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
+
+	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
+}
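Annotation (not part of the patch): the single 32-bit CSR write packs the
doorbell cause vector and the new TFD write pointer together:

	/* bits [31:16]: BTINTEL_PCIE_TX_DB_VEC, bits [15:0]: index */
	val = (BTINTEL_PCIE_TX_DB_VEC << 16) | index;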
+
+/* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer
+ * descriptor) with the data length and the DMA address of the data buffer.
+ */
+static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
+				    struct sk_buff *skb)
+{
+	struct data_buf *buf;
+	struct tfd *tfd;
+
+	tfd = &txq->tfds[tfd_index];
+	memset(tfd, 0, sizeof(*tfd));
+
+	buf = &txq->bufs[tfd_index];
+
+	tfd->size = skb->len;
+	tfd->addr = buf->data_p_addr;
+
+	/* Copy the outgoing data to DMA buffer */
+	memcpy(buf->data, skb->data, tfd->size);
+}
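Annotation (not part of the patch): each TFD is paired with a data buffer
whose DMA address (data_p_addr) was set up ahead of time, so transmit is a
plain memcpy into that buffer rather than a per-packet streaming mapping
along the lines of:

	/* contrast only; a hypothetical alternative, not used by this driver */
	dma_addr_t addr = dma_map_single(&pdev->dev, skb->data, skb->len,
					 DMA_TO_DEVICE);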
+
+static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
+				  struct sk_buff *skb)
+{
+	int ret;
+	u16 tfd_index;
+	struct txq *txq = &data->txq;
+
+	tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
+
+	if (tfd_index > txq->count)
+		return -ERANGE;
+
+	/* Prepare for TX. It updates the TFD with the length of data and
+	 * address of the DMA buffer, and copies the data to the DMA buffer
+	 */
+	btintel_pcie_prepare_tx(txq, tfd_index, skb);
+
+	tfd_index = (tfd_index + 1) % txq->count;
+	data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
+
+	/* Arm wait event condition */
+	data->tx_wait_done = false;
+
+	/* Set the doorbell to notify the device */
+	btintel_pcie_set_tx_db(data, tfd_index);
+
+	/* Wait for the complete interrupt - URBD0 */
+	ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
+				 msecs_to_jiffies(TX_WAIT_TIMEOUT_MS));
+	if (!ret)
+		return -ETIME;
+
+	return 0;
+}
+
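Annotation (not part of the patch): the synchronous TX flow above, in
order, using the names this patch defines:

	idx = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];	/* current write ptr */
	btintel_pcie_prepare_tx(txq, idx, skb);		/* fill TFD, copy    */
	idx = (idx + 1) % txq->count;			/* advance and wrap  */
	data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = idx;
	btintel_pcie_set_tx_db(data, idx);		/* ring the doorbell */
	/* sleep until the MSI-X TX handler sets tx_wait_done, or time out
	 * after TX_WAIT_TIMEOUT_MS and return -ETIME to the caller
	 */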
 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
  * is available to receive the data
  */
@@ -298,7 +368,7 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
  * It checks the frame header to identify the data type, creates an skb,
  * and calls the HCI API
  */
-static int btintel_pcie_hci_recv_frame(struct btintel_pcie_data *data,
+static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
 				   struct sk_buff *skb)
 {
 	int ret;
@@ -406,7 +476,7 @@ static void btintel_pcie_rx_work(struct work_struct *work)
 	/* Process the sk_buff in queue and send to the HCI layer */
 	while ((skb = skb_dequeue(&data->rx_skb_q))) {
-		err = btintel_pcie_hci_recv_frame(data, skb);
+		err = btintel_pcie_recv_frame(data, skb);
 		if (err)
 			bt_dev_err(hdev, "Failed to send received frame: %d",
 				   err);
@@ -933,15 +1003,250 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
 	return err;
 }
 
+static int btintel_pcie_open(struct hci_dev *hdev)
+{
+	bt_dev_dbg(hdev, "");
+
+	return 0;
+}
+
+static int btintel_pcie_close(struct hci_dev *hdev)
+{
+	bt_dev_dbg(hdev, "");
+
+	return 0;
+}
+
+static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+	struct sk_buff *skb;
+	struct hci_event_hdr *hdr;
+	struct hci_ev_cmd_complete *evt;
+
+	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+	hdr->evt = HCI_EV_CMD_COMPLETE;
+	hdr->plen = sizeof(*evt) + 1;
+
+	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+	evt->ncmd = 0x01;
+	evt->opcode = cpu_to_le16(opcode);
+
+	*(u8 *)skb_put(skb, 1) = 0x00;
+
+	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+
+	return hci_recv_frame(hdev, skb);
+}
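Annotation (not part of the patch): the synthesized event, byte for byte,
for opcode 0xfc01:

	0e 04 01 01 fc 00
	 |  |  |  \___/ \- status (0x00 = success)
	 |  |  |    \- opcode, little endian (0xfc01)
	 |  |  \- ncmd = 1, returning one command credit to the HCI core
	 |  \- plen = 4
	 \- evt = HCI_EV_CMD_COMPLETE (0x0e)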
+
+static int btintel_pcie_send_frame(struct hci_dev *hdev,
+				   struct sk_buff *skb)
+{
+	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+	int ret;
+	u32 type;
+
+	/* Due to the fw limitation, the type header of the packet should be
+	 * 4 bytes unlike 1 byte for UART. In UART, the firmware can read
+	 * the first byte to get the packet type and redirect the rest of the
+	 * packet to the right handler.
+	 *
+	 * But for PCIe, THF(Transfer Flow Handler) fetches the 4 bytes of data
+	 * from DMA memory and by the time it reads the first 4 bytes, it has
+	 * already consumed part of the packet. Thus the packet type indicator
+	 * for iBT PCIe is 4 bytes.
+	 *
+	 * Luckily, when the HCI core creates the skb, it allocates 8 bytes of
+	 * headroom for profile and driver use, so the iBT PCIe packet type
+	 * can be prepended in front before sending the data to the device.
+	 */
+	switch (hci_skb_pkt_type(skb)) {
+	case HCI_COMMAND_PKT:
+		type = BTINTEL_PCIE_HCI_CMD_PKT;
+		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+			struct hci_command_hdr *cmd = (void *)skb->data;
+			__u16 opcode = le16_to_cpu(cmd->opcode);
+
+			/* When the 0xfc01 command is issued to boot into
+			 * the operational firmware, it will actually not
+			 * send a command complete event. To keep the flow
+			 * control working inject that event here.
+			 */
+			if (opcode == 0xfc01)
+				btintel_pcie_inject_cmd_complete(hdev, opcode);
+		}
+		hdev->stat.cmd_tx++;
+		break;
+	case HCI_ACLDATA_PKT:
+		type = BTINTEL_PCIE_HCI_ACL_PKT;
+		hdev->stat.acl_tx++;
+		break;
+	case HCI_SCODATA_PKT:
+		type = BTINTEL_PCIE_HCI_SCO_PKT;
+		hdev->stat.sco_tx++;
+		break;
+	default:
+		bt_dev_err(hdev, "Unknown HCI packet type");
+		return -EILSEQ;
+	}
+	memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
+	       BTINTEL_PCIE_HCI_TYPE_LEN);
+
+	ret = btintel_pcie_send_sync(data, skb);
+	if (ret) {
+		hdev->stat.err_tx++;
+		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
+		goto exit_error;
+	} else {
+		hdev->stat.byte_tx += skb->len;
+		kfree_skb(skb);
+	}
+
+exit_error:
+	return ret;
+}
+
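Annotation (not part of the patch): after the skb_push()/memcpy() above, a
parameterless HCI command such as HCI_Reset (opcode 0x0c03) would leave the
driver as (little-endian host assumed, since the u32 is copied as-is):

	01 00 00 00 03 0c 00
	\---------/ \------/
	4-byte iBT PCIe type, then the standard 3-byte HCI command packet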
 static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
 {
-	/* TODO: Unregister and release hdev */
+	struct hci_dev *hdev;
+
+	hdev = data->hdev;
+	hci_unregister_dev(hdev);
+	hci_free_dev(hdev);
+	data->hdev = NULL;
+}
+
+static int btintel_pcie_setup(struct hci_dev *hdev)
+{
+	const u8 param[1] = { 0xFF };
+	struct intel_version_tlv ver_tlv;
+	struct sk_buff *skb;
+	int err;
+
+	BT_DBG("%s", hdev->name);
+
+	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
+			   PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	/* Check the status */
+	if (skb->data[0]) {
+		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
+			   skb->data[0]);
+		err = -EIO;
+		goto exit_error;
+	}
+
+	/* Apply the common HCI quirks for Intel device */
+	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+	set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+
+	/* Set up the quality report callback for Intel devices */
+	hdev->set_quality_report = btintel_set_quality_report;
+
+	memset(&ver_tlv, 0, sizeof(ver_tlv));
+	/* For TLV type device, parse the tlv data */
+	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
+	if (err) {
+		bt_dev_err(hdev, "Failed to parse TLV version information");
+		goto exit_error;
+	}
+
+	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
+	case 0x37:
+		break;
+	default:
+		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
+			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
+		err = -EINVAL;
+		goto exit_error;
+	}
+
+	/* Check for supported iBT hardware variants of this firmware
+	 * loading method.
+	 *
+	 * This check has been put in place to ensure correct forward
+	 * compatibility options when newer hardware variants come
+	 * along.
+	 */
+	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
+	case 0x1e:	/* BzrI */
+		/* Display version information of TLV type */
+		btintel_version_info_tlv(hdev, &ver_tlv);
+
+		/* Apply the device specific HCI quirks for TLV based devices
+		 *
+		 * All TLV based devices support WBS
+		 */
+		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
+		/* Apply LE States quirk from solar onwards */
+		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
+		/* Setup MSFT Extension support */
+		btintel_set_msft_opcode(hdev,
+					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+
+		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+		if (err)
+			goto exit_error;
+		break;
+	default:
+		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
+			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+		err = -EINVAL;
+		break;
+	}
+
+exit_error:
+	kfree_skb(skb);
+
+	return err;
 }
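Annotation (not part of the patch): 0xfc05 is Intel's vendor Read Version
command; the 0xFF parameter appears to select the TLV-formatted reply that
btintel_parse_version_tlv() then consumes. The two switch statements gate
the driver, as of this patch, to:

	INTEL_HW_PLATFORM(ver_tlv.cnvi_bt) == 0x37	/* Intel platform */
	INTEL_HW_VARIANT(ver_tlv.cnvi_bt)  == 0x1e	/* BzrI, for now  */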
 
 static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
 {
-	/* TODO: initialize hdev and assign the callbacks to hdev */
-	return -ENODEV;
+	int err;
+	struct hci_dev *hdev;
+
+	hdev = hci_alloc_dev();
+	if (!hdev)
+		return -ENOMEM;
+
+	hdev->bus = HCI_PCI;
+	hci_set_drvdata(hdev, data);
+
+	data->hdev = hdev;
+	SET_HCIDEV_DEV(hdev, &data->pdev->dev);
+
+	hdev->manufacturer = 2;
+	hdev->open = btintel_pcie_open;
+	hdev->close = btintel_pcie_close;
+	hdev->send = btintel_pcie_send_frame;
+	hdev->setup = btintel_pcie_setup;
+	hdev->shutdown = btintel_shutdown_combined;
+	hdev->hw_error = btintel_hw_error;
+	hdev->set_diag = btintel_set_diag;
+	hdev->set_bdaddr = btintel_set_bdaddr;
+
+	err = hci_register_dev(hdev);
+	if (err < 0) {
+		BT_ERR("Failed to register to hdev (%d)", err);
+		goto exit_error;
+	}
+
+	return 0;
+
+exit_error:
+	hci_free_dev(hdev);
+	return err;
 }
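Annotation (not part of the patch): open and close are no-op stubs here,
presumably because the PCIe transport is brought up at probe time rather
than per HCI session; send is the synchronous TX path added above, while
setup, shutdown, hw_error, set_diag and set_bdaddr reuse the shared
btintel helpers common to the USB and UART transports. A rough sketch of
the expected call site in probe (error label hypothetical):

	err = btintel_pcie_setup_hdev(data);	/* registers the hdev */
	if (err)
		goto disable;			/* hypothetical unwind */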
 
 static int btintel_pcie_probe(struct pci_dev *pdev,