2 changes: 1 addition & 1 deletion esp_hosted_ng/host/esp_bt.c
@@ -113,7 +113,7 @@ static ESP_BT_SEND_FRAME_PROTOTYPE()
 
 	/* Populate new SKB */
 	skb_copy_from_linear_data(skb, pos, skb->len);
-	skb_put(new_skb, skb->len);
+	skb_put(new_skb, skb->len + pad_len);
 
 	/* Replace old SKB */
 	dev_kfree_skb_any(skb);
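Note: this one-line fix makes the reassembled skb's length account for the pad bytes. new_skb is sized for skb->len + pad_len, but skb_put() previously advanced the tail by only skb->len, so the trailing padding was never part of the buffer handed down to the transport. A minimal sketch of the pad-and-copy pattern, using standard skbuff APIs; pad_skb_copy is a hypothetical helper name, not the driver's own:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hedged sketch, not the driver's actual code: replace an skb with a
 * copy whose length covers payload plus pad bytes, mirroring the fixed
 * skb_put(new_skb, skb->len + pad_len) call above. */
static struct sk_buff *pad_skb_copy(struct sk_buff *skb, u16 pad_len)
{
	struct sk_buff *new_skb = dev_alloc_skb(skb->len + pad_len);

	if (!new_skb)
		return NULL;

	/* Advance the tail by the full padded length, so skb->len
	 * includes the padding... */
	skb_put(new_skb, skb->len + pad_len);
	/* ...then fill in the payload and zero the trailing pad bytes. */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	memset(new_skb->data + skb->len, 0, pad_len);

	dev_kfree_skb_any(skb);
	return new_skb;
}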
155 changes: 84 additions & 71 deletions esp_hosted_ng/host/spi/esp_spi.c
@@ -41,8 +41,6 @@ static struct esp_if_ops if_ops = {
 	.write = write_packet,
 };
 
-static DEFINE_MUTEX(spi_lock);
-
 static void open_data_path(void)
 {
 	atomic_set(&tx_pending, 0);
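Note: dropping spi_lock is presumably safe only in combination with the spi_init change at the bottom of this file, which moves esp_spi_work onto an ordered workqueue that runs at most one work item at a time; see the sketch after that hunk.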
@@ -278,93 +276,108 @@ static void esp_spi_work(struct work_struct *work)
 	int ret = 0;
 	volatile int trans_ready, rx_pending;
 
-	mutex_lock(&spi_lock);
-
 	trans_ready = gpio_get_value(HANDSHAKE_PIN);
 	rx_pending = gpio_get_value(SPI_DATA_READY_PIN);
 
-	if (trans_ready) {
-		if (data_path) {
-			tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
-			if (!tx_skb)
-				tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
-			if (!tx_skb)
-				tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
-			if (tx_skb) {
-				if (atomic_read(&tx_pending))
-					atomic_dec(&tx_pending);
-
-				/* resume network tx queue if bearable load */
-				cb = (struct esp_skb_cb *)tx_skb->cb;
-				if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
-					esp_tx_resume(cb->priv);
+	if (!trans_ready) {
+		return;
+	}
+
+	if (data_path) {
+		tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
+		if (!tx_skb)
+			tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
+		if (!tx_skb)
+			tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
+		if (tx_skb) {
+			if (atomic_read(&tx_pending))
+				atomic_dec(&tx_pending);
+
+			/* resume network tx queue if bearable load */
+			cb = (struct esp_skb_cb *)tx_skb->cb;
+			if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
+				esp_tx_resume(cb->priv);
 #if TEST_RAW_TP
-					if (raw_tp_mode != 0) {
-						esp_raw_tp_queue_resume();
-					}
+				if (raw_tp_mode != 0) {
+					esp_raw_tp_queue_resume();
+				}
 #endif
-				}
 			}
 		}
+	}
 
-		if (rx_pending || tx_skb) {
-			memset(&trans, 0, sizeof(trans));
-			trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;
-
-			/* Setup and execute SPI transaction
-			 * Tx_buf: Check if tx_q has valid buffer for transmission,
-			 * else keep it blank
-			 *
-			 * Rx_buf: Allocate memory for incoming data. This will be freed
-			 * immediately if received buffer is invalid.
-			 * If it is a valid buffer, upper layer will free it.
-			 * */
-
-			/* Configure TX buffer if available */
-
-			if (tx_skb) {
-				trans.tx_buf = tx_skb->data;
-				esp_hex_dump_verbose("tx: ", trans.tx_buf, 32);
-			} else {
-				tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
-				trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
-				memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);
-			}
-
-			/* Configure RX buffer */
-			rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
-			rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);
-
-			memset(rx_buf, 0, SPI_BUF_SIZE);
-
-			trans.rx_buf = rx_buf;
-			trans.len = SPI_BUF_SIZE;
+	if (!rx_pending && !tx_skb) {
+		return;
+	}
+
+	memset(&trans, 0, sizeof(trans));
+	trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;
+
+	/* Setup and execute SPI transaction
+	 * Tx_buf: Check if tx_q has valid buffer for transmission,
+	 * else keep it blank
+	 *
+	 * Rx_buf: Allocate memory for incoming data. This will be freed
+	 * immediately if received buffer is invalid.
+	 * If it is a valid buffer, upper layer will free it.
+	 * */
+
+	/* Configure TX buffer if available */
+
+	if (tx_skb) {
+		if (tx_skb->len < SPI_BUF_SIZE) {
+			struct sk_buff *tx_skb_new = esp_alloc_skb(SPI_BUF_SIZE);
+			if (!tx_skb_new) {
+				dev_kfree_skb(tx_skb);
+				return;
+			}
+
+			skb_put(tx_skb_new, SPI_BUF_SIZE);
+			skb_copy_from_linear_data(tx_skb, tx_skb_new->data, tx_skb->len);
+			dev_kfree_skb(tx_skb);
+			tx_skb = tx_skb_new;
+		}
+		trans.tx_buf = tx_skb->data;
+		esp_hex_dump_verbose("tx: ", trans.tx_buf, 32);
+	} else {
+		tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
+		if (!tx_skb) {
+			return;
+		}
+		trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
+		memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);
+	}
+
+	/* Configure RX buffer */
+	rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
+	rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);
+
+	memset(rx_buf, 0, SPI_BUF_SIZE);
+
+	trans.rx_buf = rx_buf;
+	trans.len = SPI_BUF_SIZE;
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
-			if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
-				trans.cs_change = 1;
-			}
+	if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
+		trans.cs_change = 1;
+	}
#endif
 
-			ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
-			if (ret) {
-				esp_err("SPI Transaction failed: %d", ret);
-				dev_kfree_skb(rx_skb);
-				dev_kfree_skb(tx_skb);
-			} else {
-
-				/* Free rx_skb if received data is not valid */
-				if (process_rx_buf(rx_skb)) {
-					dev_kfree_skb(rx_skb);
-				}
-
-				if (tx_skb)
-					dev_kfree_skb(tx_skb);
-			}
-		}
-	}
-
-	mutex_unlock(&spi_lock);
+	ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
+	if (ret) {
+		esp_err("SPI Transaction failed: %d", ret);
+		dev_kfree_skb(rx_skb);
+		dev_kfree_skb(tx_skb);
+	} else {
+
+		/* Free rx_skb if received data is not valid */
+		if (process_rx_buf(rx_skb)) {
+			dev_kfree_skb(rx_skb);
+		}
+
+		if (tx_skb)
+			dev_kfree_skb(tx_skb);
+	}
 }
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
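Note: beyond flattening the nested ifs into early returns and dropping the mutex, this hunk makes two behavioral fixes visible in the diff. First, a TX skb shorter than SPI_BUF_SIZE is now copied into a freshly allocated full-size skb before the transfer; trans.len is always SPI_BUF_SIZE, so pointing trans.tx_buf at a shorter buffer, as the old code did, reads past the end of its linear data. Second, esp_alloc_skb() failures on the TX path now bail out instead of passing a NULL skb to skb_put(). A minimal sketch of the fixed TX path, with queueing and cleanup elided; spi_xfer_padded is a hypothetical name and dev_alloc_skb stands in for the driver's esp_alloc_skb:

#include <linux/skbuff.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

/* Hedged sketch, not the driver's actual code: the transfer always
 * clocks buf_size bytes full duplex, so a short tx skb is first
 * replaced with a full-size, zero-padded copy. */
static int spi_xfer_padded(struct spi_device *spi, struct sk_buff **tx_skb,
			   struct sk_buff *rx_skb, unsigned int buf_size)
{
	struct spi_transfer trans = { 0 };

	if ((*tx_skb)->len < buf_size) {
		struct sk_buff *full = dev_alloc_skb(buf_size);

		if (!full)
			return -ENOMEM;

		/* Copy the payload into a full-size buffer and pad with
		 * zeros up to the fixed transfer length. */
		skb_put(full, buf_size);
		skb_copy_from_linear_data(*tx_skb, full->data, (*tx_skb)->len);
		memset(full->data + (*tx_skb)->len, 0,
		       buf_size - (*tx_skb)->len);
		dev_kfree_skb(*tx_skb);
		*tx_skb = full;
	}

	trans.tx_buf = (*tx_skb)->data;
	trans.rx_buf = rx_skb->data;
	trans.len = buf_size;	/* full duplex: equal length both ways */

	return spi_sync_transfer(spi, &trans, 1);
}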
@@ -530,7 +543,7 @@ static int spi_init(void)
 	uint8_t prio_q_idx = 0;
 	struct esp_adapter *adapter;
 
-	spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");
+	spi_context.spi_workqueue = alloc_ordered_workqueue("ESP_SPI_WORK_QUEUE", 0);
 
 	if (!spi_context.spi_workqueue) {
 		esp_err("spi workqueue failed to create\n");
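Note: the switch from create_workqueue() to alloc_ordered_workqueue() is what pairs with the spi_lock removal above: an ordered workqueue executes at most one work item at a time, in queueing order, so esp_spi_work invocations are serialized without a lock. A minimal sketch of that guarantee, with hypothetical demo_* names:

#include <linux/workqueue.h>

/* Hedged sketch, not the driver's code: work queued on an ordered
 * workqueue runs strictly one item at a time. */
static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* Serialized by the ordered workqueue: a second instance cannot
	 * run until this one returns, so no extra mutex is needed. */
}

static int demo_init(void)
{
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}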