/*
 * Copyright (c) 2025 Nordic Semiconductor ASA
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/cache.h>
#include <zephyr/device.h>
#include <zephyr/drivers/firmware/nrf_ironside/call.h>
#include <zephyr/drivers/mbox.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/util.h>

#define DT_DRV_COMPAT nordic_ironside_call

#define SHM_NODE DT_INST_PHANDLE(0, memory_region)
#define NUM_BUFS (DT_REG_SIZE(SHM_NODE) / sizeof(struct ironside_call_buf))
#define ALL_BUF_BITS BIT_MASK(NUM_BUFS)
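
/* Devicetree sketch implied by the macros above. The node and phandle labels
 * and the channel indices are illustrative assumptions; the memory-region
 * phandle and the "rx"/"tx" mbox names follow from the DT_INST_PHANDLE() and
 * MBOX_DT_SPEC_INST_GET() usages in this file:
 *
 *	ironside_call {
 *		compatible = "nordic,ironside-call";
 *		memory-region = <&ironside_shm>;
 *		mboxes = <&mbox 0>, <&mbox 1>;
 *		mbox-names = "rx", "tx";
 *	};
 */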

/* Note: this area is already zero-initialized at reset time. */
static struct ironside_call_buf *const bufs = (void *)DT_REG_ADDR(SHM_NODE);

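/* When the data cache is enabled, each buffer must occupy a whole number of
 * cache lines at a line-aligned address, so that invalidating or flushing
 * one buffer can never touch the contents of a neighboring buffer.
 */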
#if defined(CONFIG_DCACHE_LINE_SIZE)
BUILD_ASSERT((DT_REG_ADDR(SHM_NODE) % CONFIG_DCACHE_LINE_SIZE) == 0);
BUILD_ASSERT((sizeof(struct ironside_call_buf) % CONFIG_DCACHE_LINE_SIZE) == 0);
#endif

static const struct mbox_dt_spec mbox_rx = MBOX_DT_SPEC_INST_GET(0, rx);
static const struct mbox_dt_spec mbox_tx = MBOX_DT_SPEC_INST_GET(0, tx);

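/* Buffer bookkeeping: a set bit in alloc_evts means the corresponding buffer
 * is free to allocate; a set bit in rsp_evts means the buffer has no response
 * pending (it is idle or already answered).
 */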
K_EVENT_DEFINE(alloc_evts);
K_EVENT_DEFINE(rsp_evts);

static void ironside_call_rsp(const struct device *dev, mbox_channel_id_t channel_id,
			      void *user_data, struct mbox_msg *data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(channel_id);
	ARG_UNUSED(user_data);
	ARG_UNUSED(data);

	struct ironside_call_buf *buf;
	uint32_t rsp_buf_bits = 0;

	/* Check which buffers are not currently being dispatched. Those must
	 * not be cache-invalidated, in case they're being used in thread
	 * context.
	 *
	 * This value remains valid as long as ironside_call_rsp is never
	 * preempted by ironside_call_dispatch; the former runs in the MBOX
	 * ISR, while the latter must not run in an ISR (because it calls
	 * k_event_wait).
	 */
	const uint32_t skip_buf_bits = k_event_test(&rsp_evts, ALL_BUF_BITS);

	for (int i = 0; i < NUM_BUFS; i++) {
		if (skip_buf_bits & BIT(i)) {
			continue;
		}

		buf = &bufs[i];

		sys_cache_data_invd_range(buf, sizeof(*buf));
		barrier_dmem_fence_full();

		if (buf->status != IRONSIDE_CALL_STATUS_IDLE &&
		    buf->status != IRONSIDE_CALL_STATUS_REQ) {
			rsp_buf_bits |= BIT(i);
		}
	}
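	/* Wake any threads blocked in ironside_call_dispatch whose
	 * responses have now arrived.
	 */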
	k_event_post(&rsp_evts, rsp_buf_bits);
}

static int ironside_call_init(const struct device *dev)
{
	ARG_UNUSED(dev);

	int err;

	k_event_set(&alloc_evts, ALL_BUF_BITS);
	k_event_set(&rsp_evts, ALL_BUF_BITS);

	err = mbox_register_callback_dt(&mbox_rx, ironside_call_rsp, NULL);
	__ASSERT_NO_MSG(err == 0);

	err = mbox_set_enabled_dt(&mbox_rx, true);
	__ASSERT_NO_MSG(err == 0);

	return 0;
}

DEVICE_DT_INST_DEFINE(0, ironside_call_init, NULL, NULL, NULL, POST_KERNEL,
		      CONFIG_NRF_IRONSIDE_CALL_INIT_PRIORITY, NULL);

struct ironside_call_buf *ironside_call_alloc(void)
{
	uint32_t avail_buf_bits;
	uint32_t alloc_buf_bit;

	do {
		avail_buf_bits = k_event_wait(&alloc_evts, ALL_BUF_BITS, false, K_FOREVER);

		/* Try allocating the first available block.
		 * If it's claimed by another thread, go back and wait for another block.
		 */
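		/* For example, if avail_buf_bits is 0x6, LSB_GET() yields
		 * 0x2 (buffer 1). The bit test on the k_event_clear() return
		 * value below detects whether another thread cleared that
		 * same bit first, in which case this claim failed.
		 */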
		alloc_buf_bit = LSB_GET(avail_buf_bits);
	} while ((k_event_clear(&alloc_evts, alloc_buf_bit) & alloc_buf_bit) == 0);

	return &bufs[u32_count_trailing_zeros(alloc_buf_bit)];
}

void ironside_call_dispatch(struct ironside_call_buf *buf)
{
	const uint32_t buf_bit = BIT(buf - bufs);
	int err;

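	/* Publish the request to shared memory: write the status word, order
	 * that write against the cache flush, then flush the buffer so the
	 * other domain observes a complete request before the MBOX signal
	 * is raised.
	 */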
	buf->status = IRONSIDE_CALL_STATUS_REQ;
	barrier_dmem_fence_full();

	sys_cache_data_flush_range(buf, sizeof(*buf));

	k_event_clear(&rsp_evts, buf_bit);

	err = mbox_send_dt(&mbox_tx, NULL);
	__ASSERT_NO_MSG(err == 0);

	k_event_wait(&rsp_evts, buf_bit, false, K_FOREVER);
}

void ironside_call_release(struct ironside_call_buf *buf)
{
	const uint32_t buf_bit = BIT(buf - bufs);

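	/* Mark the buffer idle in shared memory so the next response ISR
	 * scan treats it as inactive, then hand it back to the allocator.
	 */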
	buf->status = IRONSIDE_CALL_STATUS_IDLE;
	barrier_dmem_fence_full();

	sys_cache_data_flush_range(buf, sizeof(*buf));

	k_event_post(&alloc_evts, buf_bit);
}
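
/* Usage sketch (illustrative, not part of this driver): a caller claims a
 * buffer, fills in the request through the struct ironside_call_buf fields
 * defined in the call header, dispatches it, and releases it once the
 * response has been consumed.
 *
 *	struct ironside_call_buf *buf = ironside_call_alloc();
 *
 *	// Fill in the request ID and arguments here.
 *	ironside_call_dispatch(buf);	// blocks until the response arrives
 *	// buf->status now carries the response status; read results here.
 *	ironside_call_release(buf);
 */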