diff --git a/subsys/ipc/ipc_service/backends/ipc_icmsg.c b/subsys/ipc/ipc_service/backends/ipc_icmsg.c index 215c7b469259..b7f71af393fa 100644 --- a/subsys/ipc/ipc_service/backends/ipc_icmsg.c +++ b/subsys/ipc/ipc_service/backends/ipc_icmsg.c @@ -105,7 +105,8 @@ static int backend_init(const struct device *instance) DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE) /* TODO: REMOVE THIS WORKAROUND!!! */ - +#ifdef NRF_PPR +#include static int workaround_ppr_reset(void) { #define _FIX_RESET_MEM(i) \ @@ -113,6 +114,8 @@ static int workaround_ppr_reset(void) backend_data_##i.tx_pb = &tx_pb_##i; \ backend_data_##i.rx_pb = &rx_pb_##i; DT_INST_FOREACH_STATUS_OKAY(_FIX_RESET_MEM); + return 0; } SYS_INIT(workaround_ppr_reset, PRE_KERNEL_1, 0); +#endif diff --git a/tests/subsys/ipc/ipc_sessions/CMakeLists.txt b/tests/subsys/ipc/ipc_sessions/CMakeLists.txt index 75f2766cf7b9..79b2b9c49c36 100644 --- a/tests/subsys/ipc/ipc_sessions/CMakeLists.txt +++ b/tests/subsys/ipc/ipc_sessions/CMakeLists.txt @@ -10,3 +10,7 @@ zephyr_include_directories(./common) FILE(GLOB app_sources src/*.c) target_sources(app PRIVATE ${app_sources}) + +zephyr_sources_ifdef(CONFIG_IPC_SERVICE_ICMSG_V1 interoperability/icmsg_v1.c) +zephyr_sources_ifdef(CONFIG_PBUF_V1 interoperability/pbuf_v1.c) +zephyr_sources_ifdef(CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1 interoperability/ipc_icmsg_v1.c) diff --git a/tests/subsys/ipc/ipc_sessions/Kconfig b/tests/subsys/ipc/ipc_sessions/Kconfig index 3b3236d6dddf..908ad356dbc9 100644 --- a/tests/subsys/ipc/ipc_sessions/Kconfig +++ b/tests/subsys/ipc/ipc_sessions/Kconfig @@ -4,6 +4,8 @@ # SPDX-License-Identifier: LicenseRef-Nordic-5-Clause # +rsource "interoperability/Kconfig" + menu "Zephyr" source "Kconfig.zephyr" endmenu @@ -28,3 +30,8 @@ config IPC_TEST_BLOCK_SIZE config IPC_TEST_BLOCK_CNT int "Number of blocks for multiple transfers test" default 8000 + +config IPC_TEST_SKIP_UNBOUND + bool "Skip unbound tests" + help + Whether to skip tests that requires unbound callback 
functionality.
diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/Kconfig b/tests/subsys/ipc/ipc_sessions/interoperability/Kconfig
new file mode 100644
index 000000000000..31a2a8d8385a
--- /dev/null
+++ b/tests/subsys/ipc/ipc_sessions/interoperability/Kconfig
@@ -0,0 +1,29 @@
+#
+# Copyright (c) 2024 Nordic Semiconductor ASA
+#
+# SPDX-License-Identifier: LicenseRef-Nordic-5-Clause
+#
+
+config IPC_SERVICE_BACKEND_ICMSG
+	default n if IPC_SERVICE_BACKEND_ICMSG_V1
+
+config IPC_SERVICE_ICMSG
+	default n if IPC_SERVICE_ICMSG_V1
+
+config IPC_SERVICE_BACKEND_ICMSG_V1
+	bool "ICMSG backend with SPSC packet buffer (old implementation)"
+	depends on MBOX
+	select IPC_SERVICE_ICMSG_V1
+	help
+	  Choosing this backend results in single endpoint implementation based
+	  on circular packet buffer.
+
+menuconfig IPC_SERVICE_ICMSG_V1
+	bool "icmsg IPC library (old implementation)"
+	select PBUF_V1
+	help
+	  Icmsg library
+
+if IPC_SERVICE_ICMSG_V1
+	rsource "Kconfig.icmsg_v1"
+endif
diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/Kconfig.icmsg_v1 b/tests/subsys/ipc/ipc_sessions/interoperability/Kconfig.icmsg_v1
new file mode 100644
index 000000000000..463a1a0ad3ce
--- /dev/null
+++ b/tests/subsys/ipc/ipc_sessions/interoperability/Kconfig.icmsg_v1
@@ -0,0 +1,90 @@
+# Copyright (c) 2022 Nordic Semiconductor (ASA)
+# SPDX-License-Identifier: Apache-2.0
+
+config IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1
+	bool "Synchronize access to shared memory"
+	depends on MULTITHREADING
+	default y
+	help
+	  Provide synchronization access to shared memory at a library level.
+	  This option is enabled by default to allow to use sending API from
+	  multiple contexts. Mutex is used to guard access to the memory.
+	  This option can be safely disabled if an application ensures data
+	  are sent from single context.
+
+config IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS_V1
+	int "Mutex lock timeout in milliseconds"
+	depends on IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1
+	range 1 5
+	default 1
+	help
+	  Maximum time to wait, in milliseconds, for access to send data with
+	  backends based on icmsg library. This time should be relatively low.
+
+config IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS_V1
+	int "Bond notification timeout in milliseconds"
+	range 1 100
+	default 1
+	help
+	  Time to wait for remote bonding notification before the
+	  notification is repeated.
+
+config IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
+	bool "Use dedicated workqueue"
+	depends on MULTITHREADING
+	default y
+	help
+	  Enable dedicated workqueue thread for the ICMsg backend.
+	  Disabling this configuration will cause the ICMsg backend to
+	  process incoming data through the system workqueue context, and
+	  therefore reduces the RAM footprint of the backend.
+	  Disabling this config may result in deadlocks in certain usage
+	  scenarios, such as when synchronous IPC is executed from the system
+	  workqueue context.
+	  The callbacks coming from the backend are executed from the workqueue
+	  context.
+	  When the option is disabled, the user must obey the restrictions
+	  imposed by the system workqueue, such as never performing blocking
+	  operations from within the callback.
+
+if IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1
+
+config IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE_V1
+	int "Size of RX work queue stack"
+	default 1280
+	help
+	  Size of stack used by work queue RX thread. This work queue is
+	  created to prevent notifying service users about received data
+	  from the system work queue. The queue is shared among instances.
+
+config IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY_V1
+	int "Priority of RX work queue thread"
+	default -1
+	range -256 -1
+	help
+	  Priority of the ICMSG RX work queue thread.
+	  The ICMSG library in its simplicity requires the workqueue to execute
+	  at a cooperative priority.
+ +endif + +# The Icmsg library in its simplicity requires the system workqueue to execute +# at a cooperative priority. +config SYSTEM_WORKQUEUE_PRIORITY + range -256 -1 if !IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1 + +config PBUF_V1 + bool "Packed buffer support library (old implementation)" + help + The packet buffer implements lightweight unidirectional packet buffer + with read/write semantics on top of a memory region shared by the + reader and writer. It optionally embeds cache and memory barrier + management to ensure correct data access. + +if PBUF_V1 + +config PBUF_RX_READ_BUF_SIZE_V1 + int "Size of PBUF read buffer in bytes" + default 128 + +endif # PBUF diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.c b/tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.c new file mode 100644 index 000000000000..ae83dd30beb9 --- /dev/null +++ b/tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2022 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "icmsg_v1.h" + +#include +#include +#include +#include "pbuf_v1.h" +#include + +#define BOND_NOTIFY_REPEAT_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) +#define SHMEM_ACCESS_TO K_MSEC(CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS) + +static const uint8_t magic[] = {0x45, 0x6d, 0x31, 0x6c, 0x31, 0x4b, + 0x30, 0x72, 0x6e, 0x33, 0x6c, 0x69, 0x34}; + +#ifdef CONFIG_MULTITHREADING +#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE) +static K_THREAD_STACK_DEFINE(icmsg_stack, CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE); +static struct k_work_q icmsg_workq; +static struct k_work_q *const workq = &icmsg_workq; +#else +static struct k_work_q *const workq = &k_sys_work_q; +#endif +static void mbox_callback_process(struct k_work *item); +#else +static void mbox_callback_process(struct icmsg_data_t *dev_data); +#endif + +static int mbox_deinit(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data) 
+{ + int err; + + err = mbox_set_enabled_dt(&conf->mbox_rx, 0); + if (err != 0) { + return err; + } + + err = mbox_register_callback_dt(&conf->mbox_rx, NULL, NULL); + if (err != 0) { + return err; + } + +#ifdef CONFIG_MULTITHREADING + (void)k_work_cancel(&dev_data->mbox_work); + (void)k_work_cancel_delayable(&dev_data->notify_work); +#endif + + return 0; +} + +static bool is_endpoint_ready(struct icmsg_data_t *dev_data) +{ + return atomic_get(&dev_data->state) == ICMSG_STATE_READY; +} + +#ifdef CONFIG_MULTITHREADING +static void notify_process(struct k_work *item) +{ + struct k_work_delayable *dwork = k_work_delayable_from_work(item); + struct icmsg_data_t *dev_data = + CONTAINER_OF(dwork, struct icmsg_data_t, notify_work); + + (void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL); + + atomic_t state = atomic_get(&dev_data->state); + + if (state != ICMSG_STATE_READY) { + int ret; + + ret = k_work_reschedule_for_queue(workq, dwork, BOND_NOTIFY_REPEAT_TO); + __ASSERT_NO_MSG(ret >= 0); + (void)ret; + } +} +#else +static void notify_process(struct icmsg_data_t *dev_data) +{ + (void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL); +#if defined(CONFIG_SYS_CLOCK_EXISTS) + int64_t start = k_uptime_get(); +#endif + + while (false == is_endpoint_ready(dev_data)) { + mbox_callback_process(dev_data); + +#if defined(CONFIG_SYS_CLOCK_EXISTS) + if ((k_uptime_get() - start) > CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS) { +#endif + (void)mbox_send_dt(&dev_data->cfg->mbox_tx, NULL); +#if defined(CONFIG_SYS_CLOCK_EXISTS) + start = k_uptime_get(); + }; +#endif + } +} +#endif + +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC +static int reserve_tx_buffer_if_unused(struct icmsg_data_t *dev_data) +{ + int ret = k_mutex_lock(&dev_data->tx_lock, SHMEM_ACCESS_TO); + + if (ret < 0) { + return ret; + } + + return 0; +} + +static int release_tx_buffer(struct icmsg_data_t *dev_data) +{ + return k_mutex_unlock(&dev_data->tx_lock); +} +#endif + +static uint32_t data_available(struct 
icmsg_data_t *dev_data) +{ + return pbuf_read(dev_data->rx_pb, NULL, 0); +} + +#ifdef CONFIG_MULTITHREADING +static void submit_mbox_work(struct icmsg_data_t *dev_data) +{ + if (k_work_submit_to_queue(workq, &dev_data->mbox_work) < 0) { + /* The mbox processing work is never canceled. + * The negative error code should never be seen. + */ + __ASSERT_NO_MSG(false); + } +} + +static void submit_work_if_buffer_free(struct icmsg_data_t *dev_data) +{ + submit_mbox_work(dev_data); +} + +static void submit_work_if_buffer_free_and_data_available( + struct icmsg_data_t *dev_data) +{ + if (!data_available(dev_data)) { + return; + } + + submit_mbox_work(dev_data); +} +#else +static void submit_if_buffer_free(struct icmsg_data_t *dev_data) +{ + mbox_callback_process(dev_data); +} + +static void submit_if_buffer_free_and_data_available( + struct icmsg_data_t *dev_data) +{ + + if (!data_available(dev_data)) { + return; + } + + mbox_callback_process(dev_data); +} +#endif + +#ifdef CONFIG_MULTITHREADING +static void mbox_callback_process(struct k_work *item) +#else +static void mbox_callback_process(struct icmsg_data_t *dev_data) +#endif +{ +#ifdef CONFIG_MULTITHREADING + struct icmsg_data_t *dev_data = CONTAINER_OF(item, struct icmsg_data_t, mbox_work); +#endif + uint8_t rx_buffer[CONFIG_PBUF_RX_READ_BUF_SIZE] __aligned(4); + + atomic_t state = atomic_get(&dev_data->state); + + uint32_t len = data_available(dev_data); + + if (len == 0) { + /* Unlikely, no data in buffer. */ + return; + } + + __ASSERT_NO_MSG(len <= sizeof(rx_buffer)); + + if (sizeof(rx_buffer) < len) { + return; + } + + len = pbuf_read(dev_data->rx_pb, rx_buffer, sizeof(rx_buffer)); + + if (state == ICMSG_STATE_READY) { + if (dev_data->cb->received) { + dev_data->cb->received(rx_buffer, len, dev_data->ctx); + } + } else { + __ASSERT_NO_MSG(state == ICMSG_STATE_BUSY); + + /* Allow magic number longer than sizeof(magic) for future protocol version. 
*/ + bool endpoint_invalid = (len < sizeof(magic) || + memcmp(magic, rx_buffer, sizeof(magic))); + + if (endpoint_invalid) { + __ASSERT_NO_MSG(false); + return; + } + + if (dev_data->cb->bound) { + dev_data->cb->bound(dev_data->ctx); + } + + atomic_set(&dev_data->state, ICMSG_STATE_READY); + } +#ifdef CONFIG_MULTITHREADING + submit_work_if_buffer_free_and_data_available(dev_data); +#else + submit_if_buffer_free_and_data_available(dev_data); +#endif +} + +static void mbox_callback(const struct device *instance, uint32_t channel, + void *user_data, struct mbox_msg *msg_data) +{ + struct icmsg_data_t *dev_data = user_data; +#ifdef CONFIG_MULTITHREADING + submit_work_if_buffer_free(dev_data); +#else + submit_if_buffer_free(dev_data); +#endif +} + +static int mbox_init(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data) +{ + int err; + +#ifdef CONFIG_MULTITHREADING + k_work_init(&dev_data->mbox_work, mbox_callback_process); + k_work_init_delayable(&dev_data->notify_work, notify_process); +#endif + + err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, dev_data); + if (err != 0) { + return err; + } + + return mbox_set_enabled_dt(&conf->mbox_rx, 1); +} + +int icmsg_open(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data, + const struct ipc_service_cb *cb, void *ctx) +{ + if (!atomic_cas(&dev_data->state, ICMSG_STATE_OFF, ICMSG_STATE_BUSY)) { + /* Already opened. 
*/ + return -EALREADY; + } + + dev_data->cb = cb; + dev_data->ctx = ctx; + dev_data->cfg = conf; + +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC + k_mutex_init(&dev_data->tx_lock); +#endif + + int ret = pbuf_tx_init(dev_data->tx_pb); + + if (ret < 0) { + __ASSERT(false, "Incorrect configuration"); + return ret; + } + + (void)pbuf_rx_init(dev_data->rx_pb); + + ret = pbuf_write(dev_data->tx_pb, magic, sizeof(magic)); + + if (ret < 0) { + __ASSERT_NO_MSG(false); + return ret; + } + + if (ret < (int)sizeof(magic)) { + __ASSERT_NO_MSG(ret == sizeof(magic)); + return ret; + } + + ret = mbox_init(conf, dev_data); + if (ret) { + return ret; + } +#ifdef CONFIG_MULTITHREADING + ret = k_work_schedule_for_queue(workq, &dev_data->notify_work, K_NO_WAIT); + if (ret < 0) { + return ret; + } +#else + notify_process(dev_data); +#endif + return 0; +} + +int icmsg_close(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data) +{ + int ret; + + ret = mbox_deinit(conf, dev_data); + if (ret) { + return ret; + } + + atomic_set(&dev_data->state, ICMSG_STATE_OFF); + + return 0; +} + +int icmsg_send(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data, + const void *msg, size_t len) +{ + int ret; + int write_ret; +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC + int release_ret; +#endif + int sent_bytes; + + if (!is_endpoint_ready(dev_data)) { + return -EBUSY; + } + + /* Empty message is not allowed */ + if (len == 0) { + return -ENODATA; + } + +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC + ret = reserve_tx_buffer_if_unused(dev_data); + if (ret < 0) { + return -ENOBUFS; + } +#endif + + write_ret = pbuf_write(dev_data->tx_pb, msg, len); + +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC + release_ret = release_tx_buffer(dev_data); + __ASSERT_NO_MSG(!release_ret); +#endif + + if (write_ret < 0) { + return write_ret; + } else if (write_ret < len) { + return -EBADMSG; + } + sent_bytes = write_ret; + + __ASSERT_NO_MSG(conf->mbox_tx.dev != NULL); + + ret = 
mbox_send_dt(&conf->mbox_tx, NULL); + if (ret) { + return ret; + } + + return sent_bytes; +} + +#if defined(CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE) + +static int work_q_init(void) +{ + struct k_work_queue_config cfg = { + .name = "icmsg_workq", + }; + + k_work_queue_start(&icmsg_workq, + icmsg_stack, + K_KERNEL_STACK_SIZEOF(icmsg_stack), + CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY, &cfg); + return 0; +} + +SYS_INIT(work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); + +#endif diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.h b/tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.h new file mode 100644 index 000000000000..1d2c3951f5a6 --- /dev/null +++ b/tests/subsys/ipc/ipc_sessions/interoperability/icmsg_v1.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2022 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_IPC_ICMSG_H_ +#define ZEPHYR_INCLUDE_IPC_ICMSG_H_ + +#include +#include +#include +#include +#include +#include "pbuf_v1.h" +#include + +/* Config aliases that prevenets from config collisions: */ +#undef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1 +#define CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC_V1 +#endif +#undef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS_V1 +#define CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_TO_MS_V1 +#endif +#undef CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS +#ifdef CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS_V1 +#define CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS CONFIG_IPC_SERVICE_ICMSG_BOND_NOTIFY_REPEAT_TO_MS_V1 +#endif +#undef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE +#ifdef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1 +#define CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_ENABLE_V1 +#endif +#undef 
CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE +#ifdef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE_V1 +#define CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_STACK_SIZE_V1 +#endif +#undef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY +#ifdef CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY_V1 +#define CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY CONFIG_IPC_SERVICE_BACKEND_ICMSG_WQ_PRIORITY_V1 +#endif +#undef CONFIG_PBUF_RX_READ_BUF_SIZE +#ifdef CONFIG_PBUF_RX_READ_BUF_SIZE_V1 +#define CONFIG_PBUF_RX_READ_BUF_SIZE CONFIG_PBUF_RX_READ_BUF_SIZE_V1 +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Icmsg IPC library API + * @defgroup ipc_icmsg_api Icmsg IPC library API + * @ingroup ipc + * @{ + */ + +enum icmsg_state { + ICMSG_STATE_OFF, + ICMSG_STATE_BUSY, + ICMSG_STATE_READY, +}; + +struct icmsg_config_t { + struct mbox_dt_spec mbox_tx; + struct mbox_dt_spec mbox_rx; +}; + +struct icmsg_data_t { + /* Tx/Rx buffers. */ + struct pbuf *tx_pb; + struct pbuf *rx_pb; +#ifdef CONFIG_IPC_SERVICE_ICMSG_SHMEM_ACCESS_SYNC + struct k_mutex tx_lock; +#endif + + /* Callbacks for an endpoint. */ + const struct ipc_service_cb *cb; + void *ctx; + + /* General */ + const struct icmsg_config_t *cfg; +#ifdef CONFIG_MULTITHREADING + struct k_work_delayable notify_work; + struct k_work mbox_work; +#endif + atomic_t state; +}; + +/** @brief Open an icmsg instance + * + * Open an icmsg instance to be able to send and receive messages to a remote + * instance. + * This function is blocking until the handshake with the remote instance is + * completed. + * This function is intended to be called late in the initialization process, + * possibly from a thread which can be safely blocked while handshake with the + * remote instance is being pefromed. + * + * @param[in] conf Structure containing configuration parameters for the icmsg + * instance. + * @param[inout] dev_data Structure containing run-time data used by the icmsg + * instance. 
+ * @param[in] cb Structure containing callback functions to be called on + * events generated by this icmsg instance. The pointed memory + * must be preserved while the icmsg instance is active. + * @param[in] ctx Pointer to context passed as an argument to callbacks. + * + * + * @retval 0 on success. + * @retval -EALREADY when the instance is already opened. + * @retval other errno codes from dependent modules. + */ +int icmsg_open(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data, + const struct ipc_service_cb *cb, void *ctx); + +/** @brief Close an icmsg instance + * + * Closing an icmsg instance results in releasing all resources used by given + * instance including the shared memory regions and mbox devices. + * + * @param[in] conf Structure containing configuration parameters for the icmsg + * instance being closed. Its content must be the same as used + * for creating this instance with @ref icmsg_open. + * @param[inout] dev_data Structure containing run-time data used by the icmsg + * instance. + * + * @retval 0 on success. + * @retval other errno codes from dependent modules. + */ +int icmsg_close(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data); + +/** @brief Send a message to the remote icmsg instance. + * + * @param[in] conf Structure containing configuration parameters for the icmsg + * instance. + * @param[inout] dev_data Structure containing run-time data used by the icmsg + * instance. + * @param[in] msg Pointer to a buffer containing data to send. + * @param[in] len Size of data in the @p msg buffer. + * + * + * @retval Number of sent bytes. + * @retval -EBUSY when the instance has not finished handshake with the remote + * instance. + * @retval -ENODATA when the requested data to send is empty. + * @retval -EBADMSG when the requested data to send is too big. + * @retval -ENOBUFS when there are no TX buffers available. + * @retval other errno codes from dependent modules. 
+ */ +int icmsg_send(const struct icmsg_config_t *conf, + struct icmsg_data_t *dev_data, + const void *msg, size_t len); + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_IPC_ICMSG_H_ */ diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.c b/tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.c new file mode 100644 index 000000000000..4ce003667b05 --- /dev/null +++ b/tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.c @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2022 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "ipc_icmsg_v1.h" + +#include +#include +#include "icmsg_v1.h" + +#include + +#define DT_DRV_COMPAT zephyr_ipc_icmsg + +static int register_ept(const struct device *instance, void **token, + const struct ipc_ept_cfg *cfg) +{ + const struct icmsg_config_t *conf = instance->config; + struct icmsg_data_t *dev_data = instance->data; + + /* Only one endpoint is supported. No need for a token. 
*/ + *token = NULL; + + return icmsg_open(conf, dev_data, &cfg->cb, cfg->priv); +} + +static int deregister_ept(const struct device *instance, void *token) +{ + const struct icmsg_config_t *conf = instance->config; + struct icmsg_data_t *dev_data = instance->data; + + return icmsg_close(conf, dev_data); +} + +static int send(const struct device *instance, void *token, + const void *msg, size_t len) +{ + const struct icmsg_config_t *conf = instance->config; + struct icmsg_data_t *dev_data = instance->data; + + return icmsg_send(conf, dev_data, msg, len); +} + +const static struct ipc_service_backend backend_ops = { + .register_endpoint = register_ept, + .deregister_endpoint = deregister_ept, + .send = send, +}; + +static int backend_init(const struct device *instance) +{ + return 0; +} + +#define DEFINE_BACKEND_DEVICE(i) \ + static const struct icmsg_config_t backend_config_##i = { \ + .mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \ + .mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \ + }; \ + \ + PBUF_DEFINE(tx_pb_##i, \ + DT_REG_ADDR(DT_INST_PHANDLE(i, tx_region)), \ + DT_REG_SIZE(DT_INST_PHANDLE(i, tx_region)), \ + DT_INST_PROP_OR(i, dcache_alignment, 0)); \ + PBUF_DEFINE(rx_pb_##i, \ + DT_REG_ADDR(DT_INST_PHANDLE(i, rx_region)), \ + DT_REG_SIZE(DT_INST_PHANDLE(i, rx_region)), \ + DT_INST_PROP_OR(i, dcache_alignment, 0)); \ + \ + static struct icmsg_data_t backend_data_##i = { \ + .tx_pb = &tx_pb_##i, \ + .rx_pb = &rx_pb_##i, \ + }; \ + \ + DEVICE_DT_INST_DEFINE(i, \ + &backend_init, \ + NULL, \ + &backend_data_##i, \ + &backend_config_##i, \ + POST_KERNEL, \ + CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \ + &backend_ops); + +DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE) diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.h b/tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.h new file mode 100644 index 000000000000..5407ea3e2e8c --- /dev/null +++ b/tests/subsys/ipc/ipc_sessions/interoperability/ipc_icmsg_v1.h @@ -0,0 +1,5 @@ +/* + * Copyright 
(c) 2022 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.c b/tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.c new file mode 100644 index 000000000000..79de2d372870 --- /dev/null +++ b/tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.c @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2023 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include "pbuf_v1.h" +#include + +#if defined(CONFIG_ARCH_POSIX) +#include +#endif + +/* Helper funciton for getting numer of bytes being written to the bufer. */ +static uint32_t idx_occupied(uint32_t len, uint32_t wr_idx, uint32_t rd_idx) +{ + /* It is implicitly assumed wr_idx and rd_idx cannot differ by more then len. */ + return (rd_idx > wr_idx) ? (len - (rd_idx - wr_idx)) : (wr_idx - rd_idx); +} + +/* Helper function for wrapping the index from the begging if above buffer len. */ +static uint32_t idx_wrap(uint32_t len, uint32_t idx) +{ + return (idx >= len) ? (idx % len) : (idx); +} + +static int validate_cfg(const struct pbuf_cfg *cfg) +{ + /* Validate pointers. */ + if (!cfg || !cfg->rd_idx_loc || !cfg->wr_idx_loc || !cfg->data_loc) { + return -EINVAL; + } + + /* Validate pointer alignment. */ + if (!IS_PTR_ALIGNED_BYTES(cfg->rd_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) || + !IS_PTR_ALIGNED_BYTES(cfg->wr_idx_loc, MAX(cfg->dcache_alignment, _PBUF_IDX_SIZE)) || + !IS_PTR_ALIGNED_BYTES(cfg->data_loc, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + /* Validate len. */ + if (cfg->len < _PBUF_MIN_DATA_LEN || !IS_PTR_ALIGNED_BYTES(cfg->len, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + /* Validate pointer values. 
*/ + if (!(cfg->rd_idx_loc < cfg->wr_idx_loc) || + !((uint8_t *)cfg->wr_idx_loc < cfg->data_loc) || + !(((uint8_t *)cfg->rd_idx_loc + MAX(_PBUF_IDX_SIZE, cfg->dcache_alignment)) == + (uint8_t *)cfg->wr_idx_loc)) { + return -EINVAL; + } + + return 0; +} + +#if defined(CONFIG_ARCH_POSIX) +void pbuf_native_addr_remap(struct pbuf *pb) +{ + native_emb_addr_remap((void **)&pb->cfg->rd_idx_loc); + native_emb_addr_remap((void **)&pb->cfg->wr_idx_loc); + native_emb_addr_remap((void **)&pb->cfg->data_loc); +} +#endif + +int pbuf_tx_init(struct pbuf *pb) +{ + if (validate_cfg(pb->cfg) != 0) { + return -EINVAL; + } +#if defined(CONFIG_ARCH_POSIX) + pbuf_native_addr_remap(pb); +#endif + + /* Initialize local copy of indexes. */ + pb->data.wr_idx = 0; + pb->data.rd_idx = 0; + + /* Clear shared memory. */ + *(pb->cfg->wr_idx_loc) = pb->data.wr_idx; + *(pb->cfg->rd_idx_loc) = pb->data.rd_idx; + + __sync_synchronize(); + + /* Take care cache. */ + sys_cache_data_flush_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc))); + sys_cache_data_flush_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc))); + + return 0; +} + +int pbuf_rx_init(struct pbuf *pb) +{ + if (validate_cfg(pb->cfg) != 0) { + return -EINVAL; + } +#if defined(CONFIG_ARCH_POSIX) + pbuf_native_addr_remap(pb); +#endif + + /* Initialize local copy of indexes. */ + pb->data.wr_idx = 0; + pb->data.rd_idx = 0; + + return 0; +} + +int pbuf_write(struct pbuf *pb, const char *data, uint16_t len) +{ + if (pb == NULL || len == 0 || data == NULL) { + /* Incorrect call. */ + return -EINVAL; + } + + /* Invalidate rd_idx only, local wr_idx is used to increase buffer security. */ + sys_cache_data_invd_range((void *)(pb->cfg->rd_idx_loc), sizeof(*(pb->cfg->rd_idx_loc))); + __sync_synchronize(); + + uint8_t *const data_loc = pb->cfg->data_loc; + const uint32_t blen = pb->cfg->len; + uint32_t rd_idx = *(pb->cfg->rd_idx_loc); + uint32_t wr_idx = pb->data.wr_idx; + + /* wr_idx must always be aligned. 
*/ + __ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)); + /* rd_idx shall always be aligned, but its value is received from the reader. + * Can not assert. + */ + if (!IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + uint32_t free_space = blen - idx_occupied(blen, wr_idx, rd_idx) - _PBUF_IDX_SIZE; + + /* Packet length, data + packet length size. */ + uint32_t plen = len + PBUF_PACKET_LEN_SZ; + + /* Check if packet will fit into the buffer. */ + if (free_space < plen) { + return -ENOMEM; + } + + /* Clear packet len with zeros and update. Clearing is done for possible versioning in the + * future. Writing is allowed now, because shared wr_idx value is updated at the very end. + */ + *((uint32_t *)(&data_loc[wr_idx])) = 0; + sys_put_be16(len, &data_loc[wr_idx]); + __sync_synchronize(); + sys_cache_data_flush_range(&data_loc[wr_idx], PBUF_PACKET_LEN_SZ); + + wr_idx = idx_wrap(blen, wr_idx + PBUF_PACKET_LEN_SZ); + + /* Write until end of the buffer, if data will be wrapped. */ + uint32_t tail = MIN(len, blen - wr_idx); + + memcpy(&data_loc[wr_idx], data, tail); + sys_cache_data_flush_range(&data_loc[wr_idx], tail); + + if (len > tail) { + /* Copy remaining data to buffer front. */ + memcpy(&data_loc[0], data + tail, len - tail); + sys_cache_data_flush_range(&data_loc[0], len - tail); + } + + wr_idx = idx_wrap(blen, ROUND_UP(wr_idx + len, _PBUF_IDX_SIZE)); + /* Update wr_idx. */ + pb->data.wr_idx = wr_idx; + *(pb->cfg->wr_idx_loc) = wr_idx; + __sync_synchronize(); + sys_cache_data_flush_range((void *)pb->cfg->wr_idx_loc, sizeof(*(pb->cfg->wr_idx_loc))); + + return len; +} + +int pbuf_read(struct pbuf *pb, char *buf, uint16_t len) +{ + if (pb == NULL) { + /* Incorrect call. */ + return -EINVAL; + } + + /* Invalidate wr_idx only, local rd_idx is used to increase buffer security. 
*/ + sys_cache_data_invd_range((void *)(pb->cfg->wr_idx_loc), sizeof(*(pb->cfg->wr_idx_loc))); + __sync_synchronize(); + + uint8_t *const data_loc = pb->cfg->data_loc; + const uint32_t blen = pb->cfg->len; + uint32_t wr_idx = *(pb->cfg->wr_idx_loc); + uint32_t rd_idx = pb->data.rd_idx; + + /* rd_idx must always be aligned. */ + __ASSERT_NO_MSG(IS_PTR_ALIGNED_BYTES(rd_idx, _PBUF_IDX_SIZE)); + /* wr_idx shall always be aligned, but its value is received from the + * writer. Can not assert. + */ + if (!IS_PTR_ALIGNED_BYTES(wr_idx, _PBUF_IDX_SIZE)) { + return -EINVAL; + } + + if (rd_idx == wr_idx) { + /* Buffer is empty. */ + return 0; + } + + /* Get packet len.*/ + sys_cache_data_invd_range(&data_loc[rd_idx], PBUF_PACKET_LEN_SZ); + uint16_t plen = sys_get_be16(&data_loc[rd_idx]); + + if (!buf) { + return (int)plen; + } + + if (plen > len) { + return -ENOMEM; + } + + uint32_t occupied_space = idx_occupied(blen, wr_idx, rd_idx); + + if (occupied_space < plen + PBUF_PACKET_LEN_SZ) { + /* This should never happen. */ + return -EAGAIN; + } + + rd_idx = idx_wrap(blen, rd_idx + PBUF_PACKET_LEN_SZ); + + /* Packet will fit into provided buffer, truncate len if provided len + * is bigger than necessary. + */ + len = MIN(plen, len); + + /* Read until end of the buffer, if data are wrapped. */ + uint32_t tail = MIN(blen - rd_idx, len); + + sys_cache_data_invd_range(&data_loc[rd_idx], tail); + memcpy(buf, &data_loc[rd_idx], tail); + + if (len > tail) { + sys_cache_data_invd_range(&data_loc[0], len - tail); + memcpy(&buf[tail], &pb->cfg->data_loc[0], len - tail); + } + + /* Update rd_idx. 
*/ + rd_idx = idx_wrap(blen, ROUND_UP(rd_idx + len, _PBUF_IDX_SIZE)); + + pb->data.rd_idx = rd_idx; + *(pb->cfg->rd_idx_loc) = rd_idx; + __sync_synchronize(); + sys_cache_data_flush_range((void *)pb->cfg->rd_idx_loc, sizeof(*(pb->cfg->rd_idx_loc))); + + return len; +} diff --git a/tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.h b/tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.h new file mode 100644 index 000000000000..8783cdbbf146 --- /dev/null +++ b/tests/subsys/ipc/ipc_sessions/interoperability/pbuf_v1.h @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2023 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_IPC_PBUF_H_ +#define ZEPHYR_INCLUDE_IPC_PBUF_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Packed buffer API + * @defgroup pbuf Packed Buffer API + * @ingroup ipc + * @{ + */ + +/** @brief Size of packet length field. */ +#define PBUF_PACKET_LEN_SZ sizeof(uint32_t) + +/* Amount of data that is left unused to distinguish between empty and full. */ +#define _PBUF_IDX_SIZE sizeof(uint32_t) + +/* Minimal length of the data field in the buffer to store the smalest packet + * possible. + * (+1) for at least one byte of data. + * (+_PBUF_IDX_SIZE) to distinguish buffer full and buffer empty. + * Rounded up to keep wr/rd indexes pointing to aligned address. + */ +#define _PBUF_MIN_DATA_LEN ROUND_UP(PBUF_PACKET_LEN_SZ + 1 + _PBUF_IDX_SIZE, _PBUF_IDX_SIZE) + +#if defined(CONFIG_ARCH_POSIX) +/* For the native simulated boards we need to modify some pointers at init */ +#define PBUF_MAYBE_CONST +#else +#define PBUF_MAYBE_CONST const +#endif + +/** @brief Control block of packet buffer. + * + * The structure contains configuration data. + */ +struct pbuf_cfg { + volatile uint32_t *rd_idx_loc; /* Address of the variable holding + * index value of the first valid byte + * in data[]. 
+ */ + volatile uint32_t *wr_idx_loc; /* Address of the variable holding + * index value of the first free byte + * in data[]. + */ + uint32_t dcache_alignment; /* CPU data cache line size in bytes. + * Used for validation - TODO: To be + * replaced by flags. + */ + uint32_t len; /* Length of data[] in bytes. */ + uint8_t *data_loc; /* Location of the data[]. */ +}; + +/** + * @brief Data block of the packed buffer. + * + * The structure contains local copies of wr and rd indexes used by writer and + * reader respectively. + */ +struct pbuf_data { + volatile uint32_t wr_idx; /* Index of the first holding first + * free byte in data[]. Used for + * writing. + */ + volatile uint32_t rd_idx; /* Index of the first holding first + * valid byte in data[]. Used for + * reading. + */ +}; + + +/** + * @brief Scure packed buffer. + * + * The packet buffer implements lightweight unidirectional packet + * buffer with read/write semantics on top of a memory region shared + * by the reader and writer. It embeds cache and memory barrier management to + * ensure correct data access. + * + * This structure supports single writer and reader. Data stored in the buffer + * is encapsulated to a message (with length header). The read/write API is + * written in a way to protect the data from being corrupted. + */ +struct pbuf { + PBUF_MAYBE_CONST struct pbuf_cfg *const cfg; /* Configuration of the + * buffer. + */ + struct pbuf_data data; /* Data used to read and write + * to the buffer + */ +}; + +/** + * @brief Macro for configuration initialization. + * + * It is recommended to use this macro to initialize packed buffer + * configuration. + * + * @param mem_addr Memory address for pbuf. + * @param size Size of the memory. + * @param dcache_align Data cache alignment. 
+ */ +#define PBUF_CFG_INIT(mem_addr, size, dcache_align) \ +{ \ + .rd_idx_loc = (uint32_t *)(mem_addr), \ + .wr_idx_loc = (uint32_t *)((uint8_t *)(mem_addr) + \ + MAX(dcache_align, _PBUF_IDX_SIZE)), \ + .data_loc = (uint8_t *)((uint8_t *)(mem_addr) + \ + MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE), \ + .len = (uint32_t)((uint32_t)(size) - MAX(dcache_align, _PBUF_IDX_SIZE) - \ + _PBUF_IDX_SIZE), \ + .dcache_alignment = (dcache_align), \ +} + +/** + * @brief Macro calculates memory overhead taken by the header in shared memory. + * + * It contains the read index, write index and padding. + * + * @param dcache_align Data cache alignment. + */ +#define PBUF_HEADER_OVERHEAD(dcache_align) \ + (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE) + +/** + * @brief Statically define and initialize pbuf. + * + * @param name Name of the pbuf. + * @param mem_addr Memory address for pbuf. + * @param size Size of the memory. + * @param dcache_align Data cache line size. + */ +#define PBUF_DEFINE(name, mem_addr, size, dcache_align) \ + BUILD_ASSERT(dcache_align >= 0, \ + "Cache line size must be non negative."); \ + BUILD_ASSERT((size) > 0 && IS_PTR_ALIGNED_BYTES(size, _PBUF_IDX_SIZE), \ + "Incorrect size."); \ + BUILD_ASSERT(IS_PTR_ALIGNED_BYTES(mem_addr, MAX(dcache_align, _PBUF_IDX_SIZE)), \ + "Misaligned memory."); \ + BUILD_ASSERT(size >= (MAX(dcache_align, _PBUF_IDX_SIZE) + _PBUF_IDX_SIZE + \ + _PBUF_MIN_DATA_LEN), "Insufficient size."); \ + static PBUF_MAYBE_CONST struct pbuf_cfg cfg_##name = \ + PBUF_CFG_INIT(mem_addr, size, dcache_align); \ + static struct pbuf name = { \ + .cfg = &cfg_##name, \ + } + +/** + * @brief Initialize the Tx packet buffer. + * + * This function initializes the Tx packet buffer based on provided configuration. + * If the configuration is incorrect, the function will return error. + * + * It is recommended to use PBUF_DEFINE macro for build time initialization. 
+ * + * @param pb Pointer to the packed buffer containing + * configuration and data. Configuration has to be + * fixed before the initialization. + * @retval 0 on success. + * @retval -EINVAL when the input parameter is incorrect. + */ +int pbuf_tx_init(struct pbuf *pb); + +/** + * @brief Initialize the Rx packet buffer. + * + * This function initializes the Rx packet buffer. + * If the configuration is incorrect, the function will return error. + * + * It is recommended to use PBUF_DEFINE macro for build time initialization. + * + * @param pb Pointer to the packed buffer containing + * configuration and data. Configuration has to be + * fixed before the initialization. + * @retval 0 on success. + * @retval -EINVAL when the input parameter is incorrect. + */ +int pbuf_rx_init(struct pbuf *pb); + +/** + * @brief Write specified amount of data to the packet buffer. + * + * This function call writes specified amount of data to the packet buffer if + * the buffer will fit the data. + * + * @param pb A buffer to which to write. + * @param buf Pointer to the data to be written to the buffer. + * @param len Number of bytes to be written to the buffer. Must be positive. + * @retval int Number of bytes written, negative error code on fail. + * -EINVAL, if any of input parameter is incorrect. + * -ENOMEM, if len is bigger than the buffer can fit. + */ + +int pbuf_write(struct pbuf *pb, const char *buf, uint16_t len); + +/** + * @brief Read specified amount of data from the packet buffer. + * + * Single read allows to read the message send by the single write. + * The provided %p buf must be big enough to store the whole message. + * + * @param pb A buffer from which data will be read. + * @param buf Data pointer to which read data will be written. + * If NULL, len of stored message is returned. + * @param len Number of bytes to be read from the buffer. + * @retval int Bytes read, negative error code on fail. + * Bytes to be read, if buf == NULL. 
-EINVAL, if any of the input parameters is incorrect.
Z_TEST_SKIP_IFDEF(CONFIG_IPC_TEST_SKIP_UNBOUND); + int ret; struct test_ipc_event_state ev; static const struct ipc_test_cmd_reboot cmd_rebond = { { IPC_TEST_CMD_REBOND }, 10 }; @@ -251,6 +254,8 @@ ZTEST(ipc_sessions, test_rebond) ZTEST(ipc_sessions, test_local_rebond) { + Z_TEST_SKIP_IFDEF(CONFIG_IPC_TEST_SKIP_UNBOUND); + int ret; struct test_ipc_event_state ev; diff --git a/tests/subsys/ipc/ipc_sessions/testcase.yaml b/tests/subsys/ipc/ipc_sessions/testcase.yaml index ebd76310f37d..b6adb9ec2472 100644 --- a/tests/subsys/ipc/ipc_sessions/testcase.yaml +++ b/tests/subsys/ipc/ipc_sessions/testcase.yaml @@ -28,3 +28,26 @@ tests: extra_args: >- FILE_SUFFIX=cpuppr ipc_sessions_SNIPPET=nordic-ppr + sample.ipc.ipc_sessions.nrf54h20dk_cpuapp_no_unbound_cpuppr: + platform_allow: + - nrf54h20dk/nrf54h20/cpuapp + integration_platforms: + - nrf54h20dk/nrf54h20/cpuapp + extra_args: > + FILE_SUFFIX=cpuppr + ipc_sessions_SNIPPET=nordic-ppr + extra_configs: + - CONFIG_IPC_TEST_SKIP_UNBOUND=y + - CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1=y + sample.ipc.ipc_sessions.nrf54h20dk_cpuapp_cpuppr_no_unbound: + platform_allow: + - nrf54h20dk/nrf54h20/cpuapp + integration_platforms: + - nrf54h20dk/nrf54h20/cpuapp + extra_args: >- + FILE_SUFFIX=cpuppr + ipc_sessions_SNIPPET=nordic-ppr + extra_configs: + - CONFIG_IPC_TEST_SKIP_UNBOUND=y + - remote_CONFIG_IPC_SERVICE_BACKEND_ICMSG_V1=y +