@@ -146,6 +146,14 @@ enum ept_bounding_state {
146146 EPT_READY , /* Bounding is done. Bound callback was called. */
147147};
148148
/* States of the endpoint rebounding process (tracked per endpoint in
 * struct ept_data::rebound_state).
 */
enum ept_rebound_state {
	EPT_NORMAL = 0,		/* No endpoint rebounding is needed. */
	EPT_DEREGISTERED,	/* Endpoint was deregistered. */
	EPT_REBOUNDING,		/* Rebounding was requested, waiting for work queue to
				 * start rebounding process.
				 */
};
149157struct channel_config {
150158 uint8_t * blocks_ptr ; /* Address where the blocks start. */
151159 size_t block_size ; /* Size of one block. */
@@ -166,6 +174,7 @@ struct icbmsg_config {
struct ept_data {
	const struct ipc_ept_cfg *cfg;	/* Endpoint configuration. */
	atomic_t state;			/* Bounding state. */
	atomic_t rebound_state;		/* Rebounding state (enum ept_rebound_state). */
	uint8_t addr;			/* Endpoint address. */
};
171180
@@ -775,6 +784,18 @@ static void ept_bound_process(struct backend_data *dev_data)
775784 k_mutex_unlock (& dev_data -> mutex );
776785#endif
777786 }
787+
788+ /* Check if any endpoint is ready to rebound and call the callback if it is. */
789+ for (i = 0 ; i < NUM_EPT ; i ++ ) {
790+ ept = & dev_data -> ept [i ];
791+ matching_state = atomic_cas (& ept -> rebound_state , EPT_REBOUNDING ,
792+ EPT_NORMAL );
793+ if (matching_state ) {
794+ if (ept -> cfg -> cb .bound != NULL ) {
795+ ept -> cfg -> cb .bound (ept -> cfg -> priv );
796+ }
797+ }
798+ }
778799}
779800
780801/**
@@ -798,7 +819,10 @@ static struct ept_data *get_ept_and_rx_validate(struct backend_data *dev_data,
798819 state = atomic_get (& ept -> state );
799820
800821 if (state == EPT_READY ) {
801- /* Valid state - nothing to do. */
822+ /* Ready state, ensure that it is not deregistered nor rebounding. */
823+ if (atomic_get (& ept -> rebound_state ) != EPT_NORMAL ) {
824+ return NULL ;
825+ }
802826 } else if (state == EPT_BOUNDING ) {
803827 /* Endpoint bound callback was not called yet - call it. */
804828 atomic_set (& ept -> state , EPT_READY );
@@ -1060,9 +1084,28 @@ static int register_ept(const struct device *instance, void **token,
10601084{
10611085 struct backend_data * dev_data = instance -> data ;
10621086 struct ept_data * ept = NULL ;
1087+ bool matching_state ;
10631088 int ept_index ;
10641089 int r = 0 ;
10651090
1091+ /* Try to find endpoint to rebound */
1092+ for (ept_index = 0 ; ept_index < NUM_EPT ; ept_index ++ ) {
1093+ ept = & dev_data -> ept [ept_index ];
1094+ if (ept -> cfg == cfg ) {
1095+ matching_state = atomic_cas (& ept -> rebound_state , EPT_DEREGISTERED ,
1096+ EPT_REBOUNDING );
1097+ if (!matching_state ) {
1098+ return - EINVAL ;
1099+ }
1100+ #ifdef CONFIG_MULTITHREADING
1101+ schedule_ept_bound_process (dev_data );
1102+ #else
1103+ ept_bound_process (dev_data );
1104+ #endif
1105+ return 0 ;
1106+ }
1107+ }
1108+
10661109 /* Reserve new endpoint index. */
10671110 ept_index = atomic_inc (& dev_data -> flags ) & FLAG_EPT_COUNT_MASK ;
10681111 if (ept_index >= NUM_EPT ) {
@@ -1093,6 +1136,23 @@ static int register_ept(const struct device *instance, void **token,
10931136 return r ;
10941137}
10951138
1139+ /**
1140+ * Backend endpoint deregistration callback.
1141+ */
1142+ static int deregister_ept (const struct device * instance , void * token )
1143+ {
1144+ struct ept_data * ept = token ;
1145+ bool matching_state ;
1146+
1147+ matching_state = atomic_cas (& ept -> rebound_state , EPT_NORMAL , EPT_DEREGISTERED );
1148+
1149+ if (!matching_state ) {
1150+ return - EINVAL ;
1151+ }
1152+
1153+ return 0 ;
1154+ }
1155+
10961156/**
10971157 * Returns maximum TX buffer size.
10981158 */
@@ -1226,7 +1286,7 @@ const static struct ipc_service_backend backend_ops = {
12261286 .close_instance = NULL , /* not implemented */
12271287 .send = send ,
12281288 .register_endpoint = register_ept ,
1229- .deregister_endpoint = NULL , /* not implemented */
1289+ .deregister_endpoint = deregister_ept ,
12301290 .get_tx_buffer_size = get_tx_buffer_size ,
12311291 .get_tx_buffer = get_tx_buffer ,
12321292 .drop_tx_buffer = drop_tx_buffer ,
0 commit comments