@@ -139,6 +139,14 @@ enum ept_bounding_state {
139139 EPT_READY , /* Bounding is done. Bound callback was called. */
140140};
141141
/* State of the endpoint rebounding machinery, kept per endpoint in
 * ept_data.rebound_state and driven atomically (atomic_cas) from the
 * deregister/register paths and the bound-process work queue.
 */
enum ept_rebound_state {
	EPT_NORMAL = 0,		/* No endpoint rebounding is needed. */
	EPT_DEREGISTERED,	/* Endpoint was deregistered. */
	EPT_REBOUNDING,		/* Rebounding was requested, waiting for work queue to
				 * start rebounding process.
				 */
};

142150struct channel_config {
143151 uint8_t * blocks_ptr ; /* Address where the blocks start. */
144152 size_t block_size ; /* Size of one block. */
@@ -159,6 +167,7 @@ struct icbmsg_config {
struct ept_data {
	const struct ipc_ept_cfg *cfg;	/* Endpoint configuration. */
	atomic_t state;			/* Bounding state (enum ept_bounding_state). */
	atomic_t rebound_state;		/* Rebounding state (enum ept_rebound_state),
					 * modified with atomic_cas by the
					 * register/deregister and work-queue paths.
					 */
	uint8_t addr;			/* Endpoint address. */
};

@@ -735,6 +744,18 @@ static void ept_bound_process(struct k_work *item)
735744 }
736745 k_mutex_unlock (& dev_data -> mutex );
737746 }
747+
748+ /* Check if any endpoint is ready to rebound and call the callback if it is. */
749+ for (i = 0 ; i < NUM_EPT ; i ++ ) {
750+ ept = & dev_data -> ept [i ];
751+ matching_state = atomic_cas (& ept -> rebound_state , EPT_REBOUNDING ,
752+ EPT_NORMAL );
753+ if (matching_state ) {
754+ if (ept -> cfg -> cb .bound != NULL ) {
755+ ept -> cfg -> cb .bound (ept -> cfg -> priv );
756+ }
757+ }
758+ }
738759}
739760
740761/**
@@ -758,7 +779,10 @@ static struct ept_data *get_ept_and_rx_validate(struct backend_data *dev_data,
758779 state = atomic_get (& ept -> state );
759780
760781 if (state == EPT_READY ) {
761- /* Valid state - nothing to do. */
782+ /* Ready state, ensure that it is not deregistered nor rebounding. */
783+ if (atomic_get (& ept -> rebound_state ) != EPT_NORMAL ) {
784+ return NULL ;
785+ }
762786 } else if (state == EPT_BOUNDING ) {
763787 /* Endpoint bound callback was not called yet - call it. */
764788 atomic_set (& ept -> state , EPT_READY );
@@ -1008,9 +1032,24 @@ static int register_ept(const struct device *instance, void **token,
10081032{
10091033 struct backend_data * dev_data = instance -> data ;
10101034 struct ept_data * ept = NULL ;
1035+ bool matching_state ;
10111036 int ept_index ;
10121037 int r = 0 ;
10131038
1039+ /* Try to find endpoint to rebound */
1040+ for (ept_index = 0 ; ept_index < NUM_EPT ; ept_index ++ ) {
1041+ ept = & dev_data -> ept [ept_index ];
1042+ if (ept -> cfg == cfg ) {
1043+ matching_state = atomic_cas (& ept -> rebound_state , EPT_DEREGISTERED ,
1044+ EPT_REBOUNDING );
1045+ if (!matching_state ) {
1046+ return - EINVAL ;
1047+ }
1048+ schedule_ept_bound_process (dev_data );
1049+ return 0 ;
1050+ }
1051+ }
1052+
10141053 /* Reserve new endpoint index. */
10151054 ept_index = atomic_inc (& dev_data -> flags ) & FLAG_EPT_COUNT_MASK ;
10161055 if (ept_index >= NUM_EPT ) {
@@ -1037,6 +1076,23 @@ static int register_ept(const struct device *instance, void **token,
10371076 return r ;
10381077}
10391078
1079+ /**
1080+ * Backend endpoint deregistration callback.
1081+ */
1082+ static int deregister_ept (const struct device * instance , void * token )
1083+ {
1084+ struct ept_data * ept = token ;
1085+ bool matching_state ;
1086+
1087+ matching_state = atomic_cas (& ept -> rebound_state , EPT_NORMAL , EPT_DEREGISTERED );
1088+
1089+ if (!matching_state ) {
1090+ return - EINVAL ;
1091+ }
1092+
1093+ return 0 ;
1094+ }
1095+
10401096/**
10411097 * Returns maximum TX buffer size.
10421098 */
@@ -1161,7 +1217,7 @@ const static struct ipc_service_backend backend_ops = {
11611217 .close_instance = NULL , /* not implemented */
11621218 .send = send ,
11631219 .register_endpoint = register_ept ,
1164- .deregister_endpoint = NULL , /* not implemented */
1220+ .deregister_endpoint = deregister_ept ,
11651221 .get_tx_buffer_size = get_tx_buffer_size ,
11661222 .get_tx_buffer = get_tx_buffer ,
11671223 .drop_tx_buffer = drop_tx_buffer ,
0 commit comments