 from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
 
 from meshcore.events import EventType
+from meshcore.packets import BinaryReqType
 
 from .const import (
     CONF_NAME,
     CONF_SELF_TELEMETRY_ENABLED,
     CONF_SELF_TELEMETRY_INTERVAL,
     DEFAULT_SELF_TELEMETRY_INTERVAL,
-    REPEATER_LOGIN_REFRESH_INTERVAL,
 )
 from .meshcore_api import MeshCoreAPI
 
@@ -173,19 +173,14 @@ async def _update_repeater(self, repeater_config):
         # Get the current failure count
         failure_count = self._repeater_consecutive_failures.get(pubkey_prefix, 0)
 
-        # Check if we need to login (initial login, periodic refresh, or after failures)
+        # Check if we need to login (initial login or after failures)
         last_login_time = self._repeater_login_times.get(pubkey_prefix)
-        current_time = self._current_time()
         needs_initial_login = last_login_time is None
-        needs_periodic_refresh = (last_login_time is not None and
-                                  current_time - last_login_time >= REPEATER_LOGIN_REFRESH_INTERVAL)
         needs_failure_recovery = failure_count >= MAX_REPEATER_FAILURES_BEFORE_LOGIN
 
-        if needs_initial_login or needs_periodic_refresh or needs_failure_recovery:
+        if needs_initial_login or needs_failure_recovery:
             if needs_initial_login:
                 self.logger.info(f"Attempting initial login to repeater {repeater_name}")
-            elif needs_periodic_refresh:
-                self.logger.info(f"Attempting periodic login refresh to repeater {repeater_name} (last login: {last_login_time})")
             else:
                 self.logger.info(f"Attempting login to repeater {repeater_name} after {failure_count} failures")
 
@@ -211,7 +206,7 @@ async def _update_repeater(self, repeater_config):
 
             # Request status from the repeater
             self.logger.debug(f"Sending status request to repeater: {repeater_name} ({pubkey_prefix})")
-            await self.api.mesh_core.commands.send_statusreq(contact)
+            await self.api.mesh_core.commands.send_binary_req(contact, BinaryReqType.STATUS)
             result = await self.api.mesh_core.wait_for_event(
                 EventType.STATUS_RESPONSE,
                 attribute_filters={"pubkey_prefix": pubkey_prefix},
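For context, the status and telemetry fetches both move from the dedicated send_statusreq/send_telemetry_req commands to meshcore's generic binary-request helper. A minimal sketch of the new call pattern, using only names that appear in this diff; the wrapper function and its parameters are illustrative placeholders for the coordinator's self.api.mesh_core, contact dict, and pubkey prefix:

    from meshcore.events import EventType
    from meshcore.packets import BinaryReqType

    async def fetch_status(mesh_core, contact, pubkey_prefix):
        # Send the binary status request, then wait for the matching response event.
        await mesh_core.commands.send_binary_req(contact, BinaryReqType.STATUS)
        return await mesh_core.wait_for_event(
            EventType.STATUS_RESPONSE,
            attribute_filters={"pubkey_prefix": pubkey_prefix},
        )

The telemetry path is identical apart from BinaryReqType.TELEMETRY and EventType.TELEMETRY_RESPONSE.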
@@ -224,12 +219,23 @@ async def _update_repeater(self, repeater_config):
                 # Increment failure count and apply backoff
                 new_failure_count = failure_count + 1
                 self._repeater_consecutive_failures[pubkey_prefix] = new_failure_count
-                self._apply_repeater_backoff(pubkey_prefix, new_failure_count)
+
+                # Reset path after 5 failures if there's an established path
+                if new_failure_count == 5 and contact and contact.get("out_path_len", 0) != -1:
+                    try:
+                        await self.api.mesh_core.commands.reset_path(pubkey_prefix)
+                        self.logger.info(f"Reset path for repeater {repeater_name} after 5 failures")
+                    except Exception as ex:
+                        self.logger.warning(f"Failed to reset path for repeater {repeater_name}: {ex}")
+
+                update_interval = repeater_config.get(CONF_REPEATER_UPDATE_INTERVAL, DEFAULT_REPEATER_UPDATE_INTERVAL)
+                self._apply_repeater_backoff(pubkey_prefix, new_failure_count, update_interval)
             elif result.payload.get('uptime', 0) == 0:
                 self.logger.warn(f"Malformed status response from repeater {repeater_name}: {result.payload}")
                 new_failure_count = failure_count + 1
                 self._repeater_consecutive_failures[pubkey_prefix] = new_failure_count
-                self._apply_repeater_backoff(pubkey_prefix, new_failure_count)
+                update_interval = repeater_config.get(CONF_REPEATER_UPDATE_INTERVAL, DEFAULT_REPEATER_UPDATE_INTERVAL)
+                self._apply_repeater_backoff(pubkey_prefix, new_failure_count, update_interval)
             else:
                 self.logger.debug(f"Successfully updated repeater {repeater_name}")
                 # Reset failure count on success
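The path reset above keys off the contact's out_path_len field, which the check treats as "no established path" when it is -1; a reset is only attempted once a learned (and possibly stale) path exists. A small sketch of that guard in isolation, with a made-up helper name:

    async def maybe_reset_stale_path(commands, contact, pubkey_prefix, failure_count):
        # Only clear a path that was actually learned; out_path_len == -1 means
        # there is nothing to reset yet.
        if failure_count == 5 and contact and contact.get("out_path_len", 0) != -1:
            await commands.reset_path(pubkey_prefix)
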
@@ -248,23 +254,23 @@ async def _update_repeater(self, repeater_config):
             # Increment failure count and apply backoff
             new_failure_count = self._repeater_consecutive_failures.get(pubkey_prefix, 0) + 1
             self._repeater_consecutive_failures[pubkey_prefix] = new_failure_count
-            self._apply_repeater_backoff(pubkey_prefix, new_failure_count)
+            update_interval = repeater_config.get(CONF_REPEATER_UPDATE_INTERVAL, DEFAULT_REPEATER_UPDATE_INTERVAL)
+            self._apply_repeater_backoff(pubkey_prefix, new_failure_count, update_interval)
         finally:
             # Remove this task from active tasks
             if pubkey_prefix in self._active_repeater_tasks:
                 self._active_repeater_tasks.pop(pubkey_prefix)
 
-    def _apply_backoff(self, pubkey_prefix: str, failure_count: int, update_type: str = "repeater") -> None:
+    def _apply_backoff(self, pubkey_prefix: str, failure_count: int, update_interval: int, update_type: str = "repeater") -> None:
         """Apply exponential backoff delay for failed updates.
 
-        Uses DEFAULT_UPDATE_TICK as base since that's how often we check for updates.
-
         Args:
             pubkey_prefix: The node's public key prefix
             failure_count: Number of consecutive failures
+            update_interval: The configured update interval to cap the backoff at
             update_type: Type of update ("repeater" or "telemetry")
         """
-        backoff_delay = min(REPEATER_BACKOFF_BASE ** failure_count, REPEATER_BACKOFF_MAX_MULTIPLIER)
+        backoff_delay = min(REPEATER_BACKOFF_BASE ** failure_count, update_interval)
         next_update_time = self._current_time() + backoff_delay
 
         if update_type == "telemetry":
@@ -274,11 +280,12 @@ def _apply_backoff(self, pubkey_prefix: str, failure_count: int, update_type: st
 
         self.logger.debug(f"Applied backoff for {update_type} {pubkey_prefix}: "
                           f"failure_count={failure_count}, "
-                          f"delay={backoff_delay}s")
+                          f"delay={backoff_delay}s, "
+                          f"interval_cap={update_interval}s")
 
-    def _apply_repeater_backoff(self, pubkey_prefix: str, failure_count: int) -> None:
+    def _apply_repeater_backoff(self, pubkey_prefix: str, failure_count: int, update_interval: int) -> None:
         """Apply exponential backoff delay for failed repeater updates."""
-        self._apply_backoff(pubkey_prefix, failure_count, "repeater")
+        self._apply_backoff(pubkey_prefix, failure_count, update_interval, "repeater")
 
     async def _update_node_telemetry(self, contact, pubkey_prefix: str, node_name: str, update_interval: int):
         """Update telemetry for a node (repeater or client.
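The backoff change caps the exponential delay at the node's configured polling interval instead of the previous fixed REPEATER_BACKOFF_MAX_MULTIPLIER ceiling. A rough sketch of the resulting schedule, assuming REPEATER_BACKOFF_BASE is 2 and a 900-second interval (both values are illustrative, not taken from this diff):

    REPEATER_BACKOFF_BASE = 2  # assumed value, for illustration only
    update_interval = 900      # example configured interval in seconds

    for failure_count in range(1, 12):
        # Same formula as _apply_backoff: exponential growth, capped at the interval.
        backoff_delay = min(REPEATER_BACKOFF_BASE ** failure_count, update_interval)
        print(failure_count, backoff_delay)
    # Delays grow 2, 4, 8, ..., 512, then stay pinned at 900 once 2**n exceeds it.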
@@ -296,7 +303,7 @@ async def _update_node_telemetry(self, contact, pubkey_prefix: str, node_name: s
 
         try:
             self.logger.debug(f"Sending telemetry request to node: {node_name} ({pubkey_prefix})")
-            await self.api.mesh_core.commands.send_telemetry_req(contact)
+            await self.api.mesh_core.commands.send_binary_req(contact, BinaryReqType.TELEMETRY)
             telemetry_result = await self.api.mesh_core.wait_for_event(
                 EventType.TELEMETRY_RESPONSE,
                 attribute_filters={"pubkey_prefix": pubkey_prefix},
@@ -314,14 +321,32 @@ async def _update_node_telemetry(self, contact, pubkey_prefix: str, node_name: s
                 # Increment failure count and apply backoff
                 new_failure_count = failure_count + 1
                 self._telemetry_consecutive_failures[pubkey_prefix] = new_failure_count
-                self._apply_backoff(pubkey_prefix, new_failure_count, "telemetry")
+
+                # Reset path after 5 failures if there's an established path
+                if new_failure_count == 5 and contact and contact.get("out_path_len", 0) != -1:
+                    try:
+                        await self.api.mesh_core.commands.reset_path(pubkey_prefix)
+                        self.logger.info(f"Reset path for node {node_name} after 5 telemetry failures")
+                    except Exception as ex:
+                        self.logger.warning(f"Failed to reset path for node {node_name}: {ex}")
+
+                self._apply_backoff(pubkey_prefix, new_failure_count, update_interval, "telemetry")
 
         except Exception as ex:
             self.logger.warn(f"Exception requesting telemetry from node {node_name}: {ex}")
             # Increment failure count and apply backoff
             new_failure_count = failure_count + 1
             self._telemetry_consecutive_failures[pubkey_prefix] = new_failure_count
-            self._apply_backoff(pubkey_prefix, new_failure_count, "telemetry")
+
+            # Reset path after 5 failures if there's an established path
+            if new_failure_count == 5 and contact and contact.get("out_path_len", 0) != -1:
+                try:
+                    await self.api.mesh_core.commands.reset_path(pubkey_prefix)
+                    self.logger.info(f"Reset path for node {node_name} after 5 telemetry failures")
+                except Exception as reset_ex:
+                    self.logger.warning(f"Failed to reset path for node {node_name}: {reset_ex}")
+
+            self._apply_backoff(pubkey_prefix, new_failure_count, update_interval, "telemetry")
         finally:
             # Remove this task from active telemetry tasks
             if pubkey_prefix in self._active_telemetry_tasks: