@@ -58,10 +58,7 @@
     },
     std::{
         collections::BTreeMap,
-        sync::{
-            atomic::Ordering,
-            Arc,
-        },
+        sync::Arc,
         time::Duration,
     },
     tokio::time::Instant,
@@ -160,7 +157,7 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
         .program_subscribe(&system_program::id(), Some(config))
         .await?;
 
-    while !crate::SHOULD_EXIT.load(Ordering::Acquire) {
+    loop {
         match notif.next().await {
             Some(update) => {
                 let account: Account = match update.value.account.decode() {
@@ -213,8 +210,6 @@ pub async fn run(store: Arc<State>, pythnet_ws_endpoint: String) -> Result<()> {
             }
         }
     }
-
-    Ok(())
 }
 
 /// Fetch existing GuardianSet accounts from Wormhole.
@@ -281,79 +276,69 @@ pub async fn spawn(opts: RunOptions, state: Arc<State>) -> Result<()> {
     let task_listener = {
         let store = state.clone();
         let pythnet_ws_endpoint = opts.pythnet.ws_addr.clone();
+        let mut exit = crate::EXIT.subscribe();
         tokio::spawn(async move {
-            while !crate::SHOULD_EXIT.load(Ordering::Acquire) {
+            loop {
                 let current_time = Instant::now();
-
-                if let Err(ref e) = run(store.clone(), pythnet_ws_endpoint.clone()).await {
-                    tracing::error!(error = ?e, "Error in Pythnet network listener.");
-                    if current_time.elapsed() < Duration::from_secs(30) {
-                        tracing::error!("Pythnet listener restarting too quickly. Sleep 1s.");
-                        tokio::time::sleep(Duration::from_secs(1)).await;
+                tokio::select! {
+                    _ = exit.changed() => break,
+                    Err(err) = run(store.clone(), pythnet_ws_endpoint.clone()) => {
+                        tracing::error!(error = ?err, "Error in Pythnet network listener.");
+                        if current_time.elapsed() < Duration::from_secs(30) {
+                            tracing::error!("Pythnet listener restarting too quickly. Sleep 1s.");
+                            tokio::time::sleep(Duration::from_secs(1)).await;
+                        }
                     }
                 }
             }
-
             tracing::info!("Shutting down Pythnet listener...");
         })
     };
 
     let task_guardian_watcher = {
         let store = state.clone();
         let pythnet_http_endpoint = opts.pythnet.http_addr.clone();
+        let mut exit = crate::EXIT.subscribe();
         tokio::spawn(async move {
-            while !crate::SHOULD_EXIT.load(Ordering::Acquire) {
-                // Poll for new guardian sets every 60 seconds. We use a short wait time so we can
-                // properly exit if a quit signal was received. This isn't a perfect solution, but
-                // it's good enough for now.
-                for _ in 0..60 {
-                    if crate::SHOULD_EXIT.load(Ordering::Acquire) {
-                        break;
-                    }
-                    tokio::time::sleep(Duration::from_secs(1)).await;
-                }
-
-                match fetch_existing_guardian_sets(
-                    store.clone(),
-                    pythnet_http_endpoint.clone(),
-                    opts.wormhole.contract_addr,
-                )
-                .await
-                {
-                    Ok(_) => {}
-                    Err(err) => {
-                        tracing::error!(error = ?err, "Failed to poll for new guardian sets.")
+            loop {
+                tokio::select! {
+                    _ = exit.changed() => break,
+                    _ = tokio::time::sleep(Duration::from_secs(60)) => {
+                        if let Err(err) = fetch_existing_guardian_sets(
+                            store.clone(),
+                            pythnet_http_endpoint.clone(),
+                            opts.wormhole.contract_addr,
+                        )
+                        .await
+                        {
+                            tracing::error!(error = ?err, "Failed to poll for new guardian sets.")
+                        }
                     }
                 }
             }
-
             tracing::info!("Shutting down Pythnet guardian set poller...");
         })
     };
 
 
     let task_price_feeds_metadata_updater = {
         let price_feeds_state = state.clone();
+        let mut exit = crate::EXIT.subscribe();
         tokio::spawn(async move {
-            while !crate::SHOULD_EXIT.load(Ordering::Acquire) {
-                if let Err(e) = fetch_and_store_price_feeds_metadata(
-                    price_feeds_state.as_ref(),
-                    &opts.pythnet.mapping_addr,
-                    &rpc_client,
-                )
-                .await
-                {
-                    tracing::error!("Error in fetching and storing price feeds metadata: {}", e);
-                }
-                // This loop with a sleep interval of 1 second allows the task to check for an exit signal at a
-                // fine-grained interval. Instead of sleeping directly for the entire `price_feeds_update_interval`,
-                // which could delay the response to an exit signal, this approach ensures the task can exit promptly
-                // if `crate::SHOULD_EXIT` is set, enhancing the responsiveness of the service to shutdown requests.
-                for _ in 0..DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL {
-                    if crate::SHOULD_EXIT.load(Ordering::Acquire) {
-                        break;
+            loop {
+                tokio::select! {
+                    _ = exit.changed() => break,
+                    _ = tokio::time::sleep(Duration::from_secs(DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL)) => {
+                        if let Err(e) = fetch_and_store_price_feeds_metadata(
+                            price_feeds_state.as_ref(),
+                            &opts.pythnet.mapping_addr,
+                            &rpc_client,
+                        )
+                        .await
+                        {
+                            tracing::error!("Error in fetching and storing price feeds metadata: {}", e);
+                        }
                     }
-                    tokio::time::sleep(Duration::from_secs(1)).await;
                 }
             }
         })
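The diff above replaces the `crate::SHOULD_EXIT` atomic flag, which each task polled once per second, with a `crate::EXIT` channel whose receivers are awaited inside `tokio::select!`, so tasks react to shutdown as soon as it is signalled while sleeping for their full work interval. The definition of `crate::EXIT` is not shown in this hunk; the `.subscribe()` and `.changed()` calls are consistent with a `tokio::sync::watch` sender. The following is a minimal, self-contained sketch of that pattern (names like `worker` and `exit_tx` are illustrative; the channel is local here rather than a crate-level static):

// Requires tokio with the "full" feature set.
use {std::time::Duration, tokio::sync::watch};

// Worker following the shape of the tasks in the diff: do periodic work
// until the shutdown channel reports a change, then fall through and exit.
async fn worker(mut exit: watch::Receiver<bool>) {
    loop {
        tokio::select! {
            // Resolves as soon as the sender publishes a new value.
            _ = exit.changed() => break,
            // Stand-in for the task's periodic work (polling, fetching, ...).
            _ = tokio::time::sleep(Duration::from_secs(1)) => {
                println!("tick");
            }
        }
    }
    println!("shutting down");
}

#[tokio::main]
async fn main() {
    // In the diff the sender lives at crate level (`crate::EXIT`) and each
    // task calls `crate::EXIT.subscribe()`; it is created locally here.
    let (exit_tx, exit_rx) = watch::channel(false);
    let handle = tokio::spawn(worker(exit_rx));
    tokio::time::sleep(Duration::from_secs(3)).await;
    let _ = exit_tx.send(true); // broadcast shutdown to every subscriber
    handle.await.unwrap();
}

Compared with the removed one-second polling loops, the select-based version lets each task wait out its entire interval (for example `DEFAULT_PRICE_FEEDS_CACHE_UPDATE_INTERVAL`) without delaying shutdown.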