1
+ from collections import defaultdict , deque
1
2
import time
2
- from dataclasses import dataclass
3
+ from dataclasses import asdict , dataclass
3
4
from datetime import datetime
4
- from typing import Dict , Protocol , runtime_checkable
5
+ from typing import Dict , List , Optional , Protocol , runtime_checkable
5
6
from zoneinfo import ZoneInfo
6
-
7
+ from loguru import logger
7
8
from pythclient .calendar import is_market_open
8
9
from pythclient .pythaccounts import PythPriceStatus
9
10
from pythclient .solana import SolanaPublicKey
10
11
12
+
13
@dataclass
class PriceUpdate:
    """Represents a single price with its timestamp (epoch seconds)."""

    # Epoch seconds at which this price was observed by the check loop.
    timestamp: int
    # The published price value at that moment.
    price: float
19
+
20
+
11
21
# Publishers further than this many slots behind are handled by the
# offline check instead of the stall check.
PUBLISHER_EXCLUSION_DISTANCE = 25

PUBLISHER_CACHE_MAX_LEN = 30
"""Roughly 30 mins of updates, since the check runs about once a minute"""

# NOTE: the values are bounded deques (see the default_factory below), not
# lists — the previous List[...] annotation would let type checkers accept
# list-only operations (e.g. slicing) that fail at runtime on a deque.
# The element type is a string forward reference so this assignment does not
# evaluate PriceUpdate at definition time.
PUBLISHER_CACHE: Dict[tuple[str, str], "deque[PriceUpdate]"] = defaultdict(
    lambda: deque(maxlen=PUBLISHER_CACHE_MAX_LEN)
)
"""
Cache that holds PriceUpdate(timestamp, price) entries for publisher/feed
combos as they stream in. Entries beyond `PUBLISHER_CACHE_MAX_LEN` are
automatically pruned (oldest first, via deque's maxlen).
Used by the PublisherStalledCheck to detect stalls in prices.
"""
14
33
15
34
16
35
@dataclass
@@ -35,17 +54,13 @@ class PublisherState:
35
54
36
55
@runtime_checkable
class PublisherCheck(Protocol):
    """Structural interface implemented by every per-publisher check.

    A check is constructed from the publisher state to inspect plus its
    configuration, evaluated via run(), and — on failure — reports details
    through error_message().
    """

    def __init__(self, state: PublisherState, config: PublisherCheckConfig): ...

    # Returns the state this check was constructed with.
    def state(self) -> PublisherState: ...

    # Returns True when the check passes (no alert needed).
    def run(self) -> bool: ...

    # Returns the alert payload describing the failure; only meaningful
    # after run() has returned False.
    def error_message(self) -> dict: ...
49
64
50
65
51
66
class PublisherWithinAggregateConfidenceCheck (PublisherCheck ):
@@ -240,6 +255,20 @@ def __init__(self, state: PublisherState, config: PublisherCheckConfig):
240
255
self .__abandoned_time_limit : int = int (config ["abandoned_time_limit" ])
241
256
self .__max_slot_distance : int = int (config ["max_slot_distance" ])
242
257
258
+ from pyth_observer .check .stall_detection import (
259
+ StallDetectionResult ,
260
+ StallDetector ,
261
+ ) # noqa: deferred import to avoid circular import
262
+
263
+ self .__detector = StallDetector (
264
+ stall_time_limit = self .__stall_time_limit ,
265
+ noise_threshold = float (config .get ("noise_threshold" )),
266
+ min_noise_samples = int (config .get ("min_noise_samples" )),
267
+ )
268
+
269
+ # Keep track of last analysis for error reporting
270
+ self .__last_analysis : Optional [StallDetectionResult ] = None
271
+
243
272
def state (self ) -> PublisherState :
244
273
return self .__state
245
274
@@ -254,36 +283,46 @@ def run(self) -> bool:
254
283
255
284
distance = self .__state .latest_block_slot - self .__state .slot
256
285
286
+ # Pass for redemption rates because they are expected to be static for long periods
287
+ if self .__state .asset_type == "Crypto Redemption Rate" :
288
+ logger .info (f"Redemption rate: Skipping { self .__state .symbol } " )
289
+ return True
290
+
257
291
# Pass when publisher is offline because PublisherOfflineCheck will be triggered
258
292
if distance >= self .__max_slot_distance :
259
293
return True
260
294
261
- publisher_key = (self .__state .publisher_name , self .__state .symbol )
262
295
current_time = int (time .time ())
263
- previous_price , last_change_time = PUBLISHER_CACHE .get (
264
- publisher_key , (None , None )
265
- )
266
296
267
- if previous_price is None or self .__state .price != previous_price :
268
- PUBLISHER_CACHE [publisher_key ] = (self .__state .price , current_time )
269
- return True
297
+ publisher_key = (self .__state .publisher_name , self .__state .symbol )
298
+ PUBLISHER_CACHE [publisher_key ].append (
299
+ PriceUpdate (current_time , self .__state .price )
300
+ ),
301
+ updates = PUBLISHER_CACHE [publisher_key ]
270
302
271
- time_since_last_change = current_time - last_change_time
272
- if time_since_last_change > self .__stall_time_limit :
273
- if time_since_last_change > self .__abandoned_time_limit :
274
- return True # Abandon this check after the abandoned time limit
275
- return False
303
+ # Analyze for stalls
304
+ result = self .__detector .analyze_updates (list (updates ))
305
+ logger .debug (f"Stall detection result: { result } " )
306
+
307
+ self .__last_analysis = result # For error logging
308
+
309
+ # If we've been stalled for too long, abandon this check
310
+ if result .is_stalled and result .duration > self .__abandoned_time_limit :
311
+ return True
276
312
277
- return True
313
+ return not result . is_stalled
278
314
279
315
    def error_message(self) -> dict:
        """Build the alert payload for a detected price stall.

        Precondition: run() has already executed and stored its
        StallDetectionResult in self.__last_analysis — calling this before
        any run would fail on the None analysis (NOTE(review): no guard here;
        relies on the caller only invoking error_message after a failed run).
        """
        # Human-readable duration, e.g. "123.4 seconds".
        stall_duration = f"{self.__last_analysis.duration:.1f} seconds"
        return {
            "msg": f"{self.__state.publisher_name} has been publishing the same price of {self.__state.symbol} for {stall_duration}",
            "type": "PublisherStalledCheck",
            "publisher": self.__state.publisher_name,
            "symbol": self.__state.symbol,
            "price": self.__state.price,
            "stall_type": self.__last_analysis.stall_type,
            "stall_duration": stall_duration,
            # Full detection details for debugging; assumes
            # StallDetectionResult is a dataclass (asdict) — TODO confirm.
            "analysis": asdict(self.__last_analysis),
        }
288
327
289
328
0 commit comments