
Commit ee718fb

Commit message: all tests passing
Parent: 89215f2

19 files changed: +565 additions, -1003 deletions

pytest.ini

Lines changed: 8 additions & 0 deletions
@@ -15,3 +15,11 @@ log_cli_date_format = %Y-%m-%d %H:%M:%S
 addopts =
     --verbose
     -xvs
+
+# Custom markers
+markers =
+    performance: marks tests as performance tests (deselect with '-m "not performance"')
+    slow: marks tests as slow (deselect with '-m "not slow"')
+    unit: marks tests as unit tests
+    integration: marks tests as integration tests
+    realtime: marks tests that require real-time connections
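
For reference, the new markers are applied with pytest.mark decorators and filtered with -m. A minimal sketch (the test names below are illustrative, not taken from this repository):

    # illustrative_markers_test.py - hypothetical tests showing the new markers in use
    import pytest

    @pytest.mark.performance
    def test_tick_ingest_throughput():
        assert True

    @pytest.mark.slow
    @pytest.mark.integration
    def test_full_session_replay():
        assert True

    # Skip the expensive groups on a normal run:
    #   pytest -m "not performance and not slow"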

src/project_x_py/data/mmap_storage.py

Lines changed: 11 additions & 3 deletions
@@ -378,11 +378,15 @@ def __init__(
         self.row_size = (len(self.columns) + 1) * self.dtype.itemsize
         self.current_size = 0

-        # Determine current size from existing file
+        # Load metadata to get actual data size if file exists
         if self.filename.exists():
             self.open()  # open() is idempotent and thread-safe
-            if self._data_file_size > 0 and self.row_size > 0:
-                self.current_size = self._data_file_size // self.row_size
+            self._load_metadata()
+            # Get current_size from metadata if available
+            if "_timeseries_meta" in self._metadata:
+                self.current_size = self._metadata["_timeseries_meta"].get(
+                    "current_size", 0
+                )

     def append_data(self, timestamp: float, values: dict[str, float]) -> bool:
         """
@@ -425,6 +429,10 @@ def append_data(self, timestamp: float, values: dict[str, float]) -> bool:
                 self.mmap.flush()
                 self.current_size += 1

+                # Update metadata with current size
+                self._metadata["_timeseries_meta"] = {"current_size": self.current_size}
+                self._save_metadata()
+
                 return True

         except Exception:
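
The constructor no longer infers current_size from the data file's byte length; it round-trips the logical row count through the storage metadata, which append_data keeps up to date. A self-contained sketch of that pattern using a JSON sidecar; the MetaBackedStore class below is an illustrative stand-in, not the project's mmap-backed implementation:

    # Sketch only: persist the logical row count in a metadata sidecar instead of
    # deriving it from file size. Names here are hypothetical, not from project_x_py.
    import json
    from pathlib import Path

    class MetaBackedStore:
        def __init__(self, path: Path) -> None:
            self.meta_path = path.with_name(path.name + ".meta.json")
            self.current_size = 0
            if self.meta_path.exists():
                meta = json.loads(self.meta_path.read_text())
                self.current_size = meta.get("_timeseries_meta", {}).get("current_size", 0)

        def append(self) -> None:
            self.current_size += 1
            self.meta_path.write_text(
                json.dumps({"_timeseries_meta": {"current_size": self.current_size}})
            )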

src/project_x_py/realtime/circuit_breaker.py

Lines changed: 1 addition & 1 deletion
@@ -620,7 +620,7 @@ async def configure_circuit_breaker(
         max_recovery_time: float = 300.0,
         slow_call_threshold: float = 2.0,
         enable_global_circuit: bool = True,
-        _enable_per_event_circuits: bool = True,
+        enable_per_event_circuits: bool = True,
     ) -> None:
         """
         Configure circuit breaker settings.
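
Renaming _enable_per_event_circuits to enable_per_event_circuits makes the flag a public keyword argument. A hypothetical call site (the realtime_client object is assumed for illustration; this hunk only shows the signature):

    # Hypothetical usage sketch; "realtime_client" stands in for whatever object
    # exposes configure_circuit_breaker in project_x_py.realtime.
    async def tune_breakers(realtime_client) -> None:
        await realtime_client.configure_circuit_breaker(
            max_recovery_time=300.0,
            slow_call_threshold=2.0,
            enable_global_circuit=True,
            enable_per_event_circuits=False,  # addressable without the leading underscore
        )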

src/project_x_py/realtime_data_manager/core.py

Lines changed: 14 additions & 13 deletions
@@ -1083,19 +1083,20 @@ async def cleanup(self) -> None:
             # EventBus handles all event cleanup
             self.indicator_cache.clear()

-            # Backward-compatible attributes used in some tests/examples
-            # Use dynamic attribute access safely without type checker complaints
-            bars_attr = getattr(self, "bars", None)
-            if isinstance(bars_attr, dict):
-                for _tf in list(bars_attr.keys()):
-                    bars_attr[_tf] = []
-            ticks_attr = getattr(self, "ticks", None)
-            if isinstance(ticks_attr, list):
-                ticks_attr.clear()
-            dom_attr = getattr(self, "dom_data", None)
-            if isinstance(dom_attr, dict):
-                for _k in list(dom_attr.keys()):
-                    dom_attr[_k] = []
+        # Backward-compatible attributes used in some tests/examples
+        # Clear these regardless of lock type
+        # Use dynamic attribute access safely without type checker complaints
+        bars_attr = getattr(self, "bars", None)
+        if isinstance(bars_attr, dict):
+            for _tf in list(bars_attr.keys()):
+                bars_attr[_tf] = []
+        ticks_attr = getattr(self, "ticks", None)
+        if isinstance(ticks_attr, list):
+            ticks_attr.clear()
+        dom_attr = getattr(self, "dom_data", None)
+        if isinstance(dom_attr, dict):
+            for _k in list(dom_attr.keys()):
+                dom_attr[_k] = []

         self.logger.info("✅ RealtimeDataManager cleanup completed")

src/project_x_py/realtime_data_manager/memory_management.py

Lines changed: 61 additions & 48 deletions
@@ -101,11 +101,11 @@
 if TYPE_CHECKING:
     from project_x_py.types.stats_types import RealtimeDataManagerStats

+import polars as pl
+
 if TYPE_CHECKING:
     from asyncio import Lock

-import polars as pl
-
 logger = logging.getLogger(__name__)


@@ -392,58 +392,71 @@ async def _cleanup_old_data(self) -> None:
         if current_time - self.last_cleanup < self.cleanup_interval:
             return

-        async with self.data_lock:
-            total_bars_before = 0
-            total_bars_after = 0
-
-            # Cleanup each timeframe's data
-            for tf_key in self.timeframes:
-                if tf_key in self.data and not self.data[tf_key].is_empty():
-                    initial_count = len(self.data[tf_key])
-                    total_bars_before += initial_count
-
-                    # Check for buffer overflow first
-                    is_overflow, utilization = await self._check_buffer_overflow(tf_key)
-                    if is_overflow:
-                        await self._handle_buffer_overflow(tf_key, utilization)
-                        total_bars_after += len(self.data[tf_key])
-                        continue
-
-                    # Check if overflow is needed (if mixin is available)
-                    if hasattr(
-                        self, "_check_overflow_needed"
-                    ) and await self._check_overflow_needed(tf_key):
-                        await self._overflow_to_disk(tf_key)
-                        # Data has been overflowed, update count
-                        total_bars_after += len(self.data[tf_key])
-                        continue
-
-                    # Keep only the most recent bars (sliding window)
-                    if initial_count > self.max_bars_per_timeframe:
-                        self.data[tf_key] = self.data[tf_key].tail(
-                            self.max_bars_per_timeframe
-                        )
+        # Import here to avoid circular dependency
+        from project_x_py.utils.lock_optimization import AsyncRWLock
+
+        # Use appropriate lock method based on lock type
+        if isinstance(self.data_lock, AsyncRWLock):
+            async with self.data_lock.write_lock():
+                await self._perform_cleanup()
+        else:
+            async with self.data_lock:
+                await self._perform_cleanup()

+    async def _perform_cleanup(self) -> None:
+        """Perform the actual cleanup logic (extracted for lock handling)."""
+        total_bars_before = 0
+        total_bars_after = 0
+
+        # Cleanup each timeframe's data
+        for tf_key in self.timeframes:
+            if tf_key in self.data and not self.data[tf_key].is_empty():
+                initial_count = len(self.data[tf_key])
+                total_bars_before += initial_count
+
+                # Check for buffer overflow first
+                is_overflow, utilization = await self._check_buffer_overflow(tf_key)
+                if is_overflow:
+                    await self._handle_buffer_overflow(tf_key, utilization)
+                    total_bars_after += len(self.data[tf_key])
+                    continue
+
+                # Check if overflow is needed (if mixin is available)
+                if hasattr(
+                    self, "_check_overflow_needed"
+                ) and await self._check_overflow_needed(tf_key):
+                    await self._overflow_to_disk(tf_key)
+                    # Data has been overflowed, update count
                     total_bars_after += len(self.data[tf_key])
+                    continue

-            # Cleanup tick buffer - deque handles its own cleanup with maxlen
-            # No manual cleanup needed for deque with maxlen
+                # Keep only the most recent bars (sliding window)
+                if initial_count > self.max_bars_per_timeframe:
+                    self.data[tf_key] = self.data[tf_key].tail(
+                        self.max_bars_per_timeframe
+                    )

-            # Update stats
-            self.last_cleanup = current_time
-            self.memory_stats["bars_cleaned"] += total_bars_before - total_bars_after
-            self.memory_stats["total_bars"] = total_bars_after
-            self.memory_stats["last_cleanup"] = current_time
+                total_bars_after += len(self.data[tf_key])

-            # Log cleanup if significant
-            if total_bars_before != total_bars_after:
-                self.logger.debug(
-                    f"DataManager cleanup - Bars: {total_bars_before}{total_bars_after}, "
-                    f"Ticks: {len(self.current_tick_data)}"
-                )
+        # Cleanup tick buffer - deque handles its own cleanup with maxlen
+        # No manual cleanup needed for deque with maxlen

-            # Force garbage collection after cleanup
-            gc.collect()
+        # Update stats
+        current_time = time.time()
+        self.last_cleanup = current_time
+        self.memory_stats["bars_cleaned"] += total_bars_before - total_bars_after
+        self.memory_stats["total_bars"] = total_bars_after
+        self.memory_stats["last_cleanup"] = current_time
+
+        # Log cleanup if significant
+        if total_bars_before != total_bars_after:
+            self.logger.debug(
+                f"DataManager cleanup - Bars: {total_bars_before}{total_bars_after}, "
+                f"Ticks: {len(self.current_tick_data)}"
+            )
+
+        # Force garbage collection after cleanup
+        gc.collect()

     async def _periodic_cleanup(self) -> None:
         """Background task for periodic cleanup."""

src/project_x_py/realtime_data_manager/mmap_overflow.py

Lines changed: 22 additions & 8 deletions
@@ -377,14 +377,28 @@ async def restore_from_overflow(self, timeframe: str, bars: int) -> bool:
             # Take the requested number of bars from the end
             restore_df = overflow_df.tail(bars)

-            async with self.data_lock:
-                # Prepend to current data
-                if timeframe in self.data:
-                    self.data[timeframe] = pl.concat(
-                        [restore_df, self.data[timeframe]]
-                    )
-                else:
-                    self.data[timeframe] = restore_df
+            # Import here to avoid circular dependency
+            from project_x_py.utils.lock_optimization import AsyncRWLock
+
+            # Use appropriate lock method based on lock type
+            if isinstance(self.data_lock, AsyncRWLock):
+                async with self.data_lock.write_lock():
+                    # Prepend to current data
+                    if timeframe in self.data:
+                        self.data[timeframe] = pl.concat(
+                            [restore_df, self.data[timeframe]]
+                        )
+                    else:
+                        self.data[timeframe] = restore_df
+            else:
+                async with self.data_lock:
+                    # Prepend to current data
+                    if timeframe in self.data:
+                        self.data[timeframe] = pl.concat(
+                            [restore_df, self.data[timeframe]]
+                        )
+                    else:
+                        self.data[timeframe] = restore_df

             logger.info(
                 f"Restored {len(restore_df)} bars for {timeframe} from overflow"

src/project_x_py/statistics/export.py

Lines changed: 68 additions & 6 deletions
@@ -11,7 +11,7 @@
 import csv
 import json
 import re
-from datetime import datetime
+from datetime import UTC, datetime
 from io import StringIO
 from typing import Any, ClassVar, Union

@@ -64,7 +64,9 @@ async def to_json(
         data = self._stats_to_dict(stats)

         if include_timestamp:
-            data["export_timestamp"] = datetime.utcnow().isoformat() + "Z"
+            data["export_timestamp"] = (
+                datetime.now(UTC).isoformat().replace("+00:00", "Z")
+            )

         if self.sanitize_sensitive:
             data = self._sanitize_data(data)
@@ -88,7 +90,7 @@
             Prometheus format string
         """
         lines = []
-        timestamp = int(datetime.utcnow().timestamp() * 1000)
+        timestamp = int(datetime.now(UTC).timestamp() * 1000)

         # Health metrics
         health_stats = stats.get("health")
@@ -227,7 +229,11 @@ async def to_csv(

         writer.writerow(headers)

-        timestamp = datetime.utcnow().isoformat() + "Z" if include_timestamp else None
+        timestamp = (
+            datetime.now(UTC).isoformat().replace("+00:00", "Z")
+            if include_timestamp
+            else None
+        )

         # Flatten stats into rows
         rows = []
@@ -320,9 +326,41 @@ async def to_csv(
                 ).items():
                     rows.append(["connections", "connection_status", status, conn_type])

+        # If no rows were generated from standard stats, handle custom structures
+        if not rows:
+            # Flatten any custom dictionary structure
+            def flatten_dict(d, parent_key="", sep="_"):
+                items = []
+                for k, v in d.items():
+                    new_key = f"{parent_key}{sep}{k}" if parent_key else k
+                    if isinstance(v, dict):
+                        items.extend(flatten_dict(v, new_key, sep=sep).items())
+                    else:
+                        items.append((new_key, v))
+                return dict(items)
+
+            flat_stats = flatten_dict(stats)
+            for key, value in flat_stats.items():
+                # Split the key to get category and name
+                parts = key.split("_", 1)
+                category = parts[0] if parts else "custom"
+                name = parts[1] if len(parts) > 1 else key
+                # Try to extract component from key
+                component = "system"
+                if "order_manager" in key:
+                    component = "order_manager"
+                elif "position_manager" in key:
+                    component = "position_manager"
+                elif "realtime" in key:
+                    component = "realtime"
+                row = [category, name, value, component]
+                if include_timestamp:
+                    row.append(timestamp)
+                rows.append(row)
+
         # Write rows
         for row in rows:
-            if include_timestamp:
+            if include_timestamp and len(row) == 4:
                 row.append(timestamp)
             writer.writerow(row)

@@ -342,7 +380,7 @@
             Dictionary formatted for Datadog API
         """
         metrics = []
-        timestamp = int(datetime.utcnow().timestamp())
+        timestamp = int(datetime.now(UTC).timestamp())

         # Health metrics
         health_stats = stats.get("health")
@@ -517,6 +555,26 @@ async def export(

     def _stats_to_dict(self, stats: ComprehensiveStats) -> dict[str, Any]:
         """Convert ComprehensiveStats to dictionary."""
+        # Check if this appears to be a structured ComprehensiveStats
+        # by looking for standard stats keys
+        has_standard_keys = any(
+            key in stats
+            for key in [
+                "health",
+                "performance",
+                "memory",
+                "errors",
+                "connections",
+                "trading",
+            ]
+        )
+
+        # If no standard keys found, just return the dict as-is
+        # This handles test cases and non-standard data structures
+        if not has_standard_keys:
+            return dict(stats)
+
+        # Process standard ComprehensiveStats structure
         result = {}

         health_stats = stats.get("health")
@@ -583,6 +641,10 @@ def _stats_to_dict(self, stats: ComprehensiveStats) -> dict[str, Any]:
                 else None,
             }

+        # Also preserve any suite data if present
+        if "suite" in stats:
+            result["suite"] = stats["suite"]
+
         return result

     def _sanitize_data(self, data: Any) -> Any:
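
The utcnow() to now(UTC) migration keeps the exported timestamp format stable. A quick standalone check of the replacement pattern (requires Python 3.11+, where datetime.UTC was added):

    from datetime import UTC, datetime

    # datetime.utcnow() returns a naive datetime and is deprecated as of Python 3.12;
    # datetime.now(UTC) is timezone-aware, so its ISO string ends in "+00:00",
    # which the exporters rewrite to the conventional "Z" suffix.
    stamp = datetime.now(UTC).isoformat().replace("+00:00", "Z")
    print(stamp)  # e.g. 2025-01-01T12:00:00.000000Z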
