@@ -25,6 +25,7 @@
 try:
     from src.amp.loaders.base import LoadMode
     from src.amp.loaders.implementations.snowflake_loader import SnowflakeLoader
+    from src.amp.streaming.types import BatchMetadata, BlockRange, ResponseBatch, ResponseBatchWithReorg
 except ImportError:
     pytest.skip('amp modules not available', allow_module_level=True)

@@ -127,7 +128,8 @@ def test_batch_loading(self, snowflake_config, medium_test_table, test_table_nam
         loader = SnowflakeLoader(snowflake_config)

         with loader:
-            result = loader.load_table(medium_test_table, test_table_name, create_table=True)
+            # Use smaller batch size to force multiple batches (medium_test_table has 10000 rows)
+            result = loader.load_table(medium_test_table, test_table_name, create_table=True, batch_size=5000)

         assert result.success is True
         assert result.rows_loaded == medium_test_table.num_rows
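
The new batch_size=5000 matters because medium_test_table holds 10,000 rows, so the load is forced through at least two batches instead of one. A minimal sketch of how an Arrow table can be split this way, assuming only pyarrow (the helper name is hypothetical, not the loader's internals):

import pyarrow as pa

def iter_load_batches(table: pa.Table, batch_size: int):
    """Hypothetical helper: yield RecordBatches of at most batch_size rows."""
    # pyarrow's to_batches(max_chunksize=...) caps each batch's row count,
    # so a 10,000-row table comes out as batches of at most 5,000 rows.
    yield from table.to_batches(max_chunksize=batch_size)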
@@ -333,11 +335,10 @@ def test_stage_and_compression_options(self, snowflake_config, medium_test_table
         """Test different stage and compression options"""
         cleanup_tables.append(test_table_name)

-        # Test with different compression
+        # Test with stage loading method
         config = {
             **snowflake_config,
             'loading_method': 'stage',
-            'compression': 'zstd',
         }
         loader = SnowflakeLoader(config)

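For context, a 'stage' loading method in Snowflake generally means uploading a file to a stage and then running COPY INTO. A rough sketch of that flow with a snowflake-connector-python cursor; the function name, Parquet file format, and column matching are illustrative assumptions, not the loader's actual code:

def stage_load_sketch(cursor, table_name: str, local_path: str) -> None:
    # Upload the local file to the table's implicit stage (@%table).
    cursor.execute(f'PUT file://{local_path} @%{table_name}')
    # Copy the staged file into the table; format and matching are assumed examples.
    cursor.execute(
        f'COPY INTO {table_name} FROM @%{table_name} '
        f'FILE_FORMAT = (TYPE = PARQUET) MATCH_BY_COLUMN_NAME = CASE_INSENSITIVE'
    )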
@@ -614,23 +615,19 @@ def test_streaming_with_reorg(self, snowflake_config, test_table_name, cleanup_t
         data2 = pa.RecordBatch.from_pydict({'id': [3, 4], 'value': [300, 400]})

         # Create response batches
-        response1 = ResponseBatchWithReorg(
-            is_reorg=False,
-            data=ResponseBatch(
-                data=data1, metadata=BatchMetadata(ranges=[BlockRange(network='ethereum', start=100, end=110)])
-            ),
+        batch1 = ResponseBatch(
+            data=data1, metadata=BatchMetadata(ranges=[BlockRange(network='ethereum', start=100, end=110)])
         )
+        response1 = ResponseBatchWithReorg.data_batch(batch1)

-        response2 = ResponseBatchWithReorg(
-            is_reorg=False,
-            data=ResponseBatch(
-                data=data2, metadata=BatchMetadata(ranges=[BlockRange(network='ethereum', start=150, end=160)])
-            ),
+        batch2 = ResponseBatch(
+            data=data2, metadata=BatchMetadata(ranges=[BlockRange(network='ethereum', start=150, end=160)])
         )
+        response2 = ResponseBatchWithReorg.data_batch(batch2)

         # Simulate reorg event
-        reorg_response = ResponseBatchWithReorg(
-            is_reorg=True, invalidation_ranges=[BlockRange(network='ethereum', start=150, end=200)]
+        reorg_response = ResponseBatchWithReorg.reorg_batch(
+            [BlockRange(network='ethereum', start=150, end=200)]
         )

         # Process streaming data
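
The test now builds responses through ResponseBatchWithReorg.data_batch(...) and ResponseBatchWithReorg.reorg_batch(...) rather than calling the constructor with an is_reorg flag. A plausible shape for such factory classmethods, inferred from the test alone and not from the actual src.amp.streaming.types code:

from dataclasses import dataclass, field
from typing import Any, List

@dataclass
class ResponseBatchWithReorg:
    # Assumed field layout; the real types live in src.amp.streaming.types.
    is_reorg: bool = False
    data: Any = None  # wrapped ResponseBatch for data deliveries
    invalidation_ranges: List[Any] = field(default_factory=list)  # BlockRanges for reorgs

    @classmethod
    def data_batch(cls, batch: Any) -> 'ResponseBatchWithReorg':
        # Normal data delivery: wrap the batch, no reorg flag.
        return cls(is_reorg=False, data=batch)

    @classmethod
    def reorg_batch(cls, ranges: List[Any]) -> 'ResponseBatchWithReorg':
        # Reorg event: no data, just the block ranges to invalidate.
        return cls(is_reorg=True, invalidation_ranges=ranges)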
@@ -647,8 +644,8 @@ def test_streaming_with_reorg(self, snowflake_config, test_table_name, cleanup_t
         assert results[2].is_reorg

         # Verify reorg deleted the second batch
-        loader.cursor.execute(f'SELECT id FROM {test_table_name} ORDER BY id')
-        remaining_ids = [row['ID'] for row in loader.cursor.fetchall()]
+        loader.cursor.execute(f'SELECT "id" FROM {test_table_name} ORDER BY "id"')
+        remaining_ids = [row['id'] for row in loader.cursor.fetchall()]
         assert remaining_ids == [1, 2]  # 3 and 4 deleted by reorg


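The verification query now quotes the column name because Snowflake folds unquoted identifiers to upper case, while double-quoted identifiers preserve case: a column created as "id" must be queried as "id" and comes back under the key 'id' from a dict-style cursor. A small illustration of that rule (cursor and table name are assumed):

def fetch_ids(cursor, table_name: str) -> list:
    # Unquoted identifiers fold to upper case in Snowflake (SELECT id -> column ID),
    # while double-quoted identifiers keep their exact case, matching a column
    # that was created as "id".
    cursor.execute(f'SELECT "id" FROM {table_name} ORDER BY "id"')
    # With a dict-style cursor, row keys carry the stored (lower) case.
    return [row['id'] for row in cursor.fetchall()]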
@@ -704,8 +701,8 @@ def test_streaming_connection(self, snowflake_streaming_config):
         loader.connect()
         assert loader._is_connected is True
         assert loader.connection is not None
-        # Streaming client is lazily initialized (created on first load, not at connection time)
-        assert loader.streaming_client is None
+        # Streaming clients dict is initialized empty (clients created on first load per table)
+        assert loader.streaming_clients == {}

         loader.disconnect()
         assert loader._is_connected is False
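
The connection test changes because the loader moved from a single lazily created streaming_client to a streaming_clients dict keyed per table, still populated on first use. A minimal sketch of that lazy, per-table pattern; the class and method names are assumptions, not the loader's API:

class LazyStreamingClients:
    """Hypothetical stand-in mirroring the behavior the test asserts."""

    def __init__(self) -> None:
        # Empty right after connect(); nothing is created eagerly.
        self.streaming_clients: dict = {}

    def get_client(self, table_name: str):
        # Create a client the first time a table is streamed to, then reuse it.
        if table_name not in self.streaming_clients:
            self.streaming_clients[table_name] = self._create_client(table_name)
        return self.streaming_clients[table_name]

    def _create_client(self, table_name: str):
        # Placeholder for real client construction (hypothetical).
        return object()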