
Commit f910b03

Fix linting/formatting issues in loader test files
1 parent 6d44011 commit f910b03

9 files changed, with 28 additions and 34 deletions.


docs/implementing_data_loaders.md

Lines changed: 0 additions & 2 deletions

@@ -781,5 +781,3 @@ class KeyValueLoader(DataLoader[KeyValueConfig]):
             'database': self.config.database
         }
 ```
-
-This documentation provides everything needed to implement new data loaders efficiently and consistently!

tests/integration/loaders/backends/test_deltalake.py

Lines changed: 1 addition & 2 deletions

@@ -91,7 +91,6 @@ class TestDeltaLakeSpecific:
 
     def test_partitioning(self, delta_partitioned_config, small_test_data):
         """Test DeltaLake partitioning functionality"""
-        import pyarrow as pa
 
         loader = DeltaLakeLoader(delta_partitioned_config)
 
@@ -148,7 +147,7 @@ def test_schema_evolution(self, delta_basic_config, small_test_data):
         # Load with new schema (if schema evolution enabled)
         from src.amp.loaders.base import LoadMode
 
-        result2 = loader.load_table(extended_table, 'test_table', mode=LoadMode.APPEND)
+        loader.load_table(extended_table, 'test_table', mode=LoadMode.APPEND)
 
         # Result depends on merge_schema configuration
         dt = DeltaTable(loader.config.table_path)
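
The two changes above are typical unused-code cleanups: an import of pyarrow that the test body never touches is dropped, and a load result that is never asserted on is no longer bound to a name (pyflakes/ruff report these as F401 and F841). A minimal hypothetical sketch of the pattern, not the project's actual test:

```python
import pyarrow as pa  # F401 if 'pa' is never referenced in this module


def test_append(loader, table):
    # F841: 'result' is assigned but never read ...
    result = loader.load_table(table, 'demo')
    # ... so the fixed version simply calls the loader without binding the result:
    loader.load_table(table, 'demo')
```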

tests/integration/loaders/backends/test_iceberg.py

Lines changed: 3 additions & 7 deletions

@@ -10,9 +10,6 @@
 import pytest
 
 try:
-    from pyiceberg.catalog import load_catalog
-    from pyiceberg.schema import Schema
-
     from src.amp.loaders.implementations.iceberg_loader import IcebergLoader
     from tests.integration.loaders.conftest import LoaderTestConfig
     from tests.integration.loaders.test_base_loader import BaseLoaderTests
@@ -102,7 +99,6 @@ def test_catalog_initialization(self, iceberg_basic_config):
 
     def test_partitioning(self, iceberg_basic_config, small_test_data):
         """Test Iceberg partitioning (partition spec)"""
-        import pyarrow as pa
 
         # Create config with partitioning
         config = {**iceberg_basic_config, 'partition_spec': [('year', 'identity'), ('month', 'identity')]}
@@ -147,7 +143,7 @@ def test_schema_evolution(self, iceberg_basic_config, small_test_data):
         extended_table = pa.Table.from_pydict(extended_data)
 
         # Load with new schema
-        result2 = loader.load_table(extended_table, table_name, mode=LoadMode.APPEND)
+        loader.load_table(extended_table, table_name, mode=LoadMode.APPEND)
 
         # Schema evolution depends on config
         catalog = loader._catalog
@@ -161,9 +157,10 @@ def test_schema_evolution(self, iceberg_basic_config, small_test_data):
 
     def test_timestamp_conversion(self, iceberg_basic_config):
         """Test timestamp conversion for Iceberg"""
-        import pyarrow as pa
         from datetime import datetime
 
+        import pyarrow as pa
+
         # Create data with timestamps
         data = {
             'id': [1, 2, 3],
@@ -193,7 +190,6 @@ def test_timestamp_conversion(self, iceberg_basic_config):
 
     def test_multiple_tables(self, iceberg_basic_config, small_test_data):
         """Test managing multiple tables with same loader"""
-        from src.amp.loaders.base import LoadMode
 
         loader = IcebergLoader(iceberg_basic_config)
 
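The import shuffling in test_timestamp_conversion follows the grouping that import sorters (for example, ruff's isort rules) expect: standard-library imports first, then third-party imports, separated by a blank line, even when the imports live inside a function. A small illustrative sketch of the resulting layout (simplified, not the real test):

```python
def test_timestamp_conversion():
    # Standard-library imports come first ...
    from datetime import datetime

    # ... then third-party imports in their own group, after a blank line.
    import pyarrow as pa

    ts = pa.array([datetime(2024, 1, 1), datetime(2024, 1, 2)])
    assert len(ts) == 2


test_timestamp_conversion()
```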

tests/integration/loaders/backends/test_lmdb.py

Lines changed: 5 additions & 7 deletions

@@ -11,8 +11,6 @@
 import pytest
 
 try:
-    import lmdb
-
     from src.amp.loaders.implementations.lmdb_loader import LMDBLoader
     from tests.integration.loaders.conftest import LoaderTestConfig
     from tests.integration.loaders.test_base_loader import BaseLoaderTests
@@ -37,7 +35,7 @@ def get_row_count(self, loader: LMDBLoader, table_name: str) -> int:
         count = 0
         with loader.env.begin(db=loader.db) as txn:
             cursor = txn.cursor()
-            for key, value in cursor:
+            for _key, _value in cursor:
                 count += 1
         return count
 
@@ -58,7 +56,7 @@ def query_rows(
                     row_data = json.loads(value.decode())
                     row_data['_key'] = key.decode()
                     rows.append(row_data)
-                except:
+                except Exception:
                     # Fallback to raw value
                     rows.append({'_key': key.decode(), '_value': value.decode()})
         return rows
@@ -79,20 +77,20 @@ def get_column_names(self, loader: LMDBLoader, table_name: str) -> List[str]:
 
         with loader.env.begin(db=loader.db) as txn:
             cursor = txn.cursor()
-            for key, value in cursor:
+            for _key, value in cursor:
                 try:
                     row_data = json.loads(value.decode())
                     return list(row_data.keys())
-                except:
+                except Exception:
                     return ['_value']  # Fallback
         return []
 
 
 @pytest.fixture
 def lmdb_test_env():
     """Create and cleanup temporary directory for LMDB databases"""
-    import tempfile
     import shutil
+    import tempfile
 
     temp_dir = tempfile.mkdtemp(prefix='lmdb_test_')
     yield temp_dir
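
Two recurring fixes are visible here: bare except: clauses become except Exception: (a bare except also swallows SystemExit and KeyboardInterrupt, which is why linters such as ruff flag it as E722), and loop variables that are never read gain a leading underscore. A short, runnable sketch of the exception-handling idea, assuming the same JSON-with-fallback decoding used by the test helper:

```python
import json


def parse_or_fallback(raw: bytes) -> dict:
    """Decode a JSON payload, falling back to the raw string on failure."""
    try:
        return json.loads(raw.decode())
    except Exception:  # ordinary errors only; SystemExit/KeyboardInterrupt still propagate
        return {'_value': raw.decode()}


print(parse_or_fallback(b'{"id": 1}'))  # {'id': 1}
print(parse_or_fallback(b'not json'))   # {'_value': 'not json'}
```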

tests/integration/loaders/backends/test_postgresql.py

Lines changed: 3 additions & 2 deletions

@@ -70,7 +70,7 @@ def query_rows(
                 rows = cur.fetchall()
 
                 # Convert to list of dicts
-                return [dict(zip(columns, row)) for row in rows]
+                return [dict(zip(columns, row, strict=False)) for row in rows]
         finally:
             loader.pool.putconn(conn)
 
@@ -318,9 +318,10 @@ class TestPostgreSQLPerformance:
 
     def test_large_data_loading(self, postgresql_test_config, test_table_name, cleanup_tables):
         """Test loading large datasets"""
-        import pyarrow as pa
         from datetime import datetime
 
+        import pyarrow as pa
+
         cleanup_tables.append(test_table_name)
 
         # Create large dataset
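
The strict=False argument added to zip() here (and in the Snowflake helper below) makes the truncation behaviour explicit: since Python 3.10, zip() takes a strict flag, and lint rules such as ruff's B905 ask callers to state whether stopping silently at the shortest iterable is intended. A quick illustrative example:

```python
columns = ['id', 'name']
row = (1, 'alice', 'extra')

# strict=False documents that truncating to the shorter input is acceptable:
print(dict(zip(columns, row, strict=False)))  # {'id': 1, 'name': 'alice'}

# zip(columns, row, strict=True) would instead raise ValueError on the length mismatch.
```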

tests/integration/loaders/backends/test_redis.py

Lines changed: 2 additions & 3 deletions

@@ -54,7 +54,7 @@ def get_row_count(self, loader: RedisLoader, table_name: str) -> int:
             # Get stream length
             try:
                 return loader.redis_client.xlen(table_name)
-            except:
+            except Exception:
                 return 0
         else:
             # For other structures, scan for keys
@@ -166,7 +166,6 @@ class TestRedisSpecific:
 
     def test_hash_storage(self, redis_test_config, small_test_data, cleanup_redis):
         """Test Redis hash data structure storage"""
-        import pyarrow as pa
 
         keys_to_clean, patterns_to_clean = cleanup_redis
         table_name = 'test_hash'
@@ -343,7 +342,7 @@ def test_data_structure_comparison(self, redis_test_config, comprehensive_test_d
         }
 
         # Verify all structures work
-        for structure, data in results.items():
+        for _structure, data in results.items():
             assert data['success'] == True
             assert data['rows_loaded'] == 1000

tests/integration/loaders/backends/test_snowflake.py

Lines changed: 1 addition & 2 deletions

@@ -53,7 +53,7 @@ def query_rows(
             cur.execute(query)
             columns = [col[0] for col in cur.description]
             rows = cur.fetchall()
-            return [dict(zip(columns, row)) for row in rows]
+            return [dict(zip(columns, row, strict=False)) for row in rows]
 
     def cleanup_table(self, loader: SnowflakeLoader, table_name: str) -> None:
         """Drop Snowflake table"""
@@ -87,7 +87,6 @@ class TestSnowflakeSpecific:
 
     def test_stage_loading_method(self, snowflake_config, small_test_table, test_table_name, cleanup_tables):
         """Test Snowflake stage-based loading (Snowflake-specific optimization)"""
-        import pyarrow as pa
 
         cleanup_tables.append(test_table_name)

tests/integration/loaders/conftest.py

Lines changed: 1 addition & 5 deletions

@@ -141,11 +141,7 @@ def loader(self, request):
         loader_config = loader_config.copy()
         # Only enable state if not explicitly configured
         if 'state' not in loader_config:
-            loader_config['state'] = {
-                'enabled': True,
-                'storage': 'memory',
-                'store_batch_id': True
-            }
+            loader_config['state'] = {'enabled': True, 'storage': 'memory', 'store_batch_id': True}
 
         # Create and return the loader instance
         return self.config.loader_class(loader_config)

tests/integration/loaders/test_base_streaming.py

Lines changed: 12 additions & 4 deletions

@@ -138,7 +138,9 @@ def test_reorg_deletion(self, loader, test_table_name, cleanup_tables):
         reorg_response = ResponseBatch.reorg_batch(
             invalidation_ranges=[BlockRange(network='ethereum', start=104, end=108)]
         )
-        reorg_results = list(loader.load_stream_continuous(iter([reorg_response]), test_table_name, connection_name='test_conn'))
+        reorg_results = list(
+            loader.load_stream_continuous(iter([reorg_response]), test_table_name, connection_name='test_conn')
+        )
         assert len(reorg_results) == 1
         assert reorg_results[0].success
 
@@ -174,7 +176,9 @@ def test_reorg_overlapping_ranges(self, loader, test_table_name, cleanup_tables)
         )
 
         # Load via streaming API (with connection_name for state tracking)
-        results = list(loader.load_stream_continuous(iter([response]), test_table_name, connection_name='test_conn'))
+        results = list(
+            loader.load_stream_continuous(iter([response]), test_table_name, connection_name='test_conn')
+        )
         assert len(results) == 1
         assert results[0].success
 
@@ -187,7 +191,9 @@ def test_reorg_overlapping_ranges(self, loader, test_table_name, cleanup_tables)
         reorg_response = ResponseBatch.reorg_batch(
             invalidation_ranges=[BlockRange(network='ethereum', start=160, end=180)]
         )
-        reorg_results = list(loader.load_stream_continuous(iter([reorg_response]), test_table_name, connection_name='test_conn'))
+        reorg_results = list(
+            loader.load_stream_continuous(iter([reorg_response]), test_table_name, connection_name='test_conn')
+        )
         assert len(reorg_results) == 1
         assert reorg_results[0].success
 
@@ -248,7 +254,9 @@ def test_reorg_multi_network(self, loader, test_table_name, cleanup_tables):
         reorg_response = ResponseBatch.reorg_batch(
             invalidation_ranges=[BlockRange(network='ethereum', start=100, end=100)]
         )
-        reorg_results = list(loader.load_stream_continuous(iter([reorg_response]), test_table_name, connection_name='test_conn'))
+        reorg_results = list(
+            loader.load_stream_continuous(iter([reorg_response]), test_table_name, connection_name='test_conn')
+        )
         assert len(reorg_results) == 1
         assert reorg_results[0].success