Commit cd4b2b9

Fix S3 integration tests

1 parent: baca481

2 files changed: +12 -12 lines changed


tests/test_integration/test_run_transfer/test_hdfs.py

Lines changed: 6 additions & 6 deletions
@@ -248,8 +248,8 @@ async def test_run_transfer_postgres_to_hdfs_with_full_strategy(
 
     await run_transfer_and_verify(client, group_owner, postgres_to_hdfs.id)
 
-    files = [file.name for file in hdfs_file_connection.list_dir(target_path) if file.is_file()]
-    verify_file_name_template(files, expected_extension)
+    file_names = [file.name for file in hdfs_file_connection.list_dir(target_path) if file.is_file()]
+    verify_file_name_template(file_names, expected_extension)
 
     spark.catalog.clearCache()
     reader = FileDFReader(
@@ -304,8 +304,8 @@ async def test_run_transfer_postgres_to_hdfs_with_incremental_strategy(
     fill_with_data(first_transfer_df)
     await run_transfer_and_verify(client, group_owner, postgres_to_hdfs.id)
 
-    files = [file.name for file in hdfs_file_connection.list_dir(target_path) if file.is_file()]
-    verify_file_name_template(files, expected_extension)
+    file_names = [file.name for file in hdfs_file_connection.list_dir(target_path) if file.is_file()]
+    verify_file_name_template(file_names, expected_extension)
 
     spark.catalog.clearCache()
     reader = FileDFReader(
@@ -323,8 +323,8 @@ async def test_run_transfer_postgres_to_hdfs_with_incremental_strategy(
     fill_with_data(second_transfer_df)
     await run_transfer_and_verify(client, group_owner, postgres_to_hdfs.id)
 
-    files = [file.name for file in hdfs_file_connection.list_dir(target_path) if file.is_file()]
-    verify_file_name_template(files, expected_extension)
+    file_names = [file.name for file in hdfs_file_connection.list_dir(target_path) if file.is_file()]
+    verify_file_name_template(file_names, expected_extension)
 
     spark.catalog.clearCache()
     df_with_increment = reader.run()
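
Note that in test_hdfs.py the change is a pure rename (files to file_names): both sides already pass file.name strings. The substantive fix is in test_s3.py below, where the entries returned by list_dir() were passed to verify_file_name_template() instead of their .name strings. As a minimal, self-contained sketch of why that distinction matters (a hypothetical stand-in for the repo's verify_file_name_template helper, not its actual implementation):

    import re

    def verify_file_name_template(file_names: list[str], extension: str) -> None:
        # Hypothetical stand-in: every produced file name must end with
        # the expected extension, e.g. "part-0001.csv" for "csv".
        pattern = re.compile(rf".+\.{re.escape(extension)}$")
        for name in file_names:
            assert isinstance(name, str), f"expected a file name string, got {type(name)!r}"
            assert pattern.match(name), f"unexpected file name: {name}"

A check along these lines trips as soon as it receives the path-like entries that list_dir() yields rather than plain name strings, which is exactly what the S3 hunks below correct.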

tests/test_integration/test_run_transfer/test_s3.py

Lines changed: 6 additions & 6 deletions
@@ -295,8 +295,8 @@ async def test_run_transfer_postgres_to_s3_with_full_strategy(
 
     await run_transfer_and_verify(client, group_owner, postgres_to_s3.id, target_auth="s3")
 
-    files = [file for file in s3_file_connection.list_dir(target_path) if file.is_file()]
-    verify_file_name_template(files, expected_extension)
+    file_names = [file.name for file in s3_file_connection.list_dir(target_path) if file.is_file()]
+    verify_file_name_template(file_names, expected_extension)
 
     reader = FileDFReader(
         connection=s3_file_df_connection,
@@ -357,8 +357,8 @@ async def test_run_transfer_postgres_to_s3_with_incremental_strategy(
     fill_with_data(first_transfer_df)
     await run_transfer_and_verify(client, group_owner, postgres_to_s3.id, target_auth="s3")
 
-    files = [file for file in s3_file_connection.list_dir(target_path)]
-    verify_file_name_template(files, expected_extension)
+    file_names = [file.name for file in s3_file_connection.list_dir(target_path)]
+    verify_file_name_template(file_names, expected_extension)
 
     reader = FileDFReader(
         connection=s3_file_df_connection,
@@ -375,8 +375,8 @@ async def test_run_transfer_postgres_to_s3_with_incremental_strategy(
     fill_with_data(second_transfer_df)
     await run_transfer_and_verify(client, group_owner, postgres_to_s3.id, target_auth="s3")
 
-    files = [file.name for file in s3_file_connection.list_dir(target_path) if file.is_file()]
-    verify_file_name_template(files, expected_extension)
+    file_names = [file.name for file in s3_file_connection.list_dir(target_path) if file.is_file()]
+    verify_file_name_template(file_names, expected_extension)
 
     df_with_increment = reader.run()
     df_with_increment, init_df = cast_dataframe_types(df_with_increment, init_df)
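
For reference, each of these hunks is followed by the read-back step visible in the context lines: the tests re-read the transferred files with onETL's FileDFReader and compare them against the source DataFrame. A hedged sketch of that pattern (the CSV format is illustrative, since the tests parametrize it, and cast_dataframe_types is a repo-local helper seen in the context above):

    from onetl.file import FileDFReader
    from onetl.file.format import CSV

    # Re-read everything written under target_path into a Spark DataFrame.
    reader = FileDFReader(
        connection=s3_file_df_connection,  # Spark-backed S3 connection fixture
        format=CSV(),                      # illustrative; the tests parametrize the format
        source_path=target_path,
    )
    df_with_increment = reader.run()

    # Align column types before asserting equality with the source data.
    df_with_increment, init_df = cast_dataframe_types(df_with_increment, init_df)

reader.run() returns a single Spark DataFrame covering all files under source_path, so the name-template check above remains the only place where individual file names are inspected.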
