Skip to content

Commit 5bce7b3

Browse files
committed
pre-commit fixes
1 parent ac5ebe6 commit 5bce7b3

File tree

2 files changed

+85
-105
lines changed

2 files changed

+85
-105
lines changed

tests/test_export_threaded.py

Lines changed: 16 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -161,9 +161,7 @@ def test_execute_batch_handles_json_decode_error(self) -> None:
161161
)
162162

163163
# 2. Action
164-
with patch(
165-
"odoo_data_flow.export_threaded.log.error"
166-
) as mock_log_error:
164+
with patch("odoo_data_flow.export_threaded.log.error") as mock_log_error:
167165
result = thread._execute_batch([1], 1)
168166

169167
# 3. Assert
@@ -456,9 +454,7 @@ def test_export_handles_memory_error_fallback(
456454

457455
# Verify the final file has all data from the successful retries
458456
on_disk_df = pl.read_csv(output_file, separator=";")
459-
expected_df = pl.DataFrame(
460-
{"id": [1, 2, 3, 4], "name": ["A", "B", "C", "D"]}
461-
)
457+
expected_df = pl.DataFrame({"id": [1, 2, 3, 4], "name": ["A", "B", "C", "D"]})
462458
assert_frame_equal(on_disk_df.sort("id"), expected_df.sort("id"))
463459

464460
def test_export_handles_empty_batch_result(
@@ -528,9 +524,7 @@ def test_export_handles_permanent_worker_failure(
528524
on_disk_df = pl.read_csv(output_file, separator=";")
529525
assert len(on_disk_df) == 1
530526

531-
def test_initialize_export_connection_error(
532-
self, mock_conf_lib: MagicMock
533-
) -> None:
527+
def test_initialize_export_connection_error(self, mock_conf_lib: MagicMock) -> None:
534528
"""Tests that the function handles connection errors gracefully."""
535529
mock_conf_lib.side_effect = Exception("Connection Refused")
536530

@@ -620,9 +614,7 @@ def test_process_export_batches_empty_result(
620614
if result is not None:
621615
assert result.is_empty()
622616

623-
def test_process_export_batches_no_dfs_with_output(
624-
self, tmp_path: Path
625-
) -> None:
617+
def test_process_export_batches_no_dfs_with_output(self, tmp_path: Path) -> None:
626618
"""Test _process_export_batches with no dataframes and an output file."""
627619
mock_rpc_thread = MagicMock()
628620
mock_rpc_thread.futures = []
@@ -648,9 +640,7 @@ def test_process_export_batches_no_dfs_with_output(
648640
assert result.is_empty()
649641
mock_write_csv.assert_called_once()
650642

651-
def test_export_relational_raw_id_success(
652-
self, mock_conf_lib: MagicMock
653-
) -> None:
643+
def test_export_relational_raw_id_success(self, mock_conf_lib: MagicMock) -> None:
654644
"""Test Relational Raw id.
655645
656646
Tests that requesting a relational field with '/.id' triggers read mode
@@ -718,9 +708,7 @@ def test_export_hybrid_mode_success(self, mock_conf_lib: MagicMock) -> None:
718708
}
719709

720710
# 2. Mock the primary read() call
721-
mock_model.read.return_value = [
722-
{"id": 10, "parent_id": (5, "Parent Category")}
723-
]
711+
mock_model.read.return_value = [{"id": 10, "parent_id": (5, "Parent Category")}]
724712

725713
# 3. Mock the secondary XML ID lookup on 'ir.model.data'
726714
mock_ir_model_data = MagicMock()
@@ -749,9 +737,7 @@ def test_export_hybrid_mode_success(self, mock_conf_lib: MagicMock) -> None:
749737
)
750738
assert_frame_equal(result_df, expected_df)
751739

752-
def test_export_id_in_export_data_mode(
753-
self, mock_conf_lib: MagicMock
754-
) -> None:
740+
def test_export_id_in_export_data_mode(self, mock_conf_lib: MagicMock) -> None:
755741
"""Test export id in export data.
756742
757743
Tests that in export_data mode, the 'id' field correctly resolves
@@ -838,9 +824,7 @@ def test_export_auto_enables_read_mode_for_selection_field(
838824

839825
# --- Assert ---
840826
_init_args, init_kwargs = mock_rpc_thread_class.call_args
841-
assert init_kwargs.get("technical_names") is True, (
842-
"Read mode was not triggered"
843-
)
827+
assert init_kwargs.get("technical_names") is True, "Read mode was not triggered"
844828

845829
assert result_df is not None
846830
expected_df = pl.DataFrame({"name": ["Test Record"], "state": ["done"]})
@@ -890,14 +874,10 @@ def test_export_auto_enables_read_mode_for_binary_field(
890874

891875
# --- Assert ---
892876
_init_args, init_kwargs = mock_rpc_thread_class.call_args
893-
assert init_kwargs.get("technical_names") is True, (
894-
"Read mode was not triggered"
895-
)
877+
assert init_kwargs.get("technical_names") is True, "Read mode was not triggered"
896878

897879
assert result_df is not None
898-
expected_df = pl.DataFrame(
899-
{"name": ["test.zip"], "datas": ["UEsDBAoAAAAA..."]}
900-
)
880+
expected_df = pl.DataFrame({"name": ["test.zip"], "datas": ["UEsDBAoAAAAA..."]})
901881
assert_frame_equal(result_df, expected_df)
902882

903883
@patch("odoo_data_flow.export_threaded.concurrent.futures.as_completed")
@@ -1058,9 +1038,7 @@ def test_execute_batch_single_record_failure(self) -> None:
10581038
# has_failures should be set to True
10591039
assert thread.has_failures is True
10601040

1061-
def test_resume_existing_session_missing_all_ids(
1062-
self, tmp_path: Path
1063-
) -> None:
1041+
def test_resume_existing_session_missing_all_ids(self, tmp_path: Path) -> None:
10641042
"""Test _resume_existing_session when all_ids.json is missing."""
10651043
from odoo_data_flow.export_threaded import _resume_existing_session
10661044

@@ -1072,17 +1050,13 @@ def test_resume_existing_session_missing_all_ids(
10721050

10731051
session_id = "test_session"
10741052

1075-
ids_to_export, total_count = _resume_existing_session(
1076-
session_dir, session_id
1077-
)
1053+
ids_to_export, total_count = _resume_existing_session(session_dir, session_id)
10781054

10791055
# Should return empty list since all_ids.json is missing
10801056
assert ids_to_export == []
10811057
assert total_count == 0
10821058

1083-
def test_resume_existing_session_with_completed_ids(
1084-
self, tmp_path: Path
1085-
) -> None:
1059+
def test_resume_existing_session_with_completed_ids(self, tmp_path: Path) -> None:
10861060
"""Test _resume_existing_session with existing completed IDs."""
10871061
import json
10881062

@@ -1107,9 +1081,7 @@ def test_resume_existing_session_with_completed_ids(
11071081

11081082
session_id = "test_session"
11091083

1110-
ids_to_export, total_count = _resume_existing_session(
1111-
session_dir, session_id
1112-
)
1084+
ids_to_export, total_count = _resume_existing_session(session_dir, session_id)
11131085

11141086
# Should return only uncompleted IDs (2, 4)
11151087
assert sorted(ids_to_export) == [2, 4]
@@ -1160,13 +1132,9 @@ def test_execute_batch_successful_split_retry(self) -> None:
11601132

11611133
# Should split [1,2,3,4] into [1,2] and [3,4]
11621134
assert first_call_args[0] == [1, 2] # First half
1163-
assert (
1164-
first_call_args[1] == "test_batch-a"
1165-
) # First half batch number
1135+
assert first_call_args[1] == "test_batch-a" # First half batch number
11661136
assert second_call_args[0] == [3, 4] # Second half
1167-
assert (
1168-
second_call_args[1] == "test_batch-b"
1169-
) # Second half batch number
1137+
assert second_call_args[1] == "test_batch-b" # Second half batch number
11701138

11711139
# Results should be combined
11721140
expected_data = [{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}]

0 commit comments

Comments (0)