Commit 7f73e54

Fix code formatting with ruff format
Applied ruff formatting to 6 test files to resolve linting issues. All linting checks now pass.

Co-authored-by: Andrew Brookins <[email protected]>
1 parent c2b2ba2 · commit 7f73e54

6 files changed (+45, −43 lines)
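All six files receive the same mechanical rewrite of long assert statements with failure messages, presumably produced by running `ruff format` over the tests directory and verified with `ruff check`. The older layout parenthesized the condition and left the message dangling after the closing parenthesis; current ruff format keeps the condition inline and parenthesizes the message instead. A minimal before/after sketch, using names taken from the diffs below (the `SimpleNamespace` stand-in is hypothetical, added only to make the snippet runnable):

    from types import SimpleNamespace

    result = SimpleNamespace(overall_score=0.85)  # stand-in for a real evaluation result

    # Before: condition wrapped in parentheses, message after the closing paren
    assert (
        result.overall_score >= 0.0
    ), f"Invalid score: {result.overall_score}"

    # After ruff format: condition stays inline, message is parenthesized
    assert result.overall_score >= 0.0, (
        f"Invalid score: {result.overall_score}"
    )

Parenthesizing only the message keeps the asserted expression readable at a glance, and it avoids the classic pitfall of wrapping condition and message together: `assert (condition, "message")` tests a two-element tuple, which is always true.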

tests/integration/test_vectorstore_factory_integration.py

Lines changed: 3 additions & 1 deletion
@@ -115,7 +115,9 @@ def test_create_embeddings_unsupported_provider(self, mock_settings):
 
         # Create a mock model config with unsupported provider
         mock_config = Mock()
-        mock_config.provider = "unsupported"  # Set directly as string, bypassing enum validation
+        mock_config.provider = (
+            "unsupported"  # Set directly as string, bypassing enum validation
+        )
         mock_settings.embedding_model_config = mock_config
 
         with pytest.raises(ValueError, match="Unsupported embedding provider"):
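This first hunk is the only change in the commit that is not an assert rewrite: the assignment exceeds the line-length limit once its trailing comment is counted, and because a comment cannot be split, ruff format parenthesizes the right-hand side so the comment wraps together with the value. The same transformation in isolation, assuming ruff's default 88-character limit (the `Mock` setup mirrors the test's own usage):

    from unittest.mock import Mock

    mock_config = Mock()

    # Too long once the trailing comment is counted:
    mock_config.provider = "unsupported"  # Set directly as string, bypassing enum validation

    # After ruff format: the value is parenthesized so the comment wraps with it
    mock_config.provider = (
        "unsupported"  # Set directly as string, bypassing enum validation
    )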

tests/test_contextual_grounding_integration.py

Lines changed: 6 additions & 6 deletions
@@ -449,9 +449,9 @@ async def test_comprehensive_grounding_evaluation_with_judge(self):
 
             # CI Stability: Accept any valid score (>= 0.0) while grounding system is being improved
             # This allows us to track grounding quality without blocking CI on implementation details
-            assert (
-                result.overall_score >= 0.0
-            ), f"Invalid score for {example['category']}: {result.overall_score}"
+            assert result.overall_score >= 0.0, (
+                f"Invalid score for {example['category']}: {result.overall_score}"
+            )
 
             # Log performance for monitoring
             if result.overall_score < 0.05:
@@ -530,6 +530,6 @@ async def test_model_comparison_grounding_quality(self):
             print(f"{model}: {status}")
 
         # At least one model should succeed
-        assert any(
-            r["success"] for r in results_by_model.values()
-        ), "No model successfully completed grounding"
+        assert any(r["success"] for r in results_by_model.values()), (
+            "No model successfully completed grounding"
+        )

tests/test_full_integration.py

Lines changed: 9 additions & 9 deletions
@@ -772,9 +772,9 @@ async def test_memory_prompt_with_long_term_search(
         )
         for msg in messages
     )
-    assert (
-        relevant_context_found
-    ), f"No relevant memory context found in messages: {messages}"
+    assert relevant_context_found, (
+        f"No relevant memory context found in messages: {messages}"
+    )
 
     # Cleanup
     await client.delete_long_term_memories([m.id for m in test_memories])
@@ -1078,9 +1078,9 @@ async def test_full_workflow_integration(
     )
     print(f"No topic filter search results: {no_topic_search}")
 
-    assert (
-        len(search_results["memories"]) > 0
-    ), f"No memories found in search results: {search_results}"
+    assert len(search_results["memories"]) > 0, (
+        f"No memories found in search results: {search_results}"
+    )
 
     # 6. Test tool integration with a realistic scenario
     tool_call = {
@@ -1125,9 +1125,9 @@ async def test_full_workflow_integration(
         m for m in long_term_memories.memories if m.id.startswith(memory_id_prefix)
     ]
 
-    assert (
-        len(our_memories) == 0
-    ), f"Expected 0 of our memories but found {len(our_memories)}: {our_memories}"
+    assert len(our_memories) == 0, (
+        f"Expected 0 of our memories but found {len(our_memories)}: {our_memories}"
+    )
 
 
 @pytest.mark.integration

tests/test_mcp.py

Lines changed: 6 additions & 6 deletions
@@ -455,9 +455,9 @@ async def test_mcp_lenient_memory_record_defaults(self, session, mcp_test_setup)
             namespace="user_preferences",
         )
 
-        assert (
-            lenient_memory.discrete_memory_extracted == "t"
-        ), f"LenientMemoryRecord should default to 't', got '{lenient_memory.discrete_memory_extracted}'"
+        assert lenient_memory.discrete_memory_extracted == "t", (
+            f"LenientMemoryRecord should default to 't', got '{lenient_memory.discrete_memory_extracted}'"
+        )
         assert lenient_memory.memory_type.value == "semantic"
         assert lenient_memory.id is not None
 
@@ -466,9 +466,9 @@ async def test_mcp_lenient_memory_record_defaults(self, session, mcp_test_setup)
             id="test_001", text="User prefers coffee", memory_type="semantic"
         )
 
-        assert (
-            extracted_memory.discrete_memory_extracted == "t"
-        ), f"ExtractedMemoryRecord should default to 't', got '{extracted_memory.discrete_memory_extracted}'"
+        assert extracted_memory.discrete_memory_extracted == "t", (
+            f"ExtractedMemoryRecord should default to 't', got '{extracted_memory.discrete_memory_extracted}'"
+        )
         assert extracted_memory.memory_type.value == "semantic"
 
     @pytest.mark.asyncio

tests/test_thread_aware_grounding.py

Lines changed: 6 additions & 6 deletions
@@ -184,9 +184,9 @@ async def test_debounce_mechanism(self, redis_url):
 
         # Immediate second call should be debounced
         should_extract_2 = await should_extract_session_thread(session_id, redis)
-        assert (
-            should_extract_2 is False
-        ), "Second extraction attempt should be debounced"
+        assert should_extract_2 is False, (
+            "Second extraction attempt should be debounced"
+        )
 
         # Clean up
         debounce_key = f"extraction_debounce:{session_id}"
@@ -301,9 +301,9 @@ async def test_multi_entity_conversation(self):
 
         # The main success criterion: significantly reduced pronoun usage
        # Since we have proper contextual grounding, we should see very few unresolved pronouns
-        assert (
-            pronoun_count <= 3
-        ), f"Should have significantly reduced pronoun usage with proper grounding, found {pronoun_count}"
+        assert pronoun_count <= 3, (
+            f"Should have significantly reduced pronoun usage with proper grounding, found {pronoun_count}"
+        )
 
         # Additional validation: if we see multiple memories, it's a good sign of thorough extraction
         if len(extracted_memories) >= 2:

tests/test_tool_contextual_grounding.py

Lines changed: 15 additions & 15 deletions
@@ -67,9 +67,9 @@ def test_tool_description_has_grounding_instructions(self):
         ]
 
         for keyword in grounding_keywords:
-            assert (
-                keyword in tool_description
-            ), f"Tool description missing keyword: {keyword}"
+            assert keyword in tool_description, (
+                f"Tool description missing keyword: {keyword}"
+            )
             print(f"✓ Found: {keyword}")
 
         print(
@@ -107,9 +107,9 @@ async def test_judge_evaluation_of_tool_created_memories(self):
         print(f"Scores: {evaluation}")
 
         # Well-grounded tool memory should score well
-        assert (
-            evaluation["overall_score"] >= 0.7
-        ), f"Well-grounded tool memory should score high: {evaluation['overall_score']}"
+        assert evaluation["overall_score"] >= 0.7, (
+            f"Well-grounded tool memory should score high: {evaluation['overall_score']}"
+        )
 
         # Test case: Poorly grounded tool memory
         poor_grounded_memory = "He has extensive backend experience. She specializes in React. They collaborate effectively."
@@ -133,9 +133,9 @@ async def test_judge_evaluation_of_tool_created_memories(self):
 
         # Both should at least be evaluated successfully
         assert evaluation["overall_score"] >= 0.7, "Good grounding should score well"
-        assert (
-            poor_evaluation["overall_score"] >= 0.0
-        ), "Poor grounding should still be evaluated"
+        assert poor_evaluation["overall_score"] >= 0.0, (
+            "Poor grounding should still be evaluated"
+        )
 
     @pytest.mark.requires_api_keys
     async def test_realistic_tool_usage_scenario(self):
@@ -194,12 +194,12 @@ async def test_realistic_tool_usage_scenario(self):
         print(f"Evaluation: {evaluation}")
 
         # Should demonstrate good contextual grounding
-        assert (
-            evaluation["pronoun_resolution_score"] >= 0.8
-        ), "Should properly ground 'she' to 'Maria'"
-        assert (
-            evaluation["overall_score"] >= 0.6
-        ), f"Realistic tool usage should show good grounding: {evaluation['overall_score']}"
+        assert evaluation["pronoun_resolution_score"] >= 0.8, (
+            "Should properly ground 'she' to 'Maria'"
+        )
+        assert evaluation["overall_score"] >= 0.6, (
+            f"Realistic tool usage should show good grounding: {evaluation['overall_score']}"
+        )
 
         print(
             "✓ Tool-based memory creation with proper contextual grounding successful"
