Skip to content

Commit 28adec6

Browse files
committed
Fix code formatting (black and isort)
1 parent 6e29178 commit 28adec6

File tree

2 files changed: +56 additions, −34 deletions

custom_components/ai_agent_ha/agent.py

Lines changed: 21 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,20 @@ def sanitize_for_logging(data: Any, mask: str = "***REDACTED***") -> Any:
5959
"""
6060
# Sensitive field patterns (case-insensitive)
6161
sensitive_patterns = {
62-
'token', 'key', 'password', 'secret', 'credential',
63-
'auth', 'authorization', 'api_key', 'apikey',
64-
'llama_token', 'openai_token', 'gemini_token',
65-
'anthropic_token', 'openrouter_token'
62+
"token",
63+
"key",
64+
"password",
65+
"secret",
66+
"credential",
67+
"auth",
68+
"authorization",
69+
"api_key",
70+
"apikey",
71+
"llama_token",
72+
"openai_token",
73+
"gemini_token",
74+
"anthropic_token",
75+
"openrouter_token",
6676
}
6777

6878
if isinstance(data, dict):
@@ -190,7 +200,10 @@ async def get_response(self, messages, **kwargs):
190200
)
191201
_LOGGER.debug("Local API response status: %d", resp.status)
192202
# Sanitize headers to avoid logging any auth tokens
193-
_LOGGER.debug("Local API response headers: %s", sanitize_for_logging(dict(resp.headers)))
203+
_LOGGER.debug(
204+
"Local API response headers: %s",
205+
sanitize_for_logging(dict(resp.headers)),
206+
)
194207

195208
# Try to parse as JSON
196209
try:
@@ -2216,7 +2229,9 @@ async def process_query(
22162229

22172230
_LOGGER.debug(f"Processing query with provider: {provider}")
22182231
# Log sanitized config (masks all tokens/keys for security)
2219-
_LOGGER.debug(f"Using config: {json.dumps(sanitize_for_logging(config), default=str)}")
2232+
_LOGGER.debug(
2233+
f"Using config: {json.dumps(sanitize_for_logging(config), default=str)}"
2234+
)
22202235

22212236
selected_provider = provider or config.get("ai_provider", "llama")
22222237
models_config = config.get("models", {})

tests/test_ai_agent_ha/test_sanitization.py

Lines changed: 35 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
"""Tests for the sanitization utility function."""
2-
import pytest
3-
import sys
2+
43
import os
4+
import sys
5+
6+
import pytest
57

68
# Add the parent directory to the path for direct imports
79
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
@@ -28,18 +30,30 @@ def sanitize_for_logging(data: Any, mask: str = "***REDACTED***") -> Any:
2830
"""
2931
# Sensitive field patterns (case-insensitive)
3032
sensitive_patterns = {
31-
'token', 'key', 'password', 'secret', 'credential',
32-
'auth', 'authorization', 'api_key', 'apikey',
33-
'llama_token', 'openai_token', 'gemini_token',
34-
'anthropic_token', 'openrouter_token'
33+
"token",
34+
"key",
35+
"password",
36+
"secret",
37+
"credential",
38+
"auth",
39+
"authorization",
40+
"api_key",
41+
"apikey",
42+
"llama_token",
43+
"openai_token",
44+
"gemini_token",
45+
"anthropic_token",
46+
"openrouter_token",
3547
}
3648

3749
if isinstance(data, dict):
3850
sanitized = {}
3951
for key, value in data.items():
4052
# Check if key matches any sensitive pattern
4153
key_lower = str(key).lower()
42-
is_sensitive = any(pattern in key_lower for pattern in sensitive_patterns)
54+
is_sensitive = any(
55+
pattern in key_lower for pattern in sensitive_patterns
56+
)
4357

4458
if is_sensitive:
4559
sanitized[key] = mask
@@ -82,7 +96,7 @@ def test_sanitize_multiple_tokens(self):
8296
"gemini_token": "gm-xyz789",
8397
"anthropic_token": "sk-ant-456",
8498
"ai_provider": "openai",
85-
"model": "gpt-4"
99+
"model": "gpt-4",
86100
}
87101
result = sanitize_for_logging(data)
88102

@@ -119,11 +133,8 @@ def test_sanitize_authorization(self):
119133
def test_sanitize_nested_dict(self):
120134
"""Test sanitization of nested dictionaries."""
121135
data = {
122-
"config": {
123-
"openai_token": "sk-abc123",
124-
"model": "gpt-4"
125-
},
126-
"ai_provider": "openai"
136+
"config": {"openai_token": "sk-abc123", "model": "gpt-4"},
137+
"ai_provider": "openai",
127138
}
128139
result = sanitize_for_logging(data)
129140

@@ -135,7 +146,7 @@ def test_sanitize_list_of_dicts(self):
135146
"""Test sanitization of lists containing dictionaries."""
136147
data = [
137148
{"api_key": "key1", "name": "config1"},
138-
{"api_key": "key2", "name": "config2"}
149+
{"api_key": "key2", "name": "config2"},
139150
]
140151
result = sanitize_for_logging(data)
141152

@@ -146,10 +157,7 @@ def test_sanitize_list_of_dicts(self):
146157

147158
def test_sanitize_tuple(self):
148159
"""Test sanitization of tuples."""
149-
data = (
150-
{"token": "secret", "name": "test"},
151-
"plain_value"
152-
)
160+
data = ({"token": "secret", "name": "test"}, "plain_value")
153161
result = sanitize_for_logging(data)
154162

155163
assert isinstance(result, tuple)
@@ -163,7 +171,7 @@ def test_sanitize_case_insensitive(self):
163171
"Token": "secret1",
164172
"API_KEY": "secret2",
165173
"Password": "secret3",
166-
"AUTHORIZATION": "secret4"
174+
"AUTHORIZATION": "secret4",
167175
}
168176
result = sanitize_for_logging(data)
169177

@@ -177,7 +185,7 @@ def test_sanitize_partial_match(self):
177185
data = {
178186
"my_token": "secret",
179187
"custom_api_key": "secret2",
180-
"db_password": "secret3"
188+
"db_password": "secret3",
181189
}
182190
result = sanitize_for_logging(data)
183191

@@ -213,18 +221,17 @@ def test_sanitize_deeply_nested(self):
213221
"level2": {
214222
"level3": {
215223
"api_key": "secret",
216-
"config": {
217-
"token": "secret2",
218-
"safe_value": "ok"
219-
}
224+
"config": {"token": "secret2", "safe_value": "ok"},
220225
}
221226
}
222227
}
223228
}
224229
result = sanitize_for_logging(data)
225230

226231
assert result["level1"]["level2"]["level3"]["api_key"] == "***REDACTED***"
227-
assert result["level1"]["level2"]["level3"]["config"]["token"] == "***REDACTED***"
232+
assert (
233+
result["level1"]["level2"]["level3"]["config"]["token"] == "***REDACTED***"
234+
)
228235
assert result["level1"]["level2"]["level3"]["config"]["safe_value"] == "ok"
229236

230237
def test_sanitize_real_config(self):
@@ -240,8 +247,8 @@ def test_sanitize_real_config(self):
240247
"models": {
241248
"openai": "gpt-4",
242249
"gemini": "gemini-pro",
243-
"anthropic": "claude-3-sonnet"
244-
}
250+
"anthropic": "claude-3-sonnet",
251+
},
245252
}
246253
result = sanitize_for_logging(config)
247254

@@ -265,7 +272,7 @@ def test_sanitize_http_headers(self):
265272
"Content-Type": "application/json",
266273
"Authorization": "Bearer secret-token",
267274
"X-API-Key": "api-key-123",
268-
"User-Agent": "HomeAssistant/1.0"
275+
"User-Agent": "HomeAssistant/1.0",
269276
}
270277
result = sanitize_for_logging(headers)
271278

0 commit comments

Comments (0)