@@ -263,6 +263,127 @@ def test_ai_adapter_raises_error_for_unsupported_model(self, mock_api_key, mock_
         with pytest.raises(ValueError, match="Unsupported model type"):
             AIAdapter(mock_api_key, mock_llm_proxy)
 
+    @patch("src.llm.llm_manager.FREE_TIER", False)
+    @patch("src.llm.llm_manager.LLM_MODEL_TYPE", "openai")
+    @patch("src.llm.llm_manager.EASY_APPLY_MODEL", "gpt-4")
+    @patch("src.llm.llm_manager.OpenAIModel")
+    @patch("src.llm.llm_manager.pause")
+    def test_ai_adapter_no_rate_limiting_when_free_tier_disabled(
+        self, mock_pause, mock_openai, mock_api_key, mock_llm_proxy
+    ):
+        """Test AIAdapter does not apply rate limiting when free tier is disabled"""
+        mock_model = MagicMock()
+        mock_model.invoke.return_value = AIMessage(content="Test response")
+        mock_openai.return_value = mock_model
+
+        adapter = AIAdapter(mock_api_key, mock_llm_proxy)
+
+        # Make multiple requests
+        for _ in range(5):
+            adapter.invoke("Test prompt")
+
+        # Verify pause was never called
+        mock_pause.assert_not_called()
+
+    @patch("src.llm.llm_manager.FREE_TIER", True)
+    @patch("src.llm.llm_manager.FREE_TIER_RPM_LIMIT", 3)
+    @patch("src.llm.llm_manager.LLM_MODEL_TYPE", "openai")
+    @patch("src.llm.llm_manager.EASY_APPLY_MODEL", "gpt-4")
+    @patch("src.llm.llm_manager.OpenAIModel")
+    @patch("src.llm.llm_manager.pause")
+    def test_ai_adapter_no_pause_under_rpm_limit(
+        self, mock_pause, mock_openai, mock_api_key, mock_llm_proxy
+    ):
+        """Test AIAdapter does not pause when requests are under RPM limit"""
+        mock_model = MagicMock()
+        mock_model.invoke.return_value = AIMessage(content="Test response")
+        mock_openai.return_value = mock_model
+
+        adapter = AIAdapter(mock_api_key, mock_llm_proxy)
+
+        # Make requests under the limit (3)
+        for _ in range(2):
+            adapter.invoke("Test prompt")
+
+        # Verify pause was not called
+        mock_pause.assert_not_called()
+
+    @patch("src.llm.llm_manager.FREE_TIER", True)
+    @patch("src.llm.llm_manager.FREE_TIER_RPM_LIMIT", 3)
+    @patch("src.llm.llm_manager.LLM_MODEL_TYPE", "openai")
+    @patch("src.llm.llm_manager.EASY_APPLY_MODEL", "gpt-4")
+    @patch("src.llm.llm_manager.OpenAIModel")
+    @patch("src.llm.llm_manager.pause")
+    @patch("src.llm.llm_manager.datetime")
+    def test_ai_adapter_pauses_when_rpm_limit_reached(
+        self, mock_datetime, mock_pause, mock_openai, mock_api_key, mock_llm_proxy
+    ):
+        """Test AIAdapter pauses when RPM limit is reached within 60 seconds"""
+        from datetime import datetime, timedelta
+
+        # Setup mock datetime
+        base_time = datetime(2024, 1, 1, 12, 0, 0)
+        mock_datetime.now.side_effect = [
+            base_time,  # First request
+            base_time + timedelta(seconds=10),  # Second request
+            base_time + timedelta(seconds=20),  # Third request
+            base_time + timedelta(seconds=30),  # Fourth request - triggers check
+            base_time + timedelta(seconds=30),  # During pause calculation
+        ]
+
+        mock_model = MagicMock()
+        mock_model.invoke.return_value = AIMessage(content="Test response")
+        mock_openai.return_value = mock_model
+
+        adapter = AIAdapter(mock_api_key, mock_llm_proxy)
+
+        # Make requests that hit the limit (3 requests in queue, 4th triggers pause)
+        for _ in range(4):
+            adapter.invoke("Test prompt")
+
+        # Verify pause was called
+        # Time delta = 30 seconds since first request
+        # Should pause for 60 - 30 = 30 seconds
+        assert mock_pause.call_count == 1
+        call_args = mock_pause.call_args[0]
+        assert call_args[0] == 30.0  # pause duration
+        assert call_args[1] == 31.0  # pause duration + 1
+
+    @patch("src.llm.llm_manager.FREE_TIER", True)
+    @patch("src.llm.llm_manager.FREE_TIER_RPM_LIMIT", 2)
+    @patch("src.llm.llm_manager.LLM_MODEL_TYPE", "openai")
+    @patch("src.llm.llm_manager.EASY_APPLY_MODEL", "gpt-4")
+    @patch("src.llm.llm_manager.OpenAIModel")
+    @patch("src.llm.llm_manager.pause")
+    @patch("src.llm.llm_manager.datetime")
+    def test_ai_adapter_no_pause_after_60_seconds(
+        self, mock_datetime, mock_pause, mock_openai, mock_api_key, mock_llm_proxy
+    ):
+        """Test AIAdapter does not pause if oldest request is older than 60 seconds"""
+        from datetime import datetime, timedelta
+
+        # Setup mock datetime - requests spaced more than 60 seconds apart
+        base_time = datetime(2024, 1, 1, 12, 0, 0)
+        mock_datetime.now.side_effect = [
+            base_time,  # First request
+            base_time + timedelta(seconds=30),  # Second request
+            base_time + timedelta(seconds=65),  # Third request - 65 seconds after first
+            base_time + timedelta(seconds=65),  # During check
+        ]
+
+        mock_model = MagicMock()
+        mock_model.invoke.return_value = AIMessage(content="Test response")
+        mock_openai.return_value = mock_model
+
+        adapter = AIAdapter(mock_api_key, mock_llm_proxy)
+
+        # Make 3 requests
+        for _ in range(3):
+            adapter.invoke("Test prompt")
+
+        # Verify pause was not called since 60+ seconds passed
+        mock_pause.assert_not_called()
+
 
 class TestLLMLogger:
     """Tests for LLMLogger class"""