
Commit ddf6d42

chore: move use_auto_chat_cache_seed_gen and init_chat_cache_seed to LLMSett… (#444)
* move use_auto_chat_cache_seed_gen and init_chat_cache_seed to LLMSettings
* remove RD_AGENT_SETTINGS in llm_utils.py
1 parent ca15365 commit ddf6d42

File tree

- rdagent/core/conf.py
- rdagent/core/utils.py
- rdagent/oai/llm_conf.py
- rdagent/oai/llm_utils.py
- test/oai/test_completion.py

5 files changed: +19 -21 lines changed
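For readers tracking the API change: code that previously read these knobs from RD_AGENT_SETTINGS now reads them from LLM_SETTINGS. A minimal before/after sketch, using only names and defaults that appear in the diffs below:

# Before this commit:
# from rdagent.core.conf import RD_AGENT_SETTINGS
# RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen  # bool, default False
# RD_AGENT_SETTINGS.init_chat_cache_seed          # int, default 42

# After this commit:
from rdagent.oai.llm_conf import LLM_SETTINGS

LLM_SETTINGS.use_auto_chat_cache_seed_gen  # bool, default False
LLM_SETTINGS.init_chat_cache_seed          # int, default 42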

rdagent/core/conf.py

Lines changed: 0 additions & 10 deletions

@@ -15,16 +15,6 @@ class RDAgentSettings(BaseSettings):
     # TODO: (xiao) think it can be a separate config.
     log_trace_path: str | None = None
 
-    # Behavior of returning answers to the same question when caching is enabled
-    use_auto_chat_cache_seed_gen: bool = False
-    """
-    `_create_chat_completion_inner_function` provides a feature to pass in a seed that affects the cache hash key.
-    We want to enable an auto seed generator to get a different default seed for `_create_chat_completion_inner_function`
-    if no seed is given.
-    So the cache will only hit when you ask the same question in the same round.
-    """
-    init_chat_cache_seed: int = 42
-
     # azure document intelligence configs
     azure_document_intelligence_key: str = ""
     azure_document_intelligence_endpoint: str = ""

rdagent/core/utils.py

Lines changed: 2 additions & 1 deletion

@@ -14,6 +14,7 @@
 from fuzzywuzzy import fuzz  # type: ignore[import-untyped]
 
 from rdagent.core.conf import RD_AGENT_SETTINGS
+from rdagent.oai.llm_conf import LLM_SETTINGS
 
 
 class RDAgentException(Exception):  # noqa: N818
@@ -98,7 +99,7 @@ class CacheSeedGen:
     """
 
     def __init__(self) -> None:
-        self.set_seed(RD_AGENT_SETTINGS.init_chat_cache_seed)
+        self.set_seed(LLM_SETTINGS.init_chat_cache_seed)
 
     def set_seed(self, seed: int) -> None:
         random.seed(seed)
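The hunk above only shows __init__ and set_seed; get_next_seed (consumed in llm_utils.py below) belongs to the same class. A minimal sketch of the idea, where the draw range in get_next_seed is an assumption, not the class's actual bound:

import random


class CacheSeedGenSketch:
    """Sketch of CacheSeedGen: a process-wide, reproducible stream of cache seeds."""

    def __init__(self, init_seed: int = 42) -> None:
        # 42 mirrors the init_chat_cache_seed default in LLMSettings.
        self.set_seed(init_seed)

    def set_seed(self, seed: int) -> None:
        # Re-seeding the RNG makes the subsequent seed stream reproducible.
        random.seed(seed)

    def get_next_seed(self) -> int:
        # Upper bound is arbitrary in this sketch; only determinism matters.
        return random.randint(0, 10**8)


gen = CacheSeedGenSketch(init_seed=10)
first_run = [gen.get_next_seed() for _ in range(3)]
gen.set_seed(10)  # replaying from the same seed...
assert first_run == [gen.get_next_seed() for _ in range(3)]  # ...yields the same stream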

rdagent/oai/llm_conf.py

Lines changed: 10 additions & 0 deletions

@@ -20,6 +20,16 @@ class LLMSettings(BaseSettings):
     prompt_cache_path: str = str(Path.cwd() / "prompt_cache.db")
     max_past_message_include: int = 10
 
+    # Behavior of returning answers to the same question when caching is enabled
+    use_auto_chat_cache_seed_gen: bool = False
+    """
+    `_create_chat_completion_inner_function` provides a feature to pass in a seed that affects the cache hash key.
+    We want to enable an auto seed generator to get a different default seed for `_create_chat_completion_inner_function`
+    if no seed is given.
+    So the cache will only hit when you ask the same question in the same round.
+    """
+    init_chat_cache_seed: int = 42
+
     # Chat configs
     openai_api_key: str = ""  # TODO: simplify the key design.
     chat_openai_api_key: str = ""
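Putting the two relocated fields to use: opting in to auto seed generation and pinning the seed stream, mirroring what the tests below do. A hedged usage sketch:

from rdagent.core.utils import LLM_CACHE_SEED_GEN
from rdagent.oai.llm_conf import LLM_SETTINGS

# Opt in; the field defaults to False, so caching behaves as before unless set.
LLM_SETTINGS.use_auto_chat_cache_seed_gen = True

# Re-align the seed stream at a known point (the tests use 10) so that two
# replays of the same run issue identical cache seeds and hit the same entries.
LLM_CACHE_SEED_GEN.set_seed(10)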

rdagent/oai/llm_utils.py

Lines changed: 1 addition & 2 deletions

@@ -17,7 +17,6 @@
 import numpy as np
 import tiktoken
 
-from rdagent.core.conf import RD_AGENT_SETTINGS
 from rdagent.core.utils import LLM_CACHE_SEED_GEN, SingletonBaseClass
 from rdagent.log import LogColors
 from rdagent.log import rdagent_logger as logger
@@ -596,7 +595,7 @@ def _create_chat_completion_inner_function(  # noqa: C901, PLR0912, PLR0915
         To make retries useful, we need to enable a seed.
         This seed is different from `self.chat_seed` for GPT. It is for the local cache mechanism enabled by RD-Agent locally.
         """
-        if seed is None and RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen:
+        if seed is None and LLM_SETTINGS.use_auto_chat_cache_seed_gen:
            seed = LLM_CACHE_SEED_GEN.get_next_seed()
 
         # TODO: we can add this function back to avoid so much `self.cfg.log_llm_chat_content`
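Why a seed changes cache behavior: if the seed is mixed into the hash key together with the conversation, a new seed yields a new key and therefore a cache miss, which is what makes retries useful. The helper below is a hypothetical illustration; the actual key construction in llm_utils.py is not shown in this diff:

import hashlib
import json


def chat_cache_key(messages: list[dict], seed: int | None) -> str:
    # Hypothetical: serialize the conversation plus seed, then hash to a cache key.
    payload = json.dumps({"messages": messages, "seed": seed}, sort_keys=True)
    return hashlib.md5(payload.encode("utf-8")).hexdigest()  # noqa: S324


msgs = [{"role": "user", "content": "Give me 2 random country names"}]
assert chat_cache_key(msgs, seed=1) == chat_cache_key(msgs, seed=1)  # same round: hit
assert chat_cache_key(msgs, seed=1) != chat_cache_key(msgs, seed=2)  # retry: miss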

test/oai/test_completion.py

Lines changed: 6 additions & 8 deletions

@@ -60,23 +60,22 @@ def test_chat_cache(self) -> None:
         - 2 pass
         - cache is not missed & the same question gets a different answer.
         """
-        from rdagent.core.conf import RD_AGENT_SETTINGS
         from rdagent.core.utils import LLM_CACHE_SEED_GEN
         from rdagent.oai.llm_conf import LLM_SETTINGS
 
         system_prompt = "You are a helpful assistant."
         user_prompt = f"Give me {2} random country names, list {2} cities in each country, and introduce them"
 
         origin_value = (
-            RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen,
+            LLM_SETTINGS.use_auto_chat_cache_seed_gen,
             LLM_SETTINGS.use_chat_cache,
             LLM_SETTINGS.dump_chat_cache,
         )
 
         LLM_SETTINGS.use_chat_cache = True
         LLM_SETTINGS.dump_chat_cache = True
 
-        RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen = True
+        LLM_SETTINGS.use_auto_chat_cache_seed_gen = True
 
         LLM_CACHE_SEED_GEN.set_seed(10)
         response1 = APIBackend().build_messages_and_create_chat_completion(
@@ -110,7 +109,7 @@ def test_chat_cache(self) -> None:
 
         # Reset, for other tests
         (
-            RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen,
+            LLM_SETTINGS.use_auto_chat_cache_seed_gen,
             LLM_SETTINGS.use_chat_cache,
             LLM_SETTINGS.dump_chat_cache,
         ) = origin_value
@@ -132,23 +131,22 @@ def test_chat_cache_multiprocess(self) -> None:
         - 2 pass
         - cache is not missed & the same question gets a different answer.
         """
-        from rdagent.core.conf import RD_AGENT_SETTINGS
         from rdagent.core.utils import LLM_CACHE_SEED_GEN, multiprocessing_wrapper
         from rdagent.oai.llm_conf import LLM_SETTINGS
 
         system_prompt = "You are a helpful assistant."
         user_prompt = f"Give me {2} random country names, list {2} cities in each country, and introduce them"
 
         origin_value = (
-            RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen,
+            LLM_SETTINGS.use_auto_chat_cache_seed_gen,
             LLM_SETTINGS.use_chat_cache,
             LLM_SETTINGS.dump_chat_cache,
         )
 
         LLM_SETTINGS.use_chat_cache = True
         LLM_SETTINGS.dump_chat_cache = True
 
-        RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen = True
+        LLM_SETTINGS.use_auto_chat_cache_seed_gen = True
 
         func_calls = [(_worker, (system_prompt, user_prompt)) for _ in range(4)]
 
@@ -161,7 +159,7 @@ def test_chat_cache_multiprocess(self) -> None:
 
         # Reset, for other tests
         (
-            RD_AGENT_SETTINGS.use_auto_chat_cache_seed_gen,
+            LLM_SETTINGS.use_auto_chat_cache_seed_gen,
             LLM_SETTINGS.use_chat_cache,
             LLM_SETTINGS.dump_chat_cache,
         ) = origin_value
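The tests snapshot the three toggles into a tuple and restore them by unpacking at the end. A try/finally variant of the same idiom (an alternative sketch, not what the tests do verbatim) would guarantee restoration even when an assertion fails midway:

from rdagent.oai.llm_conf import LLM_SETTINGS

origin_value = (
    LLM_SETTINGS.use_auto_chat_cache_seed_gen,
    LLM_SETTINGS.use_chat_cache,
    LLM_SETTINGS.dump_chat_cache,
)
try:
    LLM_SETTINGS.use_chat_cache = True
    LLM_SETTINGS.dump_chat_cache = True
    LLM_SETTINGS.use_auto_chat_cache_seed_gen = True
    # ... exercise APIBackend and assert on responses here ...
finally:
    (
        LLM_SETTINGS.use_auto_chat_cache_seed_gen,
        LLM_SETTINGS.use_chat_cache,
        LLM_SETTINGS.dump_chat_cache,
    ) = origin_value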
