Skip to content

Commit fcb78a9

Browse files
fixing LiteLLM call for the follow up (#1799)
1 parent afd3596 commit fcb78a9

File tree

5 files changed

+12
-9
lines changed

5 files changed

+12
-9
lines changed

extensions/llms/litellm/pandasai_litellm/litellm.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def type(self) -> str:
4848
str: The type of the model."""
4949
return f"litellm"
5050

51-
def call(self, instruction: BasePrompt, _: AgentState = None) -> str:
51+
def call(self, instruction: BasePrompt, context: AgentState = None) -> str:
5252
"""Generates a completion response based on the provided instruction.
5353
5454
This method converts the given instruction into a user prompt string and
@@ -57,17 +57,18 @@ def call(self, instruction: BasePrompt, _: AgentState = None) -> str:
5757
5858
Args:
5959
instruction (BasePrompt): The instruction to convert into a prompt.
60-
_ (AgentState, optional): An optional state of the agent. Defaults to None.
60+
context (AgentState, optional): An optional state of the agent. Defaults to None.
6161
6262
Returns:
6363
str: The content of the model's response to the user prompt."""
6464

65-
user_prompt = instruction.to_string()
65+
memory = context.memory if context else None
66+
self.last_prompt = self.prepend_system_prompt(instruction.to_string(), memory)
6667

6768
return (
6869
completion(
6970
model=self.model,
70-
messages=[{"content": user_prompt, "role": "user"}],
71+
messages=[{"content": self.last_prompt, "role": "user"}],
7172
**self.params,
7273
)
7374
.choices[0]

extensions/llms/openai/pandasai_openai/base.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ def completion(self, prompt: str, memory: Memory) -> str:
115115
116116
Args:
117117
prompt (str): A string representation of the prompt.
118+
memory (Memory): Memory object containing conversation history.
118119
119120
Returns:
120121
str: LLM response.
@@ -139,6 +140,7 @@ def chat_completion(self, value: str, memory: Memory) -> str:
139140
140141
Args:
141142
value (str): Prompt
143+
memory (Memory): Memory object containing conversation history.
142144
143145
Returns:
144146
str: LLM response.

pandasai/agent/base.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ def _process_query(self, query: str, output_type: Optional[str] = None):
270270
self._state.assign_prompt_id()
271271

272272
# Generate code
273-
code = self.generate_code_with_retries(query)
273+
code = self.generate_code_with_retries(str(query))
274274

275275
# Execute code with retries
276276
result = self.execute_with_retries(code)
@@ -296,7 +296,7 @@ def _regenerate_code_after_error(self, code: str, error: Exception) -> str:
296296

297297
return self._code_generator.generate_code(prompt)
298298

299-
def _handle_exception(self, code: str) -> str:
299+
def _handle_exception(self, code: str) -> ErrorResponse:
300300
"""Handle exceptions and return an error message."""
301301
error_message = traceback.format_exc()
302302
self._state.logger.log(f"Processing failed with error: {error_message}")

pandasai/core/code_generation/base.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@ def generate_code(self, prompt: BasePrompt) -> str:
1818
Generates code using a given LLM and performs validation and cleaning steps.
1919
2020
Args:
21-
context (PipelineContext): The pipeline context containing dataframes and logger.
2221
prompt (BasePrompt): The prompt to guide code generation.
2322
2423
Returns:

pandasai/llm/base.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -119,11 +119,11 @@ def _extract_code(self, response: str, separator: str = "```") -> str:
119119

120120
return code
121121

122-
def prepend_system_prompt(self, prompt: BasePrompt, memory: Memory):
122+
def prepend_system_prompt(self, prompt: str, memory: Memory) -> str | Any:
123123
"""
124124
Prepend the system prompt to the chat prompt, useful when the model doesn't keep messages for chat history.
125125
Args:
126-
prompt (BasePrompt): prompt for chat method
126+
prompt (str): prompt for chat method
127127
memory (Memory): user conversation history
128128
"""
129129
return self.get_system_prompt(memory) + prompt if memory else prompt
@@ -164,6 +164,7 @@ def generate_code(self, instruction: BasePrompt, context: AgentState) -> str:
164164
165165
Args:
166166
instruction (BasePrompt): Prompt with instruction for LLM.
167+
context (AgentState): Context to pass.
167168
168169
Returns:
169170
str: A string of Python code.

0 commit comments

Comments
 (0)