diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b9baf2efd..a8b5539f9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -31,7 +31,7 @@ repos:
stages: [commit]
- repo: https://github.com/sourcery-ai/sourcery
- rev: v1.11.0
+ rev: v1.41.1
hooks:
- id: sourcery
# The best way to use Sourcery in a pre-commit hook:
diff --git a/extensions/llms/openai/pandasai_openai/azure_openai.py b/extensions/llms/openai/pandasai_openai/azure_openai.py
index ad80dc275..4a12c489c 100644
--- a/extensions/llms/openai/pandasai_openai/azure_openai.py
+++ b/extensions/llms/openai/pandasai_openai/azure_openai.py
@@ -12,26 +12,22 @@
class AzureOpenAI(BaseOpenAI):
- """OpenAI LLM via Microsoft Azure
- This class uses `BaseOpenAI` class to support Azure OpenAI features.
- """
+ """OpenAI LLM via Microsoft Azure.
- azure_endpoint: Union[str, None] = None
- """Your Azure Active Directory token.
- Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
- For more:
- https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
+ Supports:
+ - Chat Completions (`.chat.completions`)
+ - Legacy Completions (`.completions`)
+ - Responses API (`.responses`) for GPT-5-style reasoning models
+ including `reasoning.effort` and `text.verbosity`.
"""
- azure_ad_token: Union[str, None] = None
+
"""A function that returns an Azure Active Directory token.
- Will be invoked on every request.
+ Will be invoked on every request.
"""
azure_ad_token_provider: Union[Callable[[], str], None] = None
deployment_name: str
api_version: str = ""
- """Legacy, for openai<1.0.0 support."""
api_base: str
- """Legacy, for openai<1.0.0 support."""
api_type: str = "azure"
def __init__(
@@ -48,26 +44,16 @@ def __init__(
**kwargs,
):
"""
- __init__ method of AzureOpenAI Class.
-
Args:
api_token (str): Azure OpenAI API token.
- azure_endpoint (str): Azure endpoint.
- It should look like the following:
-
- azure_ad_token (str): Your Azure Active Directory token.
- Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
- For more: https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
- azure_ad_token_provider (str): A function that returns an Azure Active Directory token.
- Will be invoked on every request.
- api_version (str): Version of the Azure OpenAI API.
- Be aware the API version may change.
- api_base (str): Legacy, kept for backward compatibility with openai < 1.0.
- Ignored for openai >= 1.0.
- deployment_name (str): Custom name of the deployed model
- is_chat_model (bool): Whether ``deployment_name`` corresponds to a Chat
- or a Completion model.
- **kwargs: Inference Parameters.
+            azure_endpoint (str): Azure endpoint, e.g. `https://<resource>.openai.azure.com/`.
+            azure_ad_token (str): Azure AD token; inferred from env var `AZURE_OPENAI_AD_TOKEN` if unset.
+            azure_ad_token_provider (Callable): Returns an Azure AD token; invoked on every request.
+            api_version (str): Azure OpenAI API version.
+            api_base (str): Legacy param kept for openai<1.0 compatibility; ignored for openai>=1.0.
+            deployment_name (str): Name of your Azure deployment.
+            is_chat_model (bool): Legacy flag: whether the deployment is a chat or a completion model.
+            **kwargs: Inference params (temperature, reasoning_effort, etc.).
"""
self.api_token = (
@@ -78,6 +64,7 @@ def __init__(
self.azure_endpoint = azure_endpoint or os.getenv("AZURE_OPENAI_ENDPOINT")
self.api_base = api_base or os.getenv("OPENAI_API_BASE")
self.api_version = api_version or os.getenv("OPENAI_API_VERSION")
+
if self.api_token is None:
raise APIKeyNotFoundError(
"Azure OpenAI key is required. Please add an environment variable "
@@ -86,24 +73,25 @@ def __init__(
if self.azure_endpoint is None:
raise APIKeyNotFoundError(
"Azure endpoint is required. Please add an environment variable "
- "`AZURE_OPENAI_API_ENDPOINT` or pass `azure_endpoint` as a named parameter"
+ "`AZURE_OPENAI_ENDPOINT` or pass `azure_endpoint` as a named parameter"
)
-
if self.api_version is None:
raise APIKeyNotFoundError(
"Azure OpenAI version is required. Please add an environment variable "
"`OPENAI_API_VERSION` or pass `api_version` as a named parameter"
)
-
if deployment_name is None:
raise MissingModelError(
"No deployment name provided.",
"Please include deployment name from Azure dashboard.",
)
+
self.azure_ad_token = azure_ad_token or os.getenv("AZURE_OPENAI_AD_TOKEN")
self.azure_ad_token_provider = azure_ad_token_provider
- self._is_chat_model = is_chat_model
+
self.deployment_name = deployment_name
+
+ self._is_chat_model = is_chat_model
self.http_client = http_client
self.openai_proxy = kwargs.get("openai_proxy") or os.getenv("OPENAI_PROXY")
@@ -111,20 +99,26 @@ def __init__(
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}
self._set_params(**kwargs)
- # set the client
- if self._is_chat_model:
- self.client = openai.AzureOpenAI(**self._client_params).chat.completions
+
+ root_client = openai.AzureOpenAI(**self._client_params)
+
+ if self._is_responses_api_like(self.deployment_name):
+ self._is_responses_model = True
+ self._is_chat_model = True
+ self.responses_client = root_client.responses
+ self.client = root_client.chat.completions
else:
- self.client = openai.AzureOpenAI(**self._client_params).completions
+ if self._is_chat_model:
+ self.client = root_client.chat.completions
+ self.responses_client = root_client.responses
+ else:
+ self.client = root_client.completions
+ self.responses_client = root_client.responses
@property
def _default_params(self) -> Dict[str, Any]:
"""
- Get the default parameters for calling OpenAI API.
-
- Returns:
- dict: A dictionary containing Default Params.
-
+        Default params, with the Azure deployment name supplied as `model`.
"""
return {
**super()._default_params,
@@ -132,7 +126,7 @@ def _default_params(self) -> Dict[str, Any]:
}
@property
- def _client_params(self) -> Dict[str, any]:
+ def _client_params(self) -> Dict[str, Any]:
client_params = {
"api_version": self.api_version,
"azure_endpoint": self.azure_endpoint,
diff --git a/extensions/llms/openai/pandasai_openai/base.py b/extensions/llms/openai/pandasai_openai/base.py
index be31af71f..691d5e6c3 100644
--- a/extensions/llms/openai/pandasai_openai/base.py
+++ b/extensions/llms/openai/pandasai_openai/base.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Tuple, Union, List
from pandasai.core.prompts.base import BasePrompt
from pandasai.helpers.memory import Memory
@@ -13,12 +13,13 @@
class BaseOpenAI(LLM):
"""Base class to implement a new OpenAI LLM.
- LLM base class, this class is extended to be used with OpenAI API.
-
+ LLM base class, extended by OpenAI and AzureOpenAI.
"""
api_token: str
api_base: str = "https://api.openai.com/v1"
+
+ # legacy/chat-style sampling controls (still valid for gpt-4.x, gpt-3.5, etc.)
temperature: float = 0
max_tokens: int = 1000
top_p: float = 1
@@ -27,6 +28,13 @@ class BaseOpenAI(LLM):
best_of: int = 1
n: int = 1
stop: Optional[str] = None
+
+ # Responses API / reasoning-era controls (GPT-5 etc.) # NEW
+ reasoning_effort: Optional[str] = "medium" # "minimal" | "low" | "medium" | "high"
+ verbosity: Optional[str] = "low" # "low" | "medium" | "high"
+ max_output_tokens: Optional[int] = 5000 # replaces max_tokens for GPT-5
+
+ # misc
request_timeout: Union[float, Tuple[float, float], Any, None] = None
max_retries: int = 2
seed: Optional[int] = None
@@ -34,25 +42,21 @@ class BaseOpenAI(LLM):
openai_proxy: Optional[str] = None
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
- # Configure a custom httpx client. See the
- # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
+ # Configure a custom httpx client
http_client: Union[Any, None] = None
+
client: Any
_is_chat_model: bool
+ _is_responses_model: bool = False
def _set_params(self, **kwargs):
"""
- Set Parameters
- Args:
- **kwargs: ["model", "deployment_name", "temperature","max_tokens",
- "top_p", "frequency_penalty", "presence_penalty", "stop", "seed"]
-
- Returns:
- None.
+ Copy supported kwargs onto self so subclasses can pass through config.
+        Backward-compatible params like `temperature` are kept here and conditionally dropped for GPT-5 models.
"""
-
valid_params = [
+ # legacy/chat params
"model",
"deployment_name",
"temperature",
@@ -62,15 +66,39 @@ def _set_params(self, **kwargs):
"presence_penalty",
"stop",
"seed",
+ "n",
+ "best_of",
+ # connection
+ "request_timeout",
+ "max_retries",
+ "openai_proxy",
+ "default_headers",
+ "default_query",
+ "http_client",
+ # Responses API params (GPT-5+)
+ "reasoning_effort",
+ "verbosity",
+ "max_output_tokens",
]
for key, value in kwargs.items():
if key in valid_params:
setattr(self, key, value)
+    # Return True for "GPT-5 style reasoning" models, which should use the Responses API.
+ @staticmethod
+ def _is_responses_api_like(model_name: str) -> bool:
+ return model_name.startswith("gpt-5")
+
@property
def _default_params(self) -> Dict[str, Any]:
- """Get the default parameters for calling OpenAI API."""
+ """
+ Params that are *conceptually* available to this LLM, regardless of
+ endpoint style.
+ NOTE: We DO NOT filter here for GPT-5 unsupported params. That happens
+ in the per-endpoint builders below.
+ """
params: Dict[str, Any] = {
+ # classic knobs (chat/completions)
"temperature": self.temperature,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
@@ -80,25 +108,81 @@ def _default_params(self) -> Dict[str, Any]:
"n": self.n,
}
+ # classic token budget
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
- # Azure gpt-35-turbo doesn't support best_of
- # don't specify best_of if it is 1
- if self.best_of > 1:
- params["best_of"] = self.best_of
+ return params
+
+ @property
+ def _responses_params(self) -> Dict[str, Any]:
+ """
+ Build params for Responses API (GPT-5 series).
+        temperature/top_p/logprobs etc. are intentionally omitted because
+        GPT-5 reasoning models don't support them; reasoning.effort and
+        text.verbosity are forwarded instead.
+ """
+ out: Dict[str, Any] = {}
+
+ if self.reasoning_effort:
+ out["reasoning"] = {"effort": self.reasoning_effort}
+
+ if self.verbosity:
+ out["text"] = {"verbosity": self.verbosity}
+
+ if self.max_output_tokens is not None:
+ out["max_output_tokens"] = self.max_output_tokens
+ elif self.max_tokens is not None:
+ out["max_output_tokens"] = self.max_tokens
+
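+        # NOTE: stop/seed are forwarded below as-is; some openai SDK versions
+        # may not accept them on responses.create for reasoning models.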
+ if self.stop is not None:
+ out["stop"] = [self.stop]
+
+ if self.seed is not None:
+ out["seed"] = self.seed
+
+ return out
+
+ @property
+ def _chat_params(self) -> Dict[str, Any]:
+ """
+ Build params for Chat Completions API.
+ This is used for gpt-4.x / gpt-3.5 etc.
+ """
+ params = {**self._default_params}
+
+ if params.get("stop") is not None:
+ params["stop"] = [params["stop"]]
+
+ return params
+
+ @property
+ def _completion_params(self) -> Dict[str, Any]:
+ """
+ Build params for the legacy text Completions API.
+ Similar to _chat_params but with `prompt` instead of `messages`.
+ """
+ params = {**self._default_params}
+
+ if params.get("stop") is not None:
+ params["stop"] = [params["stop"]]
return params
@property
def _invocation_params(self) -> Dict[str, Any]:
- """Get the parameters used to invoke the model."""
+ """
+        Historically, the params sent with each OpenAI client call.
+        NOTE: no longer used directly, because different endpoints need
+        different shapes; kept for backward compatibility where subclasses
+        expect `_invocation_params` to include credentials.
+ """
openai_creds: Dict[str, Any] = {}
-
return {**openai_creds, **self._default_params}
@property
- def _client_params(self) -> Dict[str, any]:
+ def _client_params(self) -> Dict[str, Any]:
+ """Params passed when constructing the OpenAI client."""
return {
"api_key": self.api_token,
"base_url": self.api_base,
@@ -109,85 +193,85 @@ def _client_params(self) -> Dict[str, any]:
"http_client": self.http_client,
}
+ def prepend_system_prompt(self, prompt: str, memory: Optional[Memory]) -> str:
+ """Kept from previous codebase: combine memory/system style with new prompt."""
+ if memory and hasattr(memory, "to_string"):
+ return memory.to_string() + "\n" + prompt
+ return prompt
+
def completion(self, prompt: str, memory: Memory) -> str:
"""
- Query the completion API
-
- Args:
- prompt (str): A string representation of the prompt.
- memory (Memory): Memory object containing conversation history.
-
- Returns:
- str: LLM response.
-
+ Legacy text completion endpoint (.completions.create).
"""
- prompt = self.prepend_system_prompt(prompt, memory)
+ full_prompt = self.prepend_system_prompt(prompt, memory)
- params = {**self._invocation_params, "prompt": prompt}
-
- if self.stop is not None:
- params["stop"] = [self.stop]
+ params = {
+ **self._completion_params,
+ "prompt": full_prompt,
+ }
response = self.client.create(**params)
-
- self.last_prompt = prompt
-
+ self.last_prompt = full_prompt
return response.choices[0].text
def chat_completion(self, value: str, memory: Memory) -> str:
"""
- Query the chat completion API
-
- Args:
- value (str): Prompt
- memory (Memory): Memory object containing conversation history.
-
- Returns:
- str: LLM response.
-
+ Chat Completions endpoint (.chat.completions.create).
"""
messages = memory.to_openai_messages() if memory else []
-
- # adding current prompt as latest query message
messages.append(
{
"role": "user",
"content": value,
- },
+ }
)
params = {
- **self._invocation_params,
+ **self._chat_params,
"messages": messages,
}
- if self.stop is not None:
- params["stop"] = [self.stop]
-
response = self.client.create(**params)
-
return response.choices[0].message.content
- def call(self, instruction: BasePrompt, context: AgentState = None):
+ def responses_completion(self, value: str, memory: Memory = None) -> str:
"""
- Call the OpenAI LLM.
+ Responses API for GPT-5 / reasoning models (.responses.create).
+        """
+ input_messages: List[Dict[str, Any]] = (
+ memory.to_openai_messages() if memory else []
+ )
+ input_messages.append({"role": "user", "content": value})
+
+ params = {
+ "model": getattr(self, "model", None)
+ or getattr(self, "deployment_name", None),
+ "input": input_messages,
+ **self._responses_params,
+ }
- Args:
- instruction (BasePrompt): A prompt object with instruction for LLM.
- context (AgentState): context to pass.
+ response = self.responses_client.create(**params)
+ output_text = response.output_text
+ self.last_prompt = value
+ return output_text
- Raises:
- UnsupportedModelError: Unsupported model
+ def call(self, instruction: BasePrompt, context: AgentState = None):
+ """
+ Unified entrypoint used by pandas-ai.
- Returns:
- str: Response
+ We now branch 3 ways:
+ - responses_completion (GPT-5 / Responses API)
+ - chat_completion (gpt-4.x, gpt-3.5, etc.)
+ - completion (text-davinci-style legacy)
"""
self.last_prompt = instruction.to_string()
-
memory = context.memory if context else None
- return (
- self.chat_completion(self.last_prompt, memory)
- if self._is_chat_model
- else self.completion(self.last_prompt, memory)
- )
+ if getattr(self, "_is_responses_model", False):
+ return self.responses_completion(self.last_prompt, memory)
+
+ if self._is_chat_model:
+ return self.chat_completion(self.last_prompt, memory)
+
+ return self.completion(self.last_prompt, memory)
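
To make the new dispatch concrete, here is a sketch of the request body `responses_completion()` ends up sending for a GPT-5 model, using the defaults declared in `BaseOpenAI` (prompt text is an assumed example; shapes taken from `_responses_params`):

```python
# What responses_client.create(**params) receives with the defaults
# reasoning_effort="medium", verbosity="low", max_output_tokens=5000.
params = {
    "model": "gpt-5-mini",
    "input": [{"role": "user", "content": "Plot sales by region"}],
    "reasoning": {"effort": "medium"},
    "text": {"verbosity": "low"},
    "max_output_tokens": 5000,
}
# The reply is then read from response.output_text.
```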
diff --git a/extensions/llms/openai/pandasai_openai/openai.py b/extensions/llms/openai/pandasai_openai/openai.py
index 22b395d7b..6d0b663f6 100644
--- a/extensions/llms/openai/pandasai_openai/openai.py
+++ b/extensions/llms/openai/pandasai_openai/openai.py
@@ -5,22 +5,17 @@
from pandasai.exceptions import APIKeyNotFoundError, UnsupportedModelError
from pandasai.helpers import load_dotenv
-
from .base import BaseOpenAI
load_dotenv()
class OpenAI(BaseOpenAI):
- """OpenAI LLM using BaseOpenAI Class.
-
- An API call to OpenAI API is sent and response is recorded and returned.
- The default chat model is **gpt-3.5-turbo**.
- The list of supported Chat models includes ["gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4o", "gpt-4o-mini", "gpt-4", "gpt-4-0613", "gpt-4-32k",
- "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-instruct"].
- The list of supported Completion models includes "gpt-3.5-turbo-instruct" and
- "text-davinci-003" (soon to be deprecated).
+ """OpenAI LLM using BaseOpenAI.
+
+ - gpt-3.5 / gpt-4.x -> Chat Completions API
+ - legacy instruct -> Completions API
+    - gpt-5* -> Responses API (recommended by OpenAI for GPT-5 and newer).
"""
_supported_chat_models = [
@@ -45,53 +40,78 @@ class OpenAI(BaseOpenAI):
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano",
- "gpt-4.1-nano-2025-04-14"
+ "gpt-4.1-nano",
+ "gpt-4.1-nano-2025-04-14",
]
- _supported_completion_models = ["gpt-3.5-turbo-instruct"]
- model: str = "gpt-4.1-mini"
+ _supported_completion_models = [
+ "gpt-3.5-turbo-instruct"
+ ]
+
+ _supported_responses_models = [
+ # GPT-5 family uses Responses API, with reasoning_effort + verbosity.
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ ]
+
+ model: str = "gpt-5-mini"
def __init__(
- self,
- api_token: Optional[str] = None,
- **kwargs,
+ self,
+ api_token: Optional[str] = None,
+ **kwargs,
):
"""
- __init__ method of OpenAI Class
-
Args:
api_token (str): API Token for OpenAI platform.
- **kwargs: Extended Parameters inferred from BaseOpenAI class
-
+ **kwargs: Passed through to BaseOpenAI._set_params
"""
self.api_token = api_token or os.getenv("OPENAI_API_KEY") or None
-
if not self.api_token:
raise APIKeyNotFoundError("OpenAI API key is required")
+        # base URL override (for self-hosted / Azure-style compat); default keeps the public API
self.api_base = (
- kwargs.get("api_base") or os.getenv("OPENAI_API_BASE") or self.api_base
+ kwargs.get("api_base") or os.getenv("OPENAI_API_BASE") or self.api_base
)
+
self.openai_proxy = kwargs.get("openai_proxy") or os.getenv("OPENAI_PROXY")
if self.openai_proxy:
- openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy}
+ openai.proxy = {
+ "http": self.openai_proxy,
+ "https": self.openai_proxy,
+ }
self._set_params(**kwargs)
- # set the client
+ root_client = openai.OpenAI(**self._client_params)
model_name = self.model.split(":")[1] if "ft:" in self.model else self.model
- if model_name in self._supported_chat_models:
+
+ if model_name in self._supported_responses_models or self._is_responses_api_like(model_name):
+ self._is_responses_model = True
self._is_chat_model = True
- self.client = openai.OpenAI(**self._client_params).chat.completions
+ self.responses_client = root_client.responses
+ self.client = root_client.chat.completions
+ elif model_name in self._supported_chat_models:
+ self._is_chat_model = True
+ self._is_responses_model = False
+ self.client = root_client.chat.completions
+ # we *still* give responses_client to avoid attribute errors if someone
+ # accidentally calls responses_completion()
+ self.responses_client = root_client.responses
elif model_name in self._supported_completion_models:
self._is_chat_model = False
- self.client = openai.OpenAI(**self._client_params).completions
+ self._is_responses_model = False
+ self.client = root_client.completions
+ self.responses_client = root_client.responses
else:
raise UnsupportedModelError(self.model)
@property
def _default_params(self) -> Dict[str, Any]:
- """Get the default parameters for calling OpenAI API"""
+ """
+ Merge BaseOpenAI params + current model name.
+ """
return {
**super()._default_params,
"model": self.model,
diff --git a/extensions/llms/openai/poetry.lock b/extensions/llms/openai/poetry.lock
index 638ef04f0..4b9b483e3 100644
--- a/extensions/llms/openai/poetry.lock
+++ b/extensions/llms/openai/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,6 +6,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -20,6 +21,7 @@ version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
{file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
@@ -33,7 +35,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -42,6 +44,7 @@ version = "0.8.1"
description = "Read/rewrite/write Python ASTs"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+groups = ["main"]
files = [
{file = "astor-0.8.1-py2.py3-none-any.whl", hash = "sha256:070a54e890cefb5b3739d19f30f5a5ec840ffc9c50ffa7d23cc9fc1a38ebbfc5"},
{file = "astor-0.8.1.tar.gz", hash = "sha256:6a6effda93f4e1ce9f618779b2dd1d9d84f1e32812c23a29b3fff6fd7f63fa5e"},
@@ -53,6 +56,7 @@ version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
{file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
@@ -64,6 +68,7 @@ version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
@@ -165,10 +170,12 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "test"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {main = "platform_system == \"Windows\"", test = "sys_platform == \"win32\""}
[[package]]
name = "contourpy"
@@ -176,6 +183,7 @@ version = "1.1.1"
description = "Python library for calculating contours of 2D quadrilateral grids"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"},
{file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"},
@@ -247,6 +255,7 @@ version = "7.6.1"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.8"
+groups = ["test"]
files = [
{file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"},
{file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"},
@@ -326,7 +335,7 @@ files = [
tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
[package.extras]
-toml = ["tomli"]
+toml = ["tomli ; python_full_version <= \"3.11.0a6\""]
[[package]]
name = "cycler"
@@ -334,6 +343,7 @@ version = "0.12.1"
description = "Composable style cycles"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
{file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
@@ -349,6 +359,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -360,6 +371,7 @@ version = "1.1.3"
description = "DuckDB in-process database"
optional = false
python-versions = ">=3.7.0"
+groups = ["main"]
files = [
{file = "duckdb-1.1.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:1c0226dc43e2ee4cc3a5a4672fddb2d76fd2cf2694443f395c02dd1bea0b7fce"},
{file = "duckdb-1.1.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:7c71169fa804c0b65e49afe423ddc2dc83e198640e3b041028da8110f7cd16f7"},
@@ -421,6 +433,8 @@ version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["main", "test"]
+markers = "python_version <= \"3.10\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -435,6 +449,7 @@ version = "3.16.1"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"},
{file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"},
@@ -443,7 +458,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"]
-typing = ["typing-extensions (>=4.12.2)"]
+typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "fonttools"
@@ -451,6 +466,7 @@ version = "4.55.3"
description = "Tools to manipulate font files"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1dcc07934a2165ccdc3a5a608db56fb3c24b609658a5b340aee4ecf3ba679dc0"},
{file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7d66c15ba875432a2d2fb419523f5d3d347f91f48f57b8b08a2dfc3c39b8a3f"},
@@ -505,18 +521,18 @@ files = [
]
[package.extras]
-all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"]
+all = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\"", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0) ; python_version <= \"3.12\"", "xattr ; sys_platform == \"darwin\"", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
-interpolatable = ["munkres", "pycairo", "scipy"]
+interpolatable = ["munkres ; platform_python_implementation == \"PyPy\"", "pycairo", "scipy ; platform_python_implementation != \"PyPy\""]
lxml = ["lxml (>=4.0)"]
pathops = ["skia-pathops (>=0.5.0)"]
plot = ["matplotlib"]
repacker = ["uharfbuzz (>=0.23.0)"]
symfont = ["sympy"]
-type1 = ["xattr"]
+type1 = ["xattr ; sys_platform == \"darwin\""]
ufo = ["fs (>=2.2.0,<3)"]
-unicode = ["unicodedata2 (>=15.1.0)"]
-woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+unicode = ["unicodedata2 (>=15.1.0) ; python_version <= \"3.12\""]
+woff = ["brotli (>=1.0.1) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\"", "zopfli (>=0.1.4)"]
[[package]]
name = "fsspec"
@@ -524,6 +540,7 @@ version = "2024.12.0"
description = "File-system specification"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "fsspec-2024.12.0-py3-none-any.whl", hash = "sha256:b520aed47ad9804237ff878b504267a3b0b441e97508bd6d2d8774e3db85cee2"},
{file = "fsspec-2024.12.0.tar.gz", hash = "sha256:670700c977ed2fb51e0d9f9253177ed20cbde4a3e5c0283cc5385b5870c8533f"},
@@ -563,6 +580,7 @@ version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
@@ -574,6 +592,7 @@ version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
{file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
@@ -595,6 +614,7 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -607,7 +627,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -619,6 +639,7 @@ version = "0.27.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
+groups = ["main"]
files = [
{file = "huggingface_hub-0.27.1-py3-none-any.whl", hash = "sha256:1c5155ca7d60b60c2e2fc38cbb3ffb7f7c3adf48f824015b219af9061771daec"},
{file = "huggingface_hub-0.27.1.tar.gz", hash = "sha256:c004463ca870283909d715d20f066ebd6968c2207dae9393fdffb3c1d4d8f98b"},
@@ -653,6 +674,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -667,6 +689,8 @@ version = "6.4.5"
description = "Read resources from Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
+markers = "python_version < \"3.10\""
files = [
{file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"},
{file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"},
@@ -676,7 +700,7 @@ files = [
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
@@ -689,6 +713,7 @@ version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
+groups = ["test"]
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
@@ -700,6 +725,7 @@ version = "3.1.5"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
{file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
@@ -717,6 +743,7 @@ version = "0.8.2"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b"},
{file = "jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393"},
@@ -802,6 +829,7 @@ version = "1.4.7"
description = "A fast implementation of the Cassowary constraint solver"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"},
{file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"},
@@ -925,6 +953,7 @@ version = "2.1.5"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
{file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
@@ -994,6 +1023,7 @@ version = "3.7.5"
description = "Python plotting package"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4a87b69cb1cb20943010f63feb0b2901c17a3b435f75349fd9865713bfa63925"},
{file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d3ce45010fefb028359accebb852ca0c21bd77ec0f281952831d235228f15810"},
@@ -1062,6 +1092,7 @@ version = "1.24.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"},
{file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"},
@@ -1095,13 +1126,14 @@ files = [
[[package]]
name = "openai"
-version = "1.59.9"
+version = "1.109.1"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "openai-1.59.9-py3-none-any.whl", hash = "sha256:61a0608a1313c08ddf92fe793b6dbd1630675a1fe3866b2f96447ce30050c448"},
- {file = "openai-1.59.9.tar.gz", hash = "sha256:ec1a20b0351b4c3e65c6292db71d8233515437c6065efd4fd50edeb55df5f5d2"},
+ {file = "openai-1.109.1-py3-none-any.whl", hash = "sha256:6bcaf57086cf59159b8e27447e4e7dd019db5d29a438072fbd49c290c7e65315"},
+ {file = "openai-1.109.1.tar.gz", hash = "sha256:d173ed8dbca665892a6db099b4a2dfac624f94d20a93f46eb0b56aae940ed869"},
]
[package.dependencies]
@@ -1115,8 +1147,10 @@ tqdm = ">4"
typing-extensions = ">=4.11,<5"
[package.extras]
+aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
-realtime = ["websockets (>=13,<15)"]
+realtime = ["websockets (>=13,<16)"]
+voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "packaging"
@@ -1124,6 +1158,7 @@ version = "24.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "test"]
files = [
{file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"},
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
@@ -1135,6 +1170,7 @@ version = "2.0.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"},
{file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"},
@@ -1167,7 +1203,7 @@ files = [
numpy = [
{version = ">=1.20.3", markers = "python_version < \"3.10\""},
{version = ">=1.23.2", markers = "python_version >= \"3.11\""},
- {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""},
+ {version = ">=1.21.0", markers = "python_version == \"3.10\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
@@ -1199,9 +1235,10 @@ xml = ["lxml (>=4.6.3)"]
[[package]]
name = "pandasai"
version = "3.0.0b4"
-description = "Chat with your database (SQL, CSV, pandas, mongodb, noSQL, etc). PandasAI makes data analysis conversational using LLMs (GPT 3.5 / 4, Anthropic, VertexAI) and RAG."
+description = "Chat with your database (SQL, CSV, pandas, mongodb, noSQL, etc). PandaAI makes data analysis conversational using LLMs (GPT 3.5 / 4, Anthropic, VertexAI) and RAG."
optional = false
python-versions = "<3.12,>=3.8"
+groups = ["main"]
files = [
{file = "pandasai-3.0.0b4-py3-none-any.whl", hash = "sha256:bed889ab85d866a4e98703bf7407ba61767e3787db7de18fd9fd7d4b50a29033"},
{file = "pandasai-3.0.0b4.tar.gz", hash = "sha256:6b2419ed0bf63735a7e3e94b4ebeda931bf4c25bb47a71038cc3ae6875ec776c"},
@@ -1235,6 +1272,7 @@ version = "10.4.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
{file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"},
@@ -1323,7 +1361,7 @@ docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline
fpx = ["olefile"]
mic = ["olefile"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
-typing = ["typing-extensions"]
+typing = ["typing-extensions ; python_version < \"3.10\""]
xmp = ["defusedxml"]
[[package]]
@@ -1332,6 +1370,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
+groups = ["test"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -1347,6 +1386,7 @@ version = "14.0.2"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pyarrow-14.0.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:ba9fe808596c5dbd08b3aeffe901e5f81095baaa28e7d5118e01354c64f22807"},
{file = "pyarrow-14.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:22a768987a16bb46220cef490c56c671993fbee8fd0475febac0b3e16b00a10e"},
@@ -1395,6 +1435,7 @@ version = "2.10.5"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pydantic-2.10.5-py3-none-any.whl", hash = "sha256:4dd4e322dbe55472cb7ca7e73f4b63574eecccf2835ffa2af9021ce113c83c53"},
{file = "pydantic-2.10.5.tar.gz", hash = "sha256:278b38dbbaec562011d659ee05f63346951b3a248a6f3642e1bc68894ea2b4ff"},
@@ -1407,7 +1448,7 @@ typing-extensions = ">=4.12.2"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -1415,6 +1456,7 @@ version = "2.27.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"},
{file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"},
@@ -1527,6 +1569,7 @@ version = "3.1.4"
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
optional = false
python-versions = ">=3.6.8"
+groups = ["main"]
files = [
{file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"},
{file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"},
@@ -1541,6 +1584,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["test"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1563,6 +1607,7 @@ version = "4.1.0"
description = "Pytest plugin for measuring coverage."
optional = false
python-versions = ">=3.7"
+groups = ["test"]
files = [
{file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
{file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
@@ -1581,6 +1626,7 @@ version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
+groups = ["test"]
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
@@ -1598,6 +1644,7 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1612,6 +1659,7 @@ version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
{file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
@@ -1626,6 +1674,7 @@ version = "2024.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["main"]
files = [
{file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"},
{file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
@@ -1637,6 +1686,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1699,6 +1749,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1802,6 +1853,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1823,6 +1875,7 @@ version = "0.5.2"
description = ""
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "safetensors-0.5.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:45b6092997ceb8aa3801693781a71a99909ab9cc776fbc3fa9322d29b1d3bef2"},
{file = "safetensors-0.5.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:6d0d6a8ee2215a440e1296b843edf44fd377b055ba350eaba74655a2fe2c4bae"},
@@ -1860,6 +1913,7 @@ version = "1.10.1"
description = "Fundamental algorithms for scientific computing in Python"
optional = false
python-versions = "<3.12,>=3.8"
+groups = ["main"]
files = [
{file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"},
{file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"},
@@ -1898,6 +1952,7 @@ version = "0.12.2"
description = "Statistical data visualization"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "seaborn-0.12.2-py3-none-any.whl", hash = "sha256:ebf15355a4dba46037dfd65b7350f014ceb1f13c05e814eda2c9f5fd731afc08"},
{file = "seaborn-0.12.2.tar.gz", hash = "sha256:374645f36509d0dcab895cba5b47daf0586f77bfe3b36c97c607db7da5be0139"},
@@ -1919,6 +1974,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["main"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -1930,6 +1986,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -1941,6 +1998,7 @@ version = "25.34.1"
description = "An easily customizable SQL parser and transpiler"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "sqlglot-25.34.1-py3-none-any.whl", hash = "sha256:15099f8af832e6f5593fb92211d8b3f0810744ac0dc443fb70010fa38dc2562b"},
{file = "sqlglot-25.34.1.tar.gz", hash = "sha256:6952c083c4a8b8de3c09c10b262a03c6853071bd397f05759c08f1e2f3c683cb"},
@@ -1959,6 +2017,7 @@ version = "0.3.0"
description = "An easily customizable SQL parser and transpiler"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "sqlglotrs-0.3.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:20483ace62f943d50a7caeae57b434d1872f0dfeebc697f5e97a6851e3cef254"},
{file = "sqlglotrs-0.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602feea94d9cfbba0d8b7cf40f144ce311f8c11f06b6a49638d6311b799ee578"},
@@ -2029,6 +2088,7 @@ version = "0.20.3"
description = ""
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"},
{file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"},
@@ -2158,6 +2218,8 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
+groups = ["test"]
+markers = "python_version <= \"3.10\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2199,6 +2261,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2220,6 +2283,7 @@ version = "4.46.3"
description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
optional = false
python-versions = ">=3.8.0"
+groups = ["main"]
files = [
{file = "transformers-4.46.3-py3-none-any.whl", hash = "sha256:a12ef6f52841fd190a3e5602145b542d03507222f2c64ebb7ee92e8788093aef"},
{file = "transformers-4.46.3.tar.gz", hash = "sha256:8ee4b3ae943fe33e82afff8e837f4b052058b07ca9be3cb5b729ed31295f72cc"},
@@ -2289,6 +2353,7 @@ version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
@@ -2300,6 +2365,7 @@ version = "2024.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["main"]
files = [
{file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
{file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
@@ -2311,13 +2377,14 @@ version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
{file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2328,20 +2395,22 @@ version = "3.20.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
+markers = "python_version < \"3.10\""
files = [
{file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
{file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = ">=3.8,<3.12"
-content-hash = "083c1c663873ad9c5cd484a4c34c4b32e4092972b8b2655061587db330ceab9e"
+content-hash = "604ed80d13c94fe516281dc953e9a2a9f36cddea67f3b65244b5cb3f25e491bf"
diff --git a/extensions/llms/openai/pyproject.toml b/extensions/llms/openai/pyproject.toml
index d6287c6d2..541ba7147 100644
--- a/extensions/llms/openai/pyproject.toml
+++ b/extensions/llms/openai/pyproject.toml
@@ -13,7 +13,7 @@ readme = "README.md"
[tool.poetry.dependencies]
python = ">=3.8,<3.12"
pandasai = ">=3.0.0b4"
-openai = "^1.3.7"
+openai = "^1.109.1"
typing-extensions = "^4.0.0"
[tool.poetry.group.test]
diff --git a/extensions/llms/openai/tests/test_openai.py b/extensions/llms/openai/tests/test_openai.py
index a1b740641..02a300537 100644
--- a/extensions/llms/openai/tests/test_openai.py
+++ b/extensions/llms/openai/tests/test_openai.py
@@ -61,6 +61,22 @@ def test_params_setting(self):
assert llm.presence_penalty == 3.0
assert llm.stop == ["\n"]
+ def test_responses_api_params_setting(self):
+ llm = OpenAI(
+ api_token="test",
+ model="gpt-5-mini",
+ reasoning_effort="minimal",
+ max_output_tokens=5000,
+ verbosity="low",
+ stop=["\n"],
+ )
+
+ assert llm.model == "gpt-5-mini"
+ assert llm.reasoning_effort == "minimal"
+ assert llm.max_output_tokens == 5000
+ assert llm.verbosity == "low"
+ assert llm.stop == ["\n"]
+
def test_completion(self, mocker):
expected_text = "This is the generated text."
expected_response = OpenAIObject(
@@ -82,6 +98,27 @@ def test_completion(self, mocker):
openai.completion.assert_called_once_with("Some prompt.")
assert result == expected_response
+ def test_responses(self, mocker):
+ expected_text = "This is the generated text."
+ expected_response = OpenAIObject(
+ {
+ "choices": [{"text": expected_text}],
+ "usage": {
+ "prompt_tokens": 2,
+ "completion_tokens": 1,
+ "total_tokens": 3,
+ },
+ "model": "gpt-5-mini",
+ }
+ )
+
+ openai = OpenAI(api_token="test")
+ mocker.patch.object(openai, "responses_completion", return_value=expected_response)
+ result = openai.responses_completion("Some prompt.")
+
+ openai.responses_completion.assert_called_once_with("Some prompt.")
+ assert result == expected_response
+
def test_chat_completion(self, mocker):
openai = OpenAI(api_token="test")
expected_response = OpenAIObject(