diff --git a/.github/labeler.yml b/.github/labeler.yml
index bd14ca500e..9dbaa52ab4 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -39,6 +39,11 @@ integration:cohere:
       - any-glob-to-any-file: "integrations/cohere/**/*"
       - any-glob-to-any-file: ".github/workflows/cohere.yml"
 
+integration:cometapi:
+  - changed-files:
+      - any-glob-to-any-file: "integrations/cometapi/**/*"
+      - any-glob-to-any-file: ".github/workflows/cometapi.yml"
+
 integration:deepeval:
   - changed-files:
       - any-glob-to-any-file: "integrations/deepeval/**/*"
diff --git a/.github/workflows/cometapi.yml b/.github/workflows/cometapi.yml
new file mode 100644
index 0000000000..5d08ba9c3a
--- /dev/null
+++ b/.github/workflows/cometapi.yml
@@ -0,0 +1,83 @@
+# This workflow comes from https://github.com/ofek/hatch-mypyc
+# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml
+name: Test / cometapi
+
+on:
+  schedule:
+    - cron: "0 0 * * *"
+  pull_request:
+    paths:
+      - "integrations/cometapi/**"
+      - "!integrations/cometapi/*.md"
+      - ".github/workflows/cometapi.yml"
+
+defaults:
+  run:
+    working-directory: integrations/cometapi
+
+concurrency:
+  group: cometapi-${{ github.head_ref }}
+  cancel-in-progress: true
+
+env:
+  PYTHONUNBUFFERED: "1"
+  FORCE_COLOR: "1"
+  COMET_API_KEY: "${{ secrets.COMET_API_KEY }}"
+
+jobs:
+  run:
+    name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+        python-version: ["3.9", "3.13"]
+      max-parallel: 2 # to avoid "429 Resource has been exhausted"
+
+    steps:
+      - name: Support longpaths
+        if: matrix.os == 'windows-latest'
+        working-directory: .
+        run: git config --system core.longpaths true
+
+      - uses: actions/checkout@v5
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v6
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install Hatch
+        run: pip install --upgrade hatch
+
+      - name: Lint
+        if: matrix.python-version == '3.9' && runner.os == 'Linux'
+        run: hatch run fmt-check && hatch run test:types
+
+      - name: Generate docs
+        if: matrix.python-version == '3.9' && runner.os == 'Linux'
+        run: hatch run docs
+
+      - name: Run tests
+        run: hatch run test:cov-retry
+
+      - name: Run unit tests with lowest direct dependencies
+        run: |
+          hatch run uv pip compile pyproject.toml --resolution lowest-direct --output-file requirements_lowest_direct.txt
+          hatch -e test env run -- uv pip install -r requirements_lowest_direct.txt
+          hatch run test:unit
+
+      - name: Nightly - run unit tests with Haystack main branch
+        if: github.event_name == 'schedule'
+        run: |
+          hatch env prune
+          hatch run uv pip install git+https://github.com/deepset-ai/haystack.git@main
+          hatch run test:unit
+
+      - name: Send event to Datadog for nightly failures
+        if: failure() && github.event_name == 'schedule'
+        uses: ./.github/actions/send_failure
+        with:
+          title: |
+            Core integrations nightly tests failure: ${{ github.workflow }}
+          api-key: ${{ secrets.CORE_DATADOG_API_KEY }}
\ No newline at end of file
diff --git a/README.md b/README.md
index d545bdc533..c2cb01558b 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,7 @@ Please check out our [Contribution Guidelines](CONTRIBUTING.md) for all the deta
 | [azure-ai-search-haystack](integrations/azure_ai_search/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/azure-ai-search-haystack.svg)](https://pypi.org/project/azure-ai-search-haystack) | [![Test / azure-ai-search](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/azure_ai_search.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/azure_ai_search.yml) |
 | [chroma-haystack](integrations/chroma/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/chroma-haystack.svg)](https://pypi.org/project/chroma-haystack) | [![Test / chroma](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/chroma.yml) |
 | [cohere-haystack](integrations/cohere/) | Embedder, Generator, Ranker | [![PyPI - Version](https://img.shields.io/pypi/v/cohere-haystack.svg)](https://pypi.org/project/cohere-haystack) | [![Test / cohere](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cohere.yml) |
+| [cometapi-haystack](integrations/cometapi/) | Generator | [![PyPI - Version](https://img.shields.io/pypi/v/cometapi-haystack.svg)](https://pypi.org/project/cometapi-haystack) | [![Test / cometapi](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cometapi.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/cometapi.yml) |
 | [deepeval-haystack](integrations/deepeval/) | Evaluator | [![PyPI - Version](https://img.shields.io/pypi/v/deepeval-haystack.svg)](https://pypi.org/project/deepeval-haystack) | [![Test /
deepeval](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/deepeval.yml) | | [elasticsearch-haystack](integrations/elasticsearch/) | Document Store | [![PyPI - Version](https://img.shields.io/pypi/v/elasticsearch-haystack.svg)](https://pypi.org/project/elasticsearch-haystack) | [![Test / elasticsearch](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/elasticsearch.yml) | | [fastembed-haystack](integrations/fastembed/) | Embedder, Ranker | [![PyPI - Version](https://img.shields.io/pypi/v/fastembed-haystack.svg)](https://pypi.org/project/fastembed-haystack/) | [![Test / fastembed](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/fastembed.yml/badge.svg)](https://github.com/deepset-ai/haystack-core-integrations/actions/workflows/fastembed.yml) | diff --git a/integrations/cometapi/CHANGELOG.md b/integrations/cometapi/CHANGELOG.md new file mode 100644 index 0000000000..825c32f0d0 --- /dev/null +++ b/integrations/cometapi/CHANGELOG.md @@ -0,0 +1 @@ +# Changelog diff --git a/integrations/cometapi/LICENSE.txt b/integrations/cometapi/LICENSE.txt new file mode 100644 index 0000000000..c819630014 --- /dev/null +++ b/integrations/cometapi/LICENSE.txt @@ -0,0 +1,192 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (which shall not include communications that are marked or + designated in writing by the copyright owner as "Not a Contribution"). + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control + systems, and issue tracking systems that are managed by, or on behalf + of, the Licensor for the purpose of discussing and improving the Work, + but excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution". + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to use, reproduce, modify, distribute, and prepare + Derivative Works of, publicly display, publicly perform, sublicense, + and distribute the Work and such Derivative Works in Source or Object + form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, trademark, patent, + attribution and other notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright notice to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Support. You may choose to offer, and to + charge a fee for, warranty, support, indemnity or other liability + obligations and/or rights consistent with this License. However, in + accepting such obligations, You may act only on Your own behalf and on + Your sole responsibility, not on behalf of any other Contributor, and + only if You agree to indemnify, defend, and hold each Contributor + harmless for any liability incurred by, or claims asserted against, + such Contributor by reason of your accepting any such warranty or support. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. 
+
+   To apply the Apache License to your work, attach the following
+   boilerplate notice, with the fields enclosed by brackets "[]"
+   replaced with your own identifying information. (Don't include
+   the brackets!) The text should be enclosed in the appropriate
+   comment syntax for the file format. We also recommend that a
+   file or class name and description of purpose be included on the
+   same page as the copyright notice for easier identification within
+   third-party archives.
+
+   Copyright 2023-present deepset GmbH
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/integrations/cometapi/README.md b/integrations/cometapi/README.md
new file mode 100644
index 0000000000..58110ec077
--- /dev/null
+++ b/integrations/cometapi/README.md
@@ -0,0 +1,48 @@
+# CometAPI Haystack Integration
+
+[![PyPI - Version](https://img.shields.io/pypi/v/cometapi-haystack.svg)](https://pypi.org/project/cometapi-haystack)
+[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/cometapi-haystack.svg)](https://pypi.org/project/cometapi-haystack)
+
+-----
+
+**CometAPI Resources**
+- [Website](https://www.cometapi.com/?utm_source=haystack&utm_campaign=integration&utm_medium=integration&utm_content=integration)
+- [Documentation](https://api.cometapi.com/doc)
+- [Get an API Key](https://api.cometapi.com/console/token)
+- [Pricing](https://api.cometapi.com/pricing)
+
+**Table of Contents**
+
+- [Installation](#installation)
+- [Usage](#usage)
+- [License](#license)
+
+## Installation
+
+```console
+pip install cometapi-haystack
+```
+
+## Usage
+
+This integration provides the `CometAPIChatGenerator` component, which lets Haystack pipelines call the 500+ AI models exposed through CometAPI's OpenAI-compatible API.
+
+### Chat Generator
+
+Set the `COMET_API_KEY` environment variable, then run:
+
+```python
+from haystack.dataclasses.chat_message import ChatMessage
+from haystack_integrations.components.generators.cometapi import CometAPIChatGenerator
+
+# Initialize the chat generator
+chat_generator = CometAPIChatGenerator(model="grok-3-mini")
+
+# Generate a response
+messages = [ChatMessage.from_user("Tell me about the future of AI")]
+response = chat_generator.run(messages=messages)
+print(response["replies"][0].text)
+```
+
+## License
+
+`cometapi-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license.
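A streaming variant of the Chat Generator example above — a minimal sketch, assuming a valid `COMET_API_KEY` is set and using Haystack's built-in `print_streaming_chunk` callback (the model name is just an example):

```python
from haystack.components.generators.utils import print_streaming_chunk
from haystack.dataclasses.chat_message import ChatMessage

from haystack_integrations.components.generators.cometapi import CometAPIChatGenerator

# Stream tokens to stdout as they arrive instead of waiting for the full reply
chat_generator = CometAPIChatGenerator(
    model="gpt-4o-mini",
    streaming_callback=print_streaming_chunk,
)

response = chat_generator.run(messages=[ChatMessage.from_user("What's the capital of France?")])
print(response["replies"][0].text)  # the complete text is still returned once streaming ends
```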
\ No newline at end of file
diff --git a/integrations/cometapi/pydoc/config.yml b/integrations/cometapi/pydoc/config.yml
new file mode 100644
index 0000000000..4dbb825bd1
--- /dev/null
+++ b/integrations/cometapi/pydoc/config.yml
@@ -0,0 +1,29 @@
+loaders:
+  - type: haystack_pydoc_tools.loaders.CustomPythonLoader
+    search_path: [../src]
+    modules: [
+      "haystack_integrations.components.generators.cometapi.chat.chat_generator",
+    ]
+    ignore_when_discovered: ["__init__"]
+processors:
+  - type: filter
+    expression:
+    documented_only: true
+    do_not_filter_modules: false
+    skip_empty_modules: true
+  - type: smart
+  - type: crossref
+renderer:
+  type: haystack_pydoc_tools.renderers.ReadmeIntegrationRenderer
+  excerpt: CometAPI integration for Haystack
+  category_slug: integrations-api
+  title: CometAPI
+  slug: integrations-cometapi
+  order: 91
+  markdown:
+    descriptive_class_title: false
+    classdef_code_block: false
+    descriptive_module_title: true
+    add_method_class_prefix: true
+    add_member_class_prefix: false
+    filename: _readme_cometapi.md
\ No newline at end of file
diff --git a/integrations/cometapi/pyproject.toml b/integrations/cometapi/pyproject.toml
new file mode 100644
index 0000000000..2b31e2288f
--- /dev/null
+++ b/integrations/cometapi/pyproject.toml
@@ -0,0 +1,174 @@
+[build-system]
+requires = ["hatchling", "hatch-vcs"]
+build-backend = "hatchling.build"
+
+[project]
+name = "cometapi-haystack"
+dynamic = ["version"]
+description = 'Use CometAPI with Haystack to build AI applications with 500+ AI models.'
+readme = "README.md"
+requires-python = ">=3.9"
+license = "Apache-2.0"
+keywords = []
+authors = [
+  { name = "deepset GmbH", email = "info@deepset.ai" },
+  { name = "Gary Badwal", email = "gurpreet071999@gmail.com" },
+]
+classifiers = [
+  "License :: OSI Approved :: Apache Software License",
+  "Development Status :: 4 - Beta",
+  "Programming Language :: Python",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: 3.13",
+  "Programming Language :: Python :: Implementation :: CPython",
+  "Programming Language :: Python :: Implementation :: PyPy",
+]
+dependencies = ["haystack-ai>=2.13.2"]
+
+[project.urls]
+Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/cometapi#readme"
+Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues"
+Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/cometapi"
+
+[tool.hatch.build.targets.wheel]
+packages = ["src/haystack_integrations"]
+
+[tool.hatch.version]
+source = "vcs"
+tag-pattern = 'integrations\/cometapi-v(?P<version>.*)'
+
+[tool.hatch.version.raw-options]
+root = "../.."
+git_describe_command = 'git describe --tags --match="integrations/cometapi-v[0-9]*"'
+
+[tool.hatch.envs.default]
+installer = "uv"
+dependencies = ["haystack-pydoc-tools", "ruff"]
+
+[tool.hatch.envs.default.scripts]
+docs = ["pydoc-markdown pydoc/config.yml"]
+fmt = "ruff check --fix {args} && ruff format {args}"
+fmt-check = "ruff check {args} && ruff format --check {args}"
+
+[tool.hatch.envs.test]
+dependencies = [
+  "pytest",
+  "pytest-asyncio",
+  "pytest-cov",
+  "pytest-rerunfailures",
+  "mypy",
+  "pip",
+  "pytz",
+]
+
+[tool.hatch.envs.test.scripts]
+unit = 'pytest -m "not integration" {args:tests}'
+integration = 'pytest -m "integration" {args:tests}'
+all = 'pytest {args:tests}'
+cov-retry = 'all --cov=haystack_integrations --reruns 3 --reruns-delay 30 -x'
+
+types = "mypy -p haystack_integrations.components.generators.cometapi {args}"
+
+[tool.mypy]
+install_types = true
+non_interactive = true
+check_untyped_defs = true
+disallow_incomplete_defs = true
+
+[[tool.mypy.overrides]]
+module = [
+  "jsonref.*", # jsonref does not provide types
+]
+ignore_missing_imports = true
+
+[tool.ruff]
+target-version = "py39"
+line-length = 120
+
+[tool.ruff.lint]
+select = [
+  "A",
+  "ARG",
+  "B",
+  "C",
+  "DTZ",
+  "E",
+  "EM",
+  "F",
+  "FBT",
+  "I",
+  "ICN",
+  "ISC",
+  "N",
+  "PLC",
+  "PLE",
+  "PLR",
+  "PLW",
+  "Q",
+  "RUF",
+  "S",
+  "T",
+  "TID",
+  "UP",
+  "W",
+  "YTT",
+]
+ignore = [
+  # Allow non-abstract empty methods in abstract base classes
+  "B027",
+  # Allow boolean positional values in function calls, like `dict.get(... True)`
+  "FBT003",
+  # Ignore checks for possible passwords
+  "S105",
+  "S106",
+  "S107",
+  # Ignore complexity
+  "C901",
+  "PLR0911",
+  "PLR0912",
+  "PLR0913",
+  "PLR0915",
+  # Ignore unused params
+  "ARG001",
+  "ARG002",
+  "ARG005",
+  # Allow function call argument defaults e.g. `Secret.from_env_var`
+  "B008",
+]
+unfixable = [
+  # Don't touch unused imports
+  "F401",
+]
+
+[tool.ruff.lint.isort]
+known-first-party = ["haystack_integrations"]
+
+[tool.ruff.lint.flake8-tidy-imports]
+ban-relative-imports = "parents"
+
+[tool.ruff.lint.per-file-ignores]
+# Tests can use magic values, assertions, and relative imports
+"tests/**/*" = ["PLR2004", "S101", "TID252"]
+# Examples can use print statements
+"examples/**/*" = ["T201"]
+
+[tool.coverage.run]
+source = ["haystack_integrations"]
+branch = true
+parallel = true
+
+[tool.coverage.report]
+omit = ["*/tests/*", "*/__init__.py"]
+show_missing = true
+exclude_lines = ["no cov", "if __name__ == .__main__.:", "if TYPE_CHECKING:"]
+
+[tool.pytest.ini_options]
+addopts = "--strict-markers"
+markers = [
+  "integration: integration tests",
+]
+log_cli = true
+asyncio_mode = "auto"
diff --git a/integrations/cometapi/src/haystack_integrations/components/generators/cometapi/__init__.py b/integrations/cometapi/src/haystack_integrations/components/generators/cometapi/__init__.py
new file mode 100644
index 0000000000..98f0ce06ab
--- /dev/null
+++ b/integrations/cometapi/src/haystack_integrations/components/generators/cometapi/__init__.py
@@ -0,0 +1,7 @@
+# SPDX-FileCopyrightText: 2023-present deepset GmbH
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from .chat.chat_generator import CometAPIChatGenerator
+
+__all__ = ["CometAPIChatGenerator"]
diff --git a/integrations/cometapi/src/haystack_integrations/components/generators/cometapi/chat/chat_generator.py b/integrations/cometapi/src/haystack_integrations/components/generators/cometapi/chat/chat_generator.py
new file mode 100644
index 0000000000..cc389302e8
--- /dev/null
+++ b/integrations/cometapi/src/haystack_integrations/components/generators/cometapi/chat/chat_generator.py
@@ -0,0 +1,58 @@
+from typing import Any, Dict, List, Optional, Union
+
+from haystack.components.generators.chat import OpenAIChatGenerator
+from haystack.dataclasses import StreamingCallbackT
+from haystack.tools import Tool, Toolset
+from haystack.utils import Secret
+
+
+class CometAPIChatGenerator(OpenAIChatGenerator):
+    """
+    A chat generator that uses CometAPI to generate chat responses.
+
+    This class extends Haystack's `OpenAIChatGenerator` to work with CometAPI's
+    OpenAI-compatible endpoint. It pins `api_base_url` to that endpoint and otherwise
+    exposes all the standard `OpenAIChatGenerator` configuration options.
+
+    :param api_key: The API key for authenticating with CometAPI. Defaults to
+        loading from the "COMET_API_KEY" environment variable.
+    :param model: The name of the model to use for chat generation (e.g., "gpt-4o-mini", "grok-3-mini").
+        Defaults to "gpt-4o-mini".
+    :param streaming_callback: An optional callable invoked with each chunk of
+        a streaming response.
+    :param generation_kwargs: Optional keyword arguments to pass to the underlying generation
+        API call.
+    :param timeout: The maximum time in seconds to wait for a response from the API.
+    :param max_retries: The maximum number of times to retry a failed API request.
+    :param tools: An optional list of tool definitions (or a Toolset) that the model can use.
+    :param tools_strict: If True, the model follows the tool parameter schemas exactly
+        when producing tool calls.
+    :param http_client_kwargs: Optional keyword arguments to pass to the HTTP client.
+ """ + + def __init__( + self, + *, + api_key: Secret = Secret.from_env_var("COMET_API_KEY"), + model: str = "gpt-4o-mini", + streaming_callback: Optional[StreamingCallbackT] = None, + generation_kwargs: Optional[Dict[str, Any]] = None, + timeout: Optional[int] = None, + max_retries: Optional[int] = None, + tools: Optional[Union[List[Union[Tool, Toolset]], Toolset]] = None, + tools_strict: bool = False, + http_client_kwargs: Optional[Dict[str, Any]] = None, + ): + api_base_url = "https://api.cometapi.com/v1" + + super().__init__( + api_key=api_key, + model=model, + api_base_url=api_base_url, + streaming_callback=streaming_callback, + generation_kwargs=generation_kwargs, + timeout=timeout, + max_retries=max_retries, + tools=tools, + tools_strict=tools_strict, + http_client_kwargs=http_client_kwargs, + ) diff --git a/integrations/cometapi/src/haystack_integrations/components/generators/py.typed b/integrations/cometapi/src/haystack_integrations/components/generators/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integrations/cometapi/tests/test_cometapi_chat_generator.py b/integrations/cometapi/tests/test_cometapi_chat_generator.py new file mode 100644 index 0000000000..6a3b5c17f2 --- /dev/null +++ b/integrations/cometapi/tests/test_cometapi_chat_generator.py @@ -0,0 +1,876 @@ +import os +from dataclasses import asdict +from datetime import datetime +from unittest.mock import patch + +import pytest +import pytz +from haystack import Pipeline +from haystack.components.generators.utils import print_streaming_chunk +from haystack.components.tools import ToolInvoker +from haystack.dataclasses import ChatMessage, ChatRole, StreamingChunk, ToolCall +from haystack.tools import Tool +from haystack.utils.auth import Secret +from openai import OpenAIError +from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage +from openai.types.chat.chat_completion import Choice +from openai.types.chat.chat_completion_chunk import Choice as ChoiceChunk +from openai.types.chat.chat_completion_chunk import ChoiceDelta, ChoiceDeltaToolCall, ChoiceDeltaToolCallFunction +from openai.types.completion_usage import CompletionTokensDetails, CompletionUsage, PromptTokensDetails + +from haystack_integrations.components.generators.cometapi.chat.chat_generator import CometAPIChatGenerator + + +class CollectorCallback: + """ + Callback to collect streaming chunks for testing purposes. 
+ """ + + def __init__(self): + self.chunks = [] + + def __call__(self, chunk: StreamingChunk) -> None: + self.chunks.append(chunk) + + +@pytest.fixture +def chat_messages(): + return [ + ChatMessage.from_system("You are a helpful assistant"), + ChatMessage.from_user("What's the capital of France"), + ] + + +def weather(city: str): + """Get weather for a given city.""" + return f"The weather in {city} is sunny and 32°C" + + +@pytest.fixture +def tools(): + tool_parameters = {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]} + tool = Tool( + name="weather", + description="useful to determine the weather in a given location", + parameters=tool_parameters, + function=weather, + ) + + return [tool] + + +@pytest.fixture +def mock_chat_completion(): + """ + Mock the OpenAI API completion response and reuse it for tests + """ + with patch("openai.resources.chat.completions.Completions.create") as mock_chat_completion_create: + completion = ChatCompletion( + id="foo", + model="gpt-4o-mini", + object="chat.completion", + choices=[ + Choice( + finish_reason="stop", + logprobs=None, + index=0, + message=ChatCompletionMessage(content="Hello world!", role="assistant"), + ) + ], + created=int(datetime.now(tz=pytz.timezone("UTC")).timestamp()), + usage={"prompt_tokens": 57, "completion_tokens": 40, "total_tokens": 97}, + ) + + mock_chat_completion_create.return_value = completion + yield mock_chat_completion_create + + +class TestCometAPIChatGenerator: + def test_init_default(self, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "test-api-key") + component = CometAPIChatGenerator() + assert component.client.api_key == "test-api-key" + assert component.model == "gpt-4o-mini" + assert component.api_base_url == "https://api.cometapi.com/v1" + assert component.streaming_callback is None + assert not component.generation_kwargs + + def test_init_fail_wo_api_key(self, monkeypatch): + monkeypatch.delenv("COMET_API_KEY", raising=False) + with pytest.raises(ValueError, match=r"None of the .* environment variables are set"): + CometAPIChatGenerator() + + def test_init_with_parameters(self): + component = CometAPIChatGenerator( + api_key=Secret.from_token("test-api-key"), + model="gpt-4o-mini", + streaming_callback=print_streaming_chunk, + generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"}, + ) + assert component.client.api_key == "test-api-key" + assert component.model == "gpt-4o-mini" + assert component.streaming_callback is print_streaming_chunk + assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"} + + def test_to_dict_default(self, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "test-api-key") + component = CometAPIChatGenerator() + data = component.to_dict() + + assert ( + data["type"] + == "haystack_integrations.components.generators.cometapi.chat.chat_generator.CometAPIChatGenerator" + ) + + expected_params = { + "api_key": {"env_vars": ["COMET_API_KEY"], "strict": True, "type": "env_var"}, + "model": "gpt-4o-mini", + "streaming_callback": None, + "api_base_url": "https://api.cometapi.com/v1", + "generation_kwargs": {}, + "timeout": None, + "max_retries": None, + "tools": None, + "http_client_kwargs": None, + } + + for key, value in expected_params.items(): + assert data["init_parameters"][key] == value + + def test_to_dict_with_parameters(self, monkeypatch): + monkeypatch.setenv("ENV_VAR", "test-api-key") + component = CometAPIChatGenerator( + api_key=Secret.from_env_var("ENV_VAR"), + model="gpt-4o-mini", + 
streaming_callback=print_streaming_chunk, + generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"}, + timeout=10, + max_retries=10, + tools=None, + http_client_kwargs={"proxy": "http://localhost:8080"}, + ) + data = component.to_dict() + + assert ( + data["type"] + == "haystack_integrations.components.generators.cometapi.chat.chat_generator.CometAPIChatGenerator" + ) + + expected_params = { + "api_key": {"env_vars": ["ENV_VAR"], "strict": True, "type": "env_var"}, + "model": "gpt-4o-mini", + "api_base_url": "https://api.cometapi.com/v1", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", + "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"}, + "timeout": 10, + "max_retries": 10, + "tools": None, + "http_client_kwargs": {"proxy": "http://localhost:8080"}, + } + + for key, value in expected_params.items(): + assert data["init_parameters"][key] == value + + def test_from_dict(self, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "fake-api-key") + data = { + "type": ("haystack_integrations.components.generators.cometapi.chat.chat_generator.CometAPIChatGenerator"), + "init_parameters": { + "api_key": {"env_vars": ["COMET_API_KEY"], "strict": True, "type": "env_var"}, + "model": "gpt-4o-mini", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", + "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"}, + "timeout": 10, + "max_retries": 10, + "tools": None, + "http_client_kwargs": {"proxy": "http://localhost:8080"}, + }, + } + component = CometAPIChatGenerator.from_dict(data) + assert component.model == "gpt-4o-mini" + assert component.streaming_callback is print_streaming_chunk + assert component.api_base_url == "https://api.cometapi.com/v1" + assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"} + assert component.api_key == Secret.from_env_var("COMET_API_KEY") + assert component.http_client_kwargs == {"proxy": "http://localhost:8080"} + assert component.tools is None + assert component.timeout == 10 + assert component.max_retries == 10 + + def test_from_dict_fail_wo_env_var(self, monkeypatch): + monkeypatch.delenv("COMET_API_KEY", raising=False) + data = { + "type": ("haystack_integrations.components.generators.cometapi.chat.chat_generator.CometAPIChatGenerator"), + "init_parameters": { + "api_key": {"env_vars": ["COMET_API_KEY"], "strict": True, "type": "env_var"}, + "model": "gpt-4o-mini", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", + "generation_kwargs": {"max_tokens": 10, "some_test_param": "test-params"}, + "timeout": 10, + "max_retries": 10, + }, + } + with pytest.raises(ValueError, match=r"None of the .* environment variables are set"): + CometAPIChatGenerator.from_dict(data) + + def test_run(self, chat_messages, mock_chat_completion, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "fake-api-key") + component = CometAPIChatGenerator() + response = component.run(chat_messages) + + # check that the component returns the correct ChatMessage response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, ChatMessage) for reply in response["replies"]] + + def test_run_with_params(self, chat_messages, mock_chat_completion, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "fake-api-key") + component = CometAPIChatGenerator(generation_kwargs={"max_tokens": 10, 
"temperature": 0.5}) + response = component.run(chat_messages) + + # check that the component calls the OpenAI API with the correct parameters + # for cometapi, these are passed in the extra_body parameter + _, kwargs = mock_chat_completion.call_args + assert kwargs["max_tokens"] == 10 + assert kwargs["temperature"] == 0.5 + # check that the component returns the correct response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, ChatMessage) for reply in response["replies"]] + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the cometapi API key to run this test.", + ) + @pytest.mark.integration + def test_live_run(self): + chat_messages = [ChatMessage.from_user("What's the capital of France")] + component = CometAPIChatGenerator() + results = component.run(chat_messages) + assert len(results["replies"]) == 1 + message: ChatMessage = results["replies"][0] + assert "Paris" in message.text + assert "gpt-4o-mini" in message.meta["model"] + assert message.meta["finish_reason"] == "stop" + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + def test_live_run_wrong_model(self, chat_messages): + component = CometAPIChatGenerator(model="something-obviously-wrong") + with pytest.raises(OpenAIError): + component.run(chat_messages) + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + def test_live_run_streaming(self): + class Callback: + def __init__(self): + self.responses = "" + self.counter = 0 + + def __call__(self, chunk: StreamingChunk) -> None: + self.counter += 1 + self.responses += chunk.content if chunk.content else "" + + callback = Callback() + component = CometAPIChatGenerator(streaming_callback=callback) + results = component.run([ChatMessage.from_user("What's the capital of France?")]) + + assert len(results["replies"]) == 1 + message: ChatMessage = results["replies"][0] + assert "Paris" in message.text + + assert "gpt-4o-mini" in message.meta["model"] + assert message.meta["finish_reason"] == "stop" + + assert callback.counter > 1 + assert "Paris" in callback.responses + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + def test_live_run_with_tools(self, tools): + chat_messages = [ChatMessage.from_user("What's the weather like in Paris?")] + component = CometAPIChatGenerator(tools=tools) + results = component.run(chat_messages) + assert len(results["replies"]) == 1 + message = results["replies"][0] + assert message.text is None + + assert message.tool_calls + tool_call = message.tool_call + assert isinstance(tool_call, ToolCall) + assert tool_call.tool_name == "weather" + assert tool_call.arguments == {"city": "Paris"} + assert message.meta["finish_reason"] == "tool_calls" + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + def test_live_run_with_tools_and_response(self, tools): + """ + 
Integration test that the CometAPIChatGenerator component can run with tools and get a response. + """ + initial_messages = [ChatMessage.from_user("What's the weather like in Paris and Berlin?")] + component = CometAPIChatGenerator(tools=tools) + results = component.run(messages=initial_messages, generation_kwargs={"tool_choice": "auto"}) + + assert len(results["replies"]) == 1 + + # Find the message with tool calls + tool_message = results["replies"][0] + + assert isinstance(tool_message, ChatMessage) + tool_calls = tool_message.tool_calls + assert len(tool_calls) == 2 + assert ChatMessage.is_from(tool_message, ChatRole.ASSISTANT) + + for tool_call in tool_calls: + assert tool_call.id is not None + assert isinstance(tool_call, ToolCall) + assert tool_call.tool_name == "weather" + + arguments = [tool_call.arguments for tool_call in tool_calls] + assert sorted(arguments, key=lambda x: x["city"]) == [{"city": "Berlin"}, {"city": "Paris"}] + assert tool_message.meta["finish_reason"] == "tool_calls" + + new_messages = [ + initial_messages[0], + tool_message, + ChatMessage.from_tool(tool_result="22° C and sunny", origin=tool_calls[0]), + ChatMessage.from_tool(tool_result="16° C and windy", origin=tool_calls[1]), + ] + # Pass the tool result to the model to get the final response + results = component.run(new_messages) + + assert len(results["replies"]) == 1 + final_message = results["replies"][0] + assert final_message.is_from(ChatRole.ASSISTANT) + assert len(final_message.text) > 0 + assert "paris" in final_message.text.lower() + assert "berlin" in final_message.text.lower() + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + def test_live_run_with_tools_streaming(self, tools): + """ + Integration test that the CometAPIChatGenerator component can run with tools and streaming. 
+ """ + component = CometAPIChatGenerator(tools=tools, streaming_callback=print_streaming_chunk) + results = component.run( + [ChatMessage.from_user("What's the weather like in Paris and Berlin?")], + generation_kwargs={"tool_choice": "auto"}, + ) + + assert len(results["replies"]) == 1 + + # Find the message with tool calls + tool_message = results["replies"][0] + + assert isinstance(tool_message, ChatMessage) + tool_calls = tool_message.tool_calls + assert len(tool_calls) == 2 + assert ChatMessage.is_from(tool_message, ChatRole.ASSISTANT) + + for tool_call in tool_calls: + assert tool_call.id is not None + assert isinstance(tool_call, ToolCall) + assert tool_call.tool_name == "weather" + + arguments = [tool_call.arguments for tool_call in tool_calls] + assert sorted(arguments, key=lambda x: x["city"]) == [{"city": "Berlin"}, {"city": "Paris"}] + assert tool_message.meta["finish_reason"] == "tool_calls" + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + def test_pipeline_with_cometapi_chat_generator(self, tools): + """ + Test that the CometAPIChatGenerator component can be used in a pipeline + """ + pipeline = Pipeline() + pipeline.add_component("generator", CometAPIChatGenerator(tools=tools)) + pipeline.add_component("tool_invoker", ToolInvoker(tools=tools)) + + pipeline.connect("generator", "tool_invoker") + + results = pipeline.run( + data={ + "generator": { + "messages": [ChatMessage.from_user("What's the weather like in Paris?")], + "generation_kwargs": {"tool_choice": "auto"}, + } + } + ) + + assert ( + "The weather in Paris is sunny and 32°C" + == results["tool_invoker"]["tool_messages"][0].tool_call_result.result + ) + + def test_serde_in_pipeline(self, monkeypatch): + """ + Test serialization/deserialization of CometAPIChatGenerator in a Pipeline, + including YAML conversion and detailed dictionary validation + """ + # Set mock API key + monkeypatch.setenv("COMET_API_KEY", "test-key") + + # Create a test tool + tool = Tool( + name="weather", + description="useful to determine the weather in a given location", + parameters={"city": {"type": "string"}}, + function=weather, + ) + + # Create generator with specific configuration + generator = CometAPIChatGenerator( + model="gpt-4o-mini", + generation_kwargs={"temperature": 0.7}, + streaming_callback=print_streaming_chunk, + tools=[tool], + ) + + # Create and configure pipeline + pipeline = Pipeline() + pipeline.add_component("generator", generator) + + # Get pipeline dictionary and verify its structure + pipeline_dict = pipeline.to_dict() + expected_dict = { + "metadata": {}, + "max_runs_per_component": 100, + "components": { + "generator": { + "type": ( + "haystack_integrations.components.generators.cometapi.chat.chat_generator.CometAPIChatGenerator" + ), + "init_parameters": { + "model": "gpt-4o-mini", + "streaming_callback": "haystack.components.generators.utils.print_streaming_chunk", + "api_base_url": "https://api.cometapi.com/v1", + "organization": None, + "generation_kwargs": {"temperature": 0.7}, + "api_key": {"type": "env_var", "env_vars": ["COMET_API_KEY"], "strict": True}, + "timeout": None, + "max_retries": None, + "tools": [ + { + "type": "haystack.tools.tool.Tool", + "data": { + "name": "weather", + "description": "useful to determine the weather in a given location", + "parameters": {"city": {"type": "string"}}, + "function": "test_cometapi_chat_generator.weather", + }, 
+ } + ], + "tools_strict": False, + "http_client_kwargs": None, + }, + } + }, + "connections": [], + "connection_type_validation": True, + } + + if not hasattr(pipeline, "_connection_type_validation"): + expected_dict.pop("connection_type_validation") + + # add outputs_to_string, inputs_from_state and outputs_to_state tool parameters for compatibility with + # haystack-ai>=2.12.0 + if hasattr(tool, "outputs_to_string"): + expected_dict["components"]["generator"]["init_parameters"]["tools"][0]["data"]["outputs_to_string"] = ( + tool.outputs_to_string + ) + if hasattr(tool, "inputs_from_state"): + expected_dict["components"]["generator"]["init_parameters"]["tools"][0]["data"]["inputs_from_state"] = ( + tool.inputs_from_state + ) + if hasattr(tool, "outputs_to_state"): + expected_dict["components"]["generator"]["init_parameters"]["tools"][0]["data"]["outputs_to_state"] = ( + tool.outputs_to_state + ) + + assert pipeline_dict == expected_dict + + # Verify the loaded pipeline's generator has the same configuration + loaded_generator = pipeline.get_component("generator") + assert loaded_generator.model == generator.model + assert loaded_generator.generation_kwargs == generator.generation_kwargs + assert loaded_generator.streaming_callback == generator.streaming_callback + assert len(loaded_generator.tools) == len(generator.tools) + assert loaded_generator.tools[0].name == generator.tools[0].name + assert loaded_generator.tools[0].description == generator.tools[0].description + assert loaded_generator.tools[0].parameters == generator.tools[0].parameters + + +class TestChatCompletionChunkConversion: + def test_handle_stream_response(self): + cometapi_chunks = [ + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk(delta=ChoiceDelta(content="", role="assistant"), index=0, native_finish_reason=None) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=0, + id="call_zznlVyVfK0GJwY28SShJpDCh", + function=ChoiceDeltaToolCallFunction(arguments="", name="weather"), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=0, + function=ChoiceDeltaToolCallFunction(arguments='{"ci'), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=0, + function=ChoiceDeltaToolCallFunction(arguments='ty": '), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( 
+ delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=0, + function=ChoiceDeltaToolCallFunction(arguments='"Paris'), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=0, + function=ChoiceDeltaToolCallFunction(arguments='"}'), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=1, + id="call_Mh1uOyW3Ys4gwydHjNHILHGX", + function=ChoiceDeltaToolCallFunction(arguments="", name="weather"), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + service_tier=None, + system_fingerprint="fp_34a54ae93c", + usage=None, + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=1, + id=None, + function=ChoiceDeltaToolCallFunction(arguments='{"ci'), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=1, + function=ChoiceDeltaToolCallFunction(arguments='ty": '), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=1, + function=ChoiceDeltaToolCallFunction(arguments='"Berli'), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta( + role="assistant", + tool_calls=[ + ChoiceDeltaToolCall( + index=1, + function=ChoiceDeltaToolCallFunction(arguments='n"}'), + type="function", + ) + ], + ), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta(content="", role="assistant"), + finish_reason="tool_calls", + index=0, + native_finish_reason="tool_calls", + ) + ], + created=1750162525, + model="gpt-4o-mini", + 
object="chat.completion.chunk", + system_fingerprint="fp_34a54ae93c", + provider="OpenAI", + ), + ChatCompletionChunk( + id="gen-1750162525-tc7ParBHvsqd6rYhCDtK", + choices=[ + ChoiceChunk( + delta=ChoiceDelta(content="", role="assistant"), + index=0, + native_finish_reason=None, + ) + ], + created=1750162525, + model="gpt-4o-mini", + object="chat.completion.chunk", + usage=CompletionUsage( + completion_tokens=42, + prompt_tokens=55, + total_tokens=97, + completion_tokens_details=CompletionTokensDetails(reasoning_tokens=0), + prompt_tokens_details=PromptTokensDetails(cached_tokens=0), + ), + provider="OpenAI", + ), + ] + + collector_callback = CollectorCallback() + llm = CometAPIChatGenerator(api_key=Secret.from_token("test-api-key")) + result = llm._handle_stream_response(cometapi_chunks, callback=collector_callback)[0] # type: ignore + + # Assert text is empty + assert result.text is None + + # Verify both tool calls were found and processed + assert len(result.tool_calls) == 2 + assert result.tool_calls[0].id == "call_zznlVyVfK0GJwY28SShJpDCh" + assert result.tool_calls[0].tool_name == "weather" + assert result.tool_calls[0].arguments == {"city": "Paris"} + assert result.tool_calls[1].id == "call_Mh1uOyW3Ys4gwydHjNHILHGX" + assert result.tool_calls[1].tool_name == "weather" + assert result.tool_calls[1].arguments == {"city": "Berlin"} + + # Verify meta information + assert result.meta["model"] == "gpt-4o-mini" + assert result.meta["finish_reason"] == "tool_calls" + assert result.meta["index"] == 0 + assert result.meta["completion_start_time"] is not None + + # Normalize usage details before asserting + usage = result.meta["usage"] + + if hasattr(usage["completion_tokens_details"], "model_dump"): + usage["completion_tokens_details"] = usage["completion_tokens_details"].model_dump() + if hasattr(usage["prompt_tokens_details"], "model_dump"): + usage["prompt_tokens_details"] = usage["prompt_tokens_details"].model_dump() + + # For dataclass fallback + if not isinstance(usage["completion_tokens_details"], dict): + usage["completion_tokens_details"] = asdict(usage["completion_tokens_details"]) + if not isinstance(usage["prompt_tokens_details"], dict): + usage["prompt_tokens_details"] = asdict(usage["prompt_tokens_details"]) + + assert usage == { + "completion_tokens": 42, + "prompt_tokens": 55, + "total_tokens": 97, + "completion_tokens_details": { + "accepted_prediction_tokens": None, + "audio_tokens": None, + "reasoning_tokens": 0, + "rejected_prediction_tokens": None, + }, + "prompt_tokens_details": { + "audio_tokens": None, + "cached_tokens": 0, + }, + } diff --git a/integrations/cometapi/tests/test_cometapi_chat_generator_async.py b/integrations/cometapi/tests/test_cometapi_chat_generator_async.py new file mode 100644 index 0000000000..7caa40775c --- /dev/null +++ b/integrations/cometapi/tests/test_cometapi_chat_generator_async.py @@ -0,0 +1,269 @@ +import os +from datetime import datetime +from unittest.mock import AsyncMock, patch + +import pytest +import pytz +from haystack.dataclasses import ( + ChatMessage, + ChatRole, + StreamingChunk, +) +from haystack.tools import Tool +from openai import AsyncOpenAI +from openai.types.chat import ChatCompletion, ChatCompletionMessage +from openai.types.chat.chat_completion import Choice + +from haystack_integrations.components.generators.cometapi.chat.chat_generator import ( + CometAPIChatGenerator, +) + +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +def chat_messages(): + return [ + ChatMessage.from_system("You are a helpful 
assistant"), + ChatMessage.from_user("What's the capital of France"), + ] + + +def weather(city: str): + """Get weather for a given city.""" + return f"The weather in {city} is sunny and 32°C" + + +@pytest.fixture +def tools(): + tool_parameters = { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + } + tool = Tool( + name="weather", + description="useful to determine the weather in a given location", + parameters=tool_parameters, + function=weather, + ) + + return [tool] + + +@pytest.fixture +def mock_async_chat_completion(): + """ + Mock the Async OpenAI API completion response and reuse it for async tests + """ + with patch( + "openai.resources.chat.completions.AsyncCompletions.create", + new_callable=AsyncMock, + ) as mock_chat_completion_create: + completion = ChatCompletion( + id="foo", + model="openai/gpt-4o-mini", + object="chat.completion", + choices=[ + Choice( + finish_reason="stop", + logprobs=None, + index=0, + message=ChatCompletionMessage(content="Hello world!", role="assistant"), + ) + ], + created=int(datetime.now(tz=pytz.timezone("UTC")).timestamp()), + usage={ + "prompt_tokens": 57, + "completion_tokens": 40, + "total_tokens": 97, + }, + ) + # For async mocks, the return value should be awaitable + mock_chat_completion_create.return_value = completion + yield mock_chat_completion_create + + +class TestCometAPIChatGeneratorAsync: + def test_init_default_async(self, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "test-api-key") + component = CometAPIChatGenerator() + + assert isinstance(component.async_client, AsyncOpenAI) + assert component.async_client.api_key == "test-api-key" + assert component.async_client.base_url == "https://api.cometapi.com/v1/" + assert not component.generation_kwargs + + @pytest.mark.asyncio + async def test_run_async(self, chat_messages, mock_async_chat_completion, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "fake-api-key") + component = CometAPIChatGenerator() + response = await component.run_async(chat_messages) + + # Verify the mock was called + mock_async_chat_completion.assert_called_once() + + # check that the component returns the correct ChatMessage response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, ChatMessage) for reply in response["replies"]] + + @pytest.mark.asyncio + async def test_run_async_with_params(self, chat_messages, mock_async_chat_completion, monkeypatch): + monkeypatch.setenv("COMET_API_KEY", "fake-api-key") + component = CometAPIChatGenerator(generation_kwargs={"max_tokens": 10, "temperature": 0.5}) + response = await component.run_async(chat_messages) + + # check that the component calls the OpenAI API with the correct parameters + _, kwargs = mock_async_chat_completion.call_args + assert kwargs["max_tokens"] == 10 + assert kwargs["temperature"] == 0.5 + + # check that the component returns the correct response + assert isinstance(response, dict) + assert "replies" in response + assert isinstance(response["replies"], list) + assert len(response["replies"]) == 1 + assert [isinstance(reply, ChatMessage) for reply in response["replies"]] + + @pytest.mark.skipif( + not os.environ.get("COMET_API_KEY", None), + reason="Export an env var called COMET_API_KEY containing the OpenAI API key to run this test.", + ) + @pytest.mark.integration + @pytest.mark.asyncio + async def test_live_run_async(self): + chat_messages = 
+        component = CometAPIChatGenerator()
+        results = await component.run_async(chat_messages)
+        assert len(results["replies"]) == 1
+        message: ChatMessage = results["replies"][0]
+        assert "Paris" in message.text
+        assert "gpt-4o-mini" in message.meta["model"]
+        assert message.meta["finish_reason"] == "stop"
+
+    @pytest.mark.skipif(
+        not os.environ.get("COMET_API_KEY", None),
+        reason="Export an env var called COMET_API_KEY containing the CometAPI API key to run this test.",
+    )
+    @pytest.mark.integration
+    @pytest.mark.asyncio
+    async def test_live_run_streaming_async(self):
+        counter = 0
+        responses = ""
+
+        async def callback(chunk: StreamingChunk):
+            nonlocal counter
+            nonlocal responses
+            counter += 1
+            responses += chunk.content if chunk.content else ""
+
+        component = CometAPIChatGenerator(streaming_callback=callback)
+        results = await component.run_async([ChatMessage.from_user("What's the capital of France?")])
+
+        assert len(results["replies"]) == 1
+        message: ChatMessage = results["replies"][0]
+        assert "Paris" in message.text
+
+        assert "gpt-4o-mini" in message.meta["model"]
+        assert message.meta["finish_reason"] == "stop"
+
+        assert counter > 1
+        assert "Paris" in responses
+
+    @pytest.mark.skipif(
+        not os.environ.get("COMET_API_KEY", None),
+        reason="Export an env var called COMET_API_KEY containing the CometAPI API key to run this test.",
+    )
+    @pytest.mark.integration
+    @pytest.mark.asyncio
+    async def test_live_run_with_tools_and_response_async(self, tools):
+        """
+        Integration test that the CometAPIChatGenerator component can run with tools and get a response.
+        """
+        initial_messages = [ChatMessage.from_user("What's the weather like in Paris?")]
+        component = CometAPIChatGenerator(tools=tools)
+        results = await component.run_async(messages=initial_messages, generation_kwargs={"tool_choice": "auto"})
+
+        assert len(results["replies"]) > 0, "No replies received"
+
+        # Find the message with tool calls
+        tool_message = None
+        for message in results["replies"]:
+            if message.tool_call:
+                tool_message = message
+                break
+
+        assert tool_message is not None, "No message with tool call found"
+        assert isinstance(tool_message, ChatMessage), "Tool message is not a ChatMessage instance"
+        assert ChatMessage.is_from(tool_message, ChatRole.ASSISTANT), "Tool message is not from the assistant"
+
+        tool_call = tool_message.tool_call
+        assert tool_call.id, "Tool call does not contain value for 'id' key"
+        assert tool_call.tool_name == "weather"
+        assert tool_call.arguments == {"city": "Paris"}
+        assert tool_message.meta["finish_reason"] == "tool_calls"
+
+        new_messages = [
+            initial_messages[0],
+            tool_message,
+            ChatMessage.from_tool(tool_result="22° C", origin=tool_call),
+        ]
+        # Pass the tool result to the model to get the final response
+        results = await component.run_async(new_messages)
+
+        assert len(results["replies"]) == 1
+        final_message = results["replies"][0]
+        assert not final_message.tool_call
+        assert len(final_message.text) > 0
+        assert "paris" in final_message.text.lower()
+
+    @pytest.mark.skipif(
+        not os.environ.get("COMET_API_KEY", None),
+        reason="Export an env var called COMET_API_KEY containing the CometAPI API key to run this test.",
+    )
+    @pytest.mark.integration
+    @pytest.mark.asyncio
+    async def test_live_run_with_tools_streaming_async(self, tools):
+        """
+        Integration test that the CometAPIChatGenerator component can run with tools and streaming.
+        """
+ """ + + counter = 0 + tool_calls = [] + + async def callback(chunk: StreamingChunk): + nonlocal counter + nonlocal tool_calls + counter += 1 + if chunk.meta.get("tool_calls"): + tool_calls.extend(chunk.meta["tool_calls"]) + + component = CometAPIChatGenerator(tools=tools, streaming_callback=callback) + results = await component.run_async( + [ChatMessage.from_user("What's the weather like in Paris?")], + generation_kwargs={"tool_choice": "auto"}, + ) + + assert len(results["replies"]) > 0, "No replies received" + assert counter > 1, "Streaming callback was not called multiple times" + assert tool_calls, "No tool calls received in streaming" + + # Find the message with tool calls + tool_message = None + for message in results["replies"]: + if message.tool_call: + tool_message = message + break + + assert tool_message is not None, "No message with tool call found" + assert isinstance(tool_message, ChatMessage), "Tool message is not a ChatMessage instance" + assert ChatMessage.is_from(tool_message, ChatRole.ASSISTANT), "Tool message is not from the assistant" + + tool_call = tool_message.tool_call + assert tool_call.id, "Tool call does not contain value for 'id' key" + assert tool_call.tool_name == "weather" + assert tool_call.arguments == {"city": "Paris"} + assert tool_message.meta["finish_reason"] == "tool_calls"