diff --git a/.github/workflows/check-file-contents.yml b/.github/workflows/check-file-contents.yml index 1f31ac073..861e2247a 100644 --- a/.github/workflows/check-file-contents.yml +++ b/.github/workflows/check-file-contents.yml @@ -89,7 +89,7 @@ jobs: - name: Check for import from cli package in certain changed Python files run: | git fetch origin ${{ github.base_ref }} - CHANGED_FILES=$(git diff --diff-filter=ACMR --name-only origin/${{ github.base_ref }}...HEAD | grep -E '\.py$' | grep -v -E 'cli/.*|tests/.*|contributing/samples/' || true) + CHANGED_FILES=$(git diff --diff-filter=ACMR --name-only origin/${{ github.base_ref }}...HEAD | grep -E '\.py$' | grep -v -E 'cli/.*|src/google/adk/tools/apihub_tool/apihub_toolset.py|tests/.*|contributing/samples/' || true) if [ -n "$CHANGED_FILES" ]; then echo "Changed Python files to check:" echo "$CHANGED_FILES" diff --git a/AGENTS.md b/AGENTS.md index 10cf03a97..2d9fade1a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -46,7 +46,7 @@ $ ./autoformat.sh ### In ADK source -Below styles applies to the ADK source code (under `src/` folder of the Github. +Below styles applies to the ADK source code (under `src/` folder of the GitHub. repo). 
#### Use relative imports diff --git a/CHANGELOG.md b/CHANGELOG.md index 133fc7611..0064796c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ ### Improvements -* Add Github workflow config for the ADK Answering agent ([8dc0c94](https://github.com/google/adk-python/commit/8dc0c949afb9024738ff7ac1b2c19282175c3200)) +* Add GitHub workflow config for the ADK Answering agent ([8dc0c94](https://github.com/google/adk-python/commit/8dc0c949afb9024738ff7ac1b2c19282175c3200)) * Import AGENT_CARD_WELL_KNOWN_PATH from adk instead of from a2a directly ([37dae9b](https://github.com/google/adk-python/commit/37dae9b631db5060770b66fce0e25cf0ffb56948)) * Make `LlmRequest.LiveConnectConfig` field default to a factory ([74589a1](https://github.com/google/adk-python/commit/74589a1db7df65e319d1ad2f0676ee0cf5d6ec1d)) * Update the prompt to make the ADK Answering Agent more objective ([2833030](https://github.com/google/adk-python/commit/283303032a174d51b8d72f14df83c794d66cb605)) @@ -80,14 +80,13 @@ ### Features * [Core]Add agent card builder ([18f5bea](https://github.com/google/adk-python/commit/18f5bea411b3b76474ff31bfb2f62742825b45e5)) -* [Core]Add an to_a2a util to convert adk agent to A2A ASGI application ([a77d689](https://github.com/google/adk-python/commit/a77d68964a1c6b7659d6117d57fa59e43399e0c2)) +* [Core]Add a to_a2a util to convert adk agent to A2A ASGI application ([a77d689](https://github.com/google/adk-python/commit/a77d68964a1c6b7659d6117d57fa59e43399e0c2)) * [Core]Add camel case converter for agents ([0e173d7](https://github.com/google/adk-python/commit/0e173d736334f8c6c171b3144ac6ee5b7125c846)) * [Evals]Use LocalEvalService to run all evals in cli and web ([d1f182e](https://github.com/google/adk-python/commit/d1f182e8e68c4a5a4141592f3f6d2ceeada78887)) * [Evals]Enable FinalResponseMatchV2 metric as an experiment ([36e45cd](https://github.com/google/adk-python/commit/36e45cdab3bbfb653eee3f9ed875b59bcd525ea1)) * [Models]Add support for `model-optimizer-*` 
family of models in vertex ([ffe2bdb](https://github.com/google/adk-python/commit/ffe2bdbe4c2ea86cc7924eb36e8e3bb5528c0016)) * [Services]Added a sample for History Management ([67284fc](https://github.com/google/adk-python/commit/67284fc46667b8c2946762bc9234a8453d48a43c)) -* [Services]Support passing fully qualified agent engine resource name when constructing session service and memory service ([2e77804](https://github.com/google/adk-python/commit/2e778049d0a675e458f4e -35fe4104ca1298dbfcf)) +* [Services]Support passing fully qualified agent engine resource name when constructing session service and memory service ([2e77804](https://github.com/google/adk-python/commit/2e778049d0a675e458f4e35fe4104ca1298dbfcf)) * [Tools]Add ComputerUseToolset ([083dcb4](https://github.com/google/adk-python/commit/083dcb44650eb0e6b70219ede731f2fa78ea7d28)) * [Tools]Allow toolset to process llm_request before tools returned by it ([3643b4a](https://github.com/google/adk-python/commit/3643b4ae196fd9e38e52d5dc9d1cd43ea0733d36)) * [Tools]Support input/output schema by fully-qualified code reference ([dfee06a](https://github.com/google/adk-python/commit/dfee06ac067ea909251d6fb016f8331065d430e9)) @@ -200,7 +199,7 @@ ### Documentation -* Update the a2a exmaple link in README.md [d0fdfb8](https://github.com/google/adk-python/commit/d0fdfb8c8e2e32801999c81de8d8ed0be3f88e76) +* Update the a2a example link in README.md [d0fdfb8](https://github.com/google/adk-python/commit/d0fdfb8c8e2e32801999c81de8d8ed0be3f88e76) * Adds AGENTS.md to provide relevant project context for the Gemini CLI [37108be](https://github.com/google/adk-python/commit/37108be8557e011f321de76683835448213f8515) * Update CONTRIBUTING.md [ffa9b36](https://github.com/google/adk-python/commit/ffa9b361db615ae365ba62c09a8f4226fb761551) * Add adk project overview and architecture [28d0ea8](https://github.com/google/adk-python/commit/28d0ea876f2f8de952f1eccbc788e98e39f50cf5) @@ -395,7 +394,7 @@ * Fix typos in README for sample 
bigquery_agent and oauth_calendar_agent ([9bdd813](https://github.com/google/adk-python/commit/9bdd813be15935af5c5d2a6982a2391a640cab23)) * Make tool_call one span for telemetry and renamed to execute_tool ([999a7fe](https://github.com/google/adk-python/commit/999a7fe69d511b1401b295d23ab3c2f40bccdc6f)) * Use media type in chat window. Remove isArtifactImage and isArtifactAudio reference ([1452dac](https://github.com/google/adk-python/commit/1452dacfeb6b9970284e1ddeee6c4f3cb56781f8)) -* Set output_schema correctly for LiteLllm ([6157db7](https://github.com/google/adk-python/commit/6157db77f2fba4a44d075b51c83bff844027a147)) +* Set output_schema correctly for LiteLlm ([6157db7](https://github.com/google/adk-python/commit/6157db77f2fba4a44d075b51c83bff844027a147)) * Update pending event dialog style ([1db601c](https://github.com/google/adk-python/commit/1db601c4bd90467b97a2f26fe9d90d665eb3c740)) * Remove the gap between event holder and image ([63822c3](https://github.com/google/adk-python/commit/63822c3fa8b0bdce2527bd0d909c038e2b66dd98)) @@ -423,7 +422,7 @@ ## 1.1.1 ### Features -* Add BigQuery first-party tools. See [here](https://github.com/google/adk-python/commit/d6c6bb4b2489a8b7a4713e4747c30d6df0c07961) for more details. +* Add [BigQuery first-party tools](https://github.com/google/adk-python/commit/d6c6bb4b2489a8b7a4713e4747c30d6df0c07961) for more details. ## 1.1.0 @@ -559,7 +558,7 @@ * Fix google search reading undefined for `renderedContent`. ### Miscellaneous Chores -* Docstring improvements, typo fixings, github action to enfore code styles on formatting and imports, etc. +* Docstring improvements, typo fixings, github action to enforce code styles on formatting and imports, etc. ## 0.3.0 @@ -598,7 +597,7 @@ ### ⚠ BREAKING CHANGES -* Fix typo in method name in `Event`: has_trailing_code_exeuction_result --> has_trailing_code_execution_result. +* Fix typo in method name in `Event`: has_trailing_code_execution_result --> has_trailing_code_execution_result. 
### Features @@ -628,7 +627,7 @@ ### Miscellaneous Chores -* Adds unit tests in Github action. +* Adds unit tests in GitHub action. * Improves test coverage. * Various typo fixes. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dc0723353..863e0d553 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -50,7 +50,7 @@ This project follows ## Requirement for PRs - Each PR should only have one commit. Please squash it if there are multiple PRs. -- All PRs, other than small documentation or typo fixes, should have a Issue assoicated. If not, please create one. +- All PRs, other than small documentation or typo fixes, should have an Issue associated. If not, please create one. - Small, focused PRs. Keep changes minimal—one concern per PR. - For bug fixes or features, please provide logs or screenshot after the fix is applied to help reviewers better understand the fix. - Please include a `testing plan` section in your PR to talk about how you will test. This will save time for PR review. See `Testing Requirements` section for more details. diff --git a/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py index 976cea170..05517cd86 100644 --- a/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py +++ b/contributing/samples/a2a_auth/remote_a2a/bigquery_agent/agent.py @@ -46,7 +46,7 @@ Use the provided tools to conduct various operations on users' data in Google BigQuery. Scenario 1: - The user wants to query their biguqery datasets + The user wants to query their bigquery datasets Use bigquery_datasets_list to query user's datasets Scenario 2: diff --git a/contributing/samples/a2a_human_in_loop/README.md b/contributing/samples/a2a_human_in_loop/README.md index b985e6b9b..1987c78d5 100644 --- a/contributing/samples/a2a_human_in_loop/README.md +++ b/contributing/samples/a2a_human_in_loop/README.md @@ -99,7 +99,7 @@ Agent: ✅ Great news! 
Your reimbursement has been approved by the manager. Proc The human-in-the-loop process follows this pattern: 1. **Initial Call**: Root agent delegates approval request to remote approval agent for amounts >$100 -2. **Pending Response**: Remote approval agent returns immediate response with `status: "pending"` and ticket ID and serface the approval request to root agent +2. **Pending Response**: Remote approval agent returns immediate response with `status: "pending"` and ticket ID and surfaces the approval request to root agent 3. **Agent Acknowledgment**: Root agent informs user about pending approval status 4. **Human Interaction**: Human manager interacts with root agent to review and approve/reject the request 5. **Updated Response**: Root agent receives updated tool response with approval decision and send it to remote agent diff --git a/contributing/samples/adk_answering_agent/README.md b/contributing/samples/adk_answering_agent/README.md index 158c825a0..d8737b06f 100644 --- a/contributing/samples/adk_answering_agent/README.md +++ b/contributing/samples/adk_answering_agent/README.md @@ -50,7 +50,7 @@ Or `python -m adk_answering_agent.answer_discussions --recent 10` to answer the ## GitHub Workflow Mode -The `main.py` is reserved for the Github Workflow. The detailed setup for the automatic workflow is TBD. +The `main.py` is reserved for the GitHub Workflow. The detailed setup for the automatic workflow is TBD. --- diff --git a/contributing/samples/adk_answering_agent/agent.py b/contributing/samples/adk_answering_agent/agent.py index a67a209c4..2616507c0 100644 --- a/contributing/samples/adk_answering_agent/agent.py +++ b/contributing/samples/adk_answering_agent/agent.py @@ -47,13 +47,13 @@ 1. Use the `get_discussion_and_comments` tool to get the details of the discussion including the comments. 2. Focus on the latest comment but reference all comments if needed to understand the context. 
* If there is no comment at all, just focus on the discussion title and body. - 3. If all the following conditions are met, try to add a comment to the discussion, otherwise, do not respond: + 3. If all the following conditions are met, try to add a comment to the discussion; otherwise, do not respond: * The discussion is not closed. * The latest comment is not from you or other agents (marked as "Response from XXX Agent"). * The latest comment is asking a question or requesting information. 4. Use the `VertexAiSearchTool` to find relevant information before answering. 5. If you can find relevant information, use the `add_comment_to_discussion` tool to add a comment to the discussion. - 6. If you post a commment and the discussion does not have a label named {BOT_RESPONSE_LABEL}, + 6. If you post a comment and the discussion does not have a label named {BOT_RESPONSE_LABEL}, add the label {BOT_RESPONSE_LABEL} to the discussion using the `add_label_to_discussion` tool. @@ -63,7 +63,7 @@ information that is not in the document store. Do not invent citations which are not in the document store. * **Be Objective**: your answer should be based on the facts you found in the document store, do not be misled by user's assumptions or user's understanding of ADK. * If you can't find the answer or information in the document store, **do not** respond. - * Inlclude a short summary of your response in the comment as a TLDR, e.g. "**TLDR**: ". + * Include a short summary of your response in the comment as a TLDR, e.g. "**TLDR**: ". * Have a divider line between the TLDR and your detail response. * Do not respond to any other discussion except the one specified by the user. 
* Please include your justification for your decision in your output diff --git a/contributing/samples/adk_answering_agent/answer_discussions.py b/contributing/samples/adk_answering_agent/answer_discussions.py index 1aa737585..cfca0241a 100644 --- a/contributing/samples/adk_answering_agent/answer_discussions.py +++ b/contributing/samples/adk_answering_agent/answer_discussions.py @@ -87,7 +87,7 @@ async def list_most_recent_discussions(count: int = 1) -> list[int] | None: def process_arguments(): """Parses command-line arguments.""" parser = argparse.ArgumentParser( - description="A script that answer questions for Github discussions.", + description="A script that answer questions for GitHub discussions.", epilog=( "Example usage: \n" "\tpython -m adk_answering_agent.answer_discussions --recent 10\n" diff --git a/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py b/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py index 9dd7ca6a2..ac184cebc 100644 --- a/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py +++ b/contributing/samples/adk_answering_agent/upload_docs_to_vertex_ai_search.py @@ -117,7 +117,7 @@ def upload_directory_to_gcs( ) return False - print(f"Sucessfully uploaded {file_count} files to GCS.") + print(f"Successfully uploaded {file_count} files to GCS.") return True @@ -135,7 +135,7 @@ def import_from_gcs_to_vertex_ai( # parent has the format of # "projects/{project_number}/locations/{location}/collections/{collection}/dataStores/{datastore_id}/branches/default_branch" parent=full_datastore_id + "/branches/default_branch", - # Specify the GCS source and use "content" for unstructed data. + # Specify the GCS source and use "content" for unstructured data. 
gcs_source=discoveryengine.GcsSource( input_uris=[gcs_uri], data_schema="content" ), diff --git a/contributing/samples/adk_answering_agent/utils.py b/contributing/samples/adk_answering_agent/utils.py index 029e5f129..8c575cf6c 100644 --- a/contributing/samples/adk_answering_agent/utils.py +++ b/contributing/samples/adk_answering_agent/utils.py @@ -127,7 +127,7 @@ def convert_gcs_to_https(gcs_uri: str) -> Optional[str]: # Use the directory path if it is a index file final_path_segment = os.path.dirname(path_after_docs) else: - # Otherwise, use the file name without extention + # Otherwise, use the file name without extension final_path_segment = path_after_docs.removesuffix(".md") if final_path_segment and not final_path_segment.endswith("/"): @@ -139,7 +139,7 @@ def convert_gcs_to_https(gcs_uri: str) -> Optional[str]: if _check_url_exists(potential_url): return potential_url else: - # If it doesn't exist, fallback to the regular github url + # If it doesn't exist, fall back to the regular github url return _generate_github_url(prefix, relative_path) # Convert the links for other cases, e.g. adk-python diff --git a/contributing/samples/adk_issue_formatting_agent/agent.py b/contributing/samples/adk_issue_formatting_agent/agent.py index 78add9b83..d2f040ed5 100644 --- a/contributing/samples/adk_issue_formatting_agent/agent.py +++ b/contributing/samples/adk_issue_formatting_agent/agent.py @@ -45,7 +45,7 @@ def list_open_issues(issue_count: int) -> dict[str, Any]: - """List most recent `issue_count` numer of open issues in the repo. + """List most recent `issue_count` number of open issues in the repo. Args: issue_count: number of issues to return @@ -75,7 +75,7 @@ def get_issue(issue_number: int) -> dict[str, Any]: """Get the details of the specified issue number. Args: - issue_number: issue number of the Github issue. + issue_number: issue number of the GitHub issue. Returns: The status of this request, with the issue details when successful. 
@@ -92,7 +92,7 @@ def add_comment_to_issue(issue_number: int, comment: str) -> dict[str, any]: """Add the specified comment to the given issue number. Args: - issue_number: issue number of the Github issue + issue_number: issue number of the GitHub issue comment: comment to add Returns: @@ -116,7 +116,7 @@ def list_comments_on_issue(issue_number: int) -> dict[str, any]: """List all comments on the given issue number. Args: - issue_number: issue number of the Github issue + issue_number: issue number of the GitHub issue Returns: The the status of this request, with the list of comments when successful. diff --git a/contributing/samples/adk_pr_agent/agent.py b/contributing/samples/adk_pr_agent/agent.py index 8c398e7ed..7d6088ac4 100644 --- a/contributing/samples/adk_pr_agent/agent.py +++ b/contributing/samples/adk_pr_agent/agent.py @@ -125,7 +125,7 @@ def get_github_pr_info_http(pr_number: int) -> str | None: system_prompt = """ You are a helpful assistant to generate reasonable descriptions for pull requests for software engineers. -The descritions should not be too short (e.g.: less than 3 words), or too long (e.g.: more than 30 words). +The descriptions should not be too short (e.g.: less than 3 words), or too long (e.g.: more than 30 words). The generated description should start with `chore`, `docs`, `feat`, `fix`, `test`, or `refactor`. `feat` stands for a new feature. diff --git a/contributing/samples/adk_pr_triaging_agent/agent.py b/contributing/samples/adk_pr_triaging_agent/agent.py index 6e2f1bd96..79d32262c 100644 --- a/contributing/samples/adk_pr_triaging_agent/agent.py +++ b/contributing/samples/adk_pr_triaging_agent/agent.py @@ -58,7 +58,7 @@ def get_pull_request_details(pr_number: int) -> str: """Get the details of the specified pull request. Args: - pr_number: number of the Github pull request. + pr_number: number of the GitHub pull request. Returns: The status of this request, with the details when successful. 
@@ -160,7 +160,7 @@ def add_label_and_reviewer_to_pr(pr_number: int, label: str) -> dict[str, Any]: """Adds a specified label and requests a review from a mapped reviewer on a PR. Args: - pr_number: the number of the Github pull request + pr_number: the number of the GitHub pull request label: the label to add Returns: @@ -173,7 +173,7 @@ def add_label_and_reviewer_to_pr(pr_number: int, label: str) -> dict[str, Any]: f"Error: Label '{label}' is not an allowed label. Will not apply." ) - # Pull Request is a special issue in Github, so we can use issue url for PR. + # Pull Request is a special issue in GitHub, so we can use issue url for PR. label_url = ( f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{pr_number}/labels" ) @@ -216,7 +216,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: """Add the specified comment to the given PR number. Args: - pr_number: the number of the Github pull request + pr_number: the number of the GitHub pull request comment: the comment to add Returns: @@ -224,7 +224,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: """ print(f"Attempting to add comment '{comment}' to issue #{pr_number}") - # Pull Request is a special issue in Github, so we can use issue url for PR. + # Pull Request is a special issue in GitHub, so we can use issue url for PR. url = f"{GITHUB_BASE_URL}/repos/{OWNER}/{REPO}/issues/{pr_number}/comments" payload = {"body": comment} @@ -244,7 +244,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: description="Triage ADK pull requests.", instruction=f""" # 1. Identity - You are a Pull Request (PR) triaging bot for the Github {REPO} repo with the owner {OWNER}. + You are a Pull Request (PR) triaging bot for the GitHub {REPO} repo with the owner {OWNER}. # 2. 
Responsibilities Your core responsibility includes: @@ -262,7 +262,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: - If it's about session, memory, artifacts services, label it with "services" - If it's about UI/web, label it with "web" - If it's related to tools, label it with "tools" - - If it's about agent evalaution, then label it with "eval". + - If it's about agent evaluation, then label it with "eval". - If it's about streaming/live, label it with "live". - If it's about model support(non-Gemini, like Litellm, Ollama, OpenAI models), label it with "models". - If it's about tracing, label it with "tracing". @@ -297,7 +297,7 @@ def add_comment_to_pr(pr_number: int, comment: str) -> dict[str, Any]: # 4. Steps When you are given a PR, here are the steps you should take: - Call the `get_pull_request_details` tool to get the details of the PR. - - Skip the PR (i.e. do not label or comment) if the PR is closed or is labeled with "{BOT_LABEL}" or "google-contributior". + - Skip the PR (i.e. do not label or comment) if the PR is closed or is labeled with "{BOT_LABEL}" or "google-contributor". - Check if the PR is following the contribution guidelines. - If it's not following the guidelines, recommend or add a comment to the PR that points to the contribution guidelines (https://github.com/google/adk-python/blob/main/CONTRIBUTING.md). - If it's following the guidelines, recommend or add a label to the PR. diff --git a/contributing/samples/adk_triaging_agent/agent.py b/contributing/samples/adk_triaging_agent/agent.py index fef742cc5..0ede67f7c 100644 --- a/contributing/samples/adk_triaging_agent/agent.py +++ b/contributing/samples/adk_triaging_agent/agent.py @@ -49,7 +49,7 @@ def list_unlabeled_issues(issue_count: int) -> dict[str, Any]: - """List most recent `issue_count` numer of unlabeled issues in the repo. + """List most recent `issue_count` number of unlabeled issues in the repo. 
Args: issue_count: number of issues to return @@ -86,7 +86,7 @@ def add_label_and_owner_to_issue( """Add the specified label and owner to the given issue number. Args: - issue_number: issue number of the Github issue. + issue_number: issue number of the GitHub issue. label: label to assign Returns: @@ -142,7 +142,7 @@ def change_issue_type(issue_number: int, issue_type: str) -> dict[str, Any]: """Change the issue type of the given issue number. Args: - issue_number: issue number of the Github issue, in string foramt. + issue_number: issue number of the GitHub issue, in string format. issue_type: issue type to assign Returns: @@ -167,7 +167,7 @@ def change_issue_type(issue_number: int, issue_type: str) -> dict[str, Any]: name="adk_triaging_assistant", description="Triage ADK issues.", instruction=f""" - You are a triaging bot for the Github {REPO} repo with the owner {OWNER}. You will help get issues, and recommend a label. + You are a triaging bot for the GitHub {REPO} repo with the owner {OWNER}. You will help get issues, and recommend a label. IMPORTANT: {APPROVAL_INSTRUCTION} Here are the rules for labeling: @@ -176,7 +176,7 @@ def change_issue_type(issue_number: int, issue_type: str) -> dict[str, Any]: - If it's about UI/web, label it with "web" - If the user is asking about a question, label it with "question" - If it's related to tools, label it with "tools" - - If it's about agent evalaution, then label it with "eval". + - If it's about agent evaluation, then label it with "eval". - If it's about streaming/live, label it with "live". - If it's about model support(non-Gemini, like Litellm, Ollama, OpenAI models), label it with "models". - If it's about tracing, label it with "tracing". 
diff --git a/contributing/samples/application_integration_agent/README.md b/contributing/samples/application_integration_agent/README.md index a7106c09a..0e0a70c17 100644 --- a/contributing/samples/application_integration_agent/README.md +++ b/contributing/samples/application_integration_agent/README.md @@ -7,7 +7,7 @@ This sample demonstrates how to use the `ApplicationIntegrationToolset` within a ## Prerequisites 1. **Set up Integration Connection:** - * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create an JIRA connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection. + * You need an existing [Integration connection](https://cloud.google.com/integration-connectors/docs/overview) configured to interact with your Jira instance. Follow the [documentation](https://google.github.io/adk-docs/tools/google-cloud-tools/#use-integration-connectors) to provision the Integration Connector in Google Cloud and then use this [documentation](https://cloud.google.com/integration-connectors/docs/connectors/jiracloud/configure) to create a Jira connection. Note the `Connection Name`, `Project ID`, and `Location` of your connection. * 2. 
**Configure Environment Variables:** diff --git a/contributing/samples/application_integration_agent/agent.py b/contributing/samples/application_integration_agent/agent.py index 9658641e3..83e114360 100644 --- a/contributing/samples/application_integration_agent/agent.py +++ b/contributing/samples/application_integration_agent/agent.py @@ -40,7 +40,7 @@ model="gemini-2.0-flash", name="Issue_Management_Agent", instruction=""" - You are an agent that helps manage issues in a JIRA instance. + You are an agent that helps manage issues in a Jira instance. Be accurate in your responses based on the tool response. You can perform any formatting in the response that is appropriate or if asked by the user. If there is an error in the tool response, understand the error and try and see if you can fix the error and then and execute the tool again. For example if a variable or parameter is missing, try and see if you can find it in the request or user query or default it and then execute the tool again or check for other tools that could give you the details. If there are any math operations like count or max, min in the user request, call the tool to get the data and perform the math operations and then return the result in the response. For example for maximum, fetch the list and then do the math operation. 
diff --git a/contributing/samples/bigquery/agent.py b/contributing/samples/bigquery/agent.py index f1ba10fe2..83b896e29 100644 --- a/contributing/samples/bigquery/agent.py +++ b/contributing/samples/bigquery/agent.py @@ -34,7 +34,7 @@ tool_config = BigQueryToolConfig(write_mode=WriteMode.ALLOWED) if CREDENTIALS_TYPE == AuthCredentialTypes.OAUTH2: - # Initiaze the tools to do interactive OAuth + # Initialize the tools to do interactive OAuth # The environment variables OAUTH_CLIENT_ID and OAUTH_CLIENT_SECRET # must be set credentials_config = BigQueryCredentialsConfig( diff --git a/contributing/samples/code_execution/agent.py b/contributing/samples/code_execution/agent.py index b8cbd6141..82de04f25 100644 --- a/contributing/samples/code_execution/agent.py +++ b/contributing/samples/code_execution/agent.py @@ -43,7 +43,7 @@ def base_system_instruction(): ``` **Output Visibility:** Always print the output of code execution to visualize results, especially for data exploration and analysis. For example: - - To look a the shape of a pandas.DataFrame do: + - To look at the shape of a pandas.DataFrame do: ```tool_code print(df.shape) ``` @@ -84,7 +84,7 @@ def base_system_instruction(): You need to assist the user with their queries by looking at the data and the context in the conversation. -You final answer should summarize the code and code execution relavant to the user query. +Your final answer should summarize the code and code execution relevant to the user query. You should include all pieces of data to answer the user query, such as the table from code execution results. If you cannot answer the question directly, you should follow the guidelines above to generate the next step. 
diff --git a/contributing/samples/fields_output_schema/agent.py b/contributing/samples/fields_output_schema/agent.py index e3c696684..70645ea9b 100644 --- a/contributing/samples/fields_output_schema/agent.py +++ b/contributing/samples/fields_output_schema/agent.py @@ -16,7 +16,7 @@ from pydantic import BaseModel -class WeahterData(BaseModel): +class WeatherData(BaseModel): temperature: str humidity: str wind_speed: str @@ -43,6 +43,6 @@ class WeahterData(BaseModel): * wind_speed: 13 mph """, - output_schema=WeahterData, + output_schema=WeatherData, output_key='weather_data', ) diff --git a/contributing/samples/google_api/agent.py b/contributing/samples/google_api/agent.py index bb06e36f2..390f1bca1 100644 --- a/contributing/samples/google_api/agent.py +++ b/contributing/samples/google_api/agent.py @@ -46,7 +46,7 @@ Use the provided tools to conduct various operations on users' data in Google BigQuery. Scenario 1: - The user wants to query their biguqery datasets + The user wants to query their bigquery datasets Use bigquery_datasets_list to query user's datasets Scenario 2: diff --git a/contributing/samples/hello_world_ollama/README.md b/contributing/samples/hello_world_ollama/README.md index 559e42f65..dc7acf139 100644 --- a/contributing/samples/hello_world_ollama/README.md +++ b/contributing/samples/hello_world_ollama/README.md @@ -25,7 +25,7 @@ ollama show mistral-small3.1 You are supposed to see `tools` listed under capabilities. -You can also look at the template the model is using and tweak it based on your needs. +You can also look at the model's template and tweak it based on your needs. 
```bash ollama show --modelfile llama3.1 > model_file_to_modify diff --git a/contributing/samples/jira_agent/agent.py b/contributing/samples/jira_agent/agent.py index 9f2b866c9..537d8f084 100644 --- a/contributing/samples/jira_agent/agent.py +++ b/contributing/samples/jira_agent/agent.py @@ -19,7 +19,7 @@ root_agent = Agent( model='gemini-2.0-flash-001', name='jira_connector_agent', - description='This agent helps search issues in JIRA', + description='This agent helps search issues in Jira', instruction=""" To start with, greet the user First, you will be given a description of what you can do. diff --git a/contributing/samples/jira_agent/tools.py b/contributing/samples/jira_agent/tools.py index f03c5ed10..94c37565f 100644 --- a/contributing/samples/jira_agent/tools.py +++ b/contributing/samples/jira_agent/tools.py @@ -27,7 +27,7 @@ tool_name="jira_conversation_tool", tool_instructions=""" - This tool is to call an integration to search for issues in JIRA + This tool is to call an integration to search for issues in Jira """, ) diff --git a/contributing/samples/langchain_youtube_search_agent/README.md b/contributing/samples/langchain_youtube_search_agent/README.md index e87ca5942..7d46f60b8 100644 --- a/contributing/samples/langchain_youtube_search_agent/README.md +++ b/contributing/samples/langchain_youtube_search_agent/README.md @@ -1,6 +1,6 @@ # Langchain Youtube Search Agent -This agent utilize the Lanchain YoutubeSearchTool to search youtubes. +This agent utilize the Langchain YoutubeSearchTool to search youtubes. 
You need to install below dependencies: ```python diff --git a/contributing/samples/live_bidi_streaming_multi_agent/readme.md b/contributing/samples/live_bidi_streaming_multi_agent/readme.md index 27c93b10f..dee6f38bf 100644 --- a/contributing/samples/live_bidi_streaming_multi_agent/readme.md +++ b/contributing/samples/live_bidi_streaming_multi_agent/readme.md @@ -1,9 +1,7 @@ # Simplistic Live (Bidi-Streaming) Multi-Agent -This project provides a basic example of a live, bidirectional streaming multi-agent +This project provides a basic example of a live, [bidirectional streaming](https://google.github.io/adk-docs/streaming/) multi-agent designed for testing and experimentation. -You can see full documentation [here](https://google.github.io/adk-docs/streaming/). - ## Getting Started Follow these steps to get the agent up and running: diff --git a/contributing/samples/live_bidi_streaming_single_agent/readme.md b/contributing/samples/live_bidi_streaming_single_agent/readme.md index 6a9258f3e..56187fb0d 100644 --- a/contributing/samples/live_bidi_streaming_single_agent/readme.md +++ b/contributing/samples/live_bidi_streaming_single_agent/readme.md @@ -1,9 +1,7 @@ # Simplistic Live (Bidi-Streaming) Agent -This project provides a basic example of a live, bidirectional streaming agent +This project provides a basic example of a live, [bidirectional streaming](https://google.github.io/adk-docs/streaming/) agent designed for testing and experimentation. -You can see full documentation [here](https://google.github.io/adk-docs/streaming/). - ## Getting Started Follow these steps to get the agent up and running: diff --git a/contributing/samples/mcp_stdio_notion_agent/README.md b/contributing/samples/mcp_stdio_notion_agent/README.md index f53bd2f03..d40df313f 100644 --- a/contributing/samples/mcp_stdio_notion_agent/README.md +++ b/contributing/samples/mcp_stdio_notion_agent/README.md @@ -17,4 +17,4 @@ export NOTION_API_KEY= * Send below queries: * What can you do for me ? 
 - * Seach `XXXX` in my pages. + * Search `XXXX` in my pages. diff --git a/contributing/samples/oauth_calendar_agent/README.md b/contributing/samples/oauth_calendar_agent/README.md index aaefd6d08..b0d124791 100644 --- a/contributing/samples/oauth_calendar_agent/README.md +++ b/contributing/samples/oauth_calendar_agent/README.md @@ -12,7 +12,7 @@ This sample tests and demos the OAuth support in ADK via two tools: * 2. get_calendar_events - This is an google calendar tool that calls Google Calendar API to get the details of a specific calendar. + This is a Google Calendar tool that calls the Google Calendar API to get the details of a specific calendar. This tool is from the ADK built-in Google Calendar ToolSet. Everything is wrapped and the tool user just needs to pass in the client id and client secret. diff --git a/contributing/samples/oauth_calendar_agent/agent.py b/contributing/samples/oauth_calendar_agent/agent.py index 718f5c662..a6f87195d 100644 --- a/contributing/samples/oauth_calendar_agent/agent.py +++ b/contributing/samples/oauth_calendar_agent/agent.py @@ -115,7 +115,7 @@ def update_time(callback_context: CallbackContext): name="calendar_agent", instruction=""" You are a helpful personal calendar assistant. - Use the provided tools to search for calendar events (use 10 as limit if user does't specify), and update them. + Use the provided tools to search for calendar events (use 10 as limit if user doesn't specify), and update them. Use "primary" as the calendarId if users don't specify. 
Scenario1: @@ -133,7 +133,7 @@ def update_time(callback_context: CallbackContext): {userInfo?} - Currnet time: {_time} + Current time: {_time} """, tools=[ AuthenticatedFunctionTool( diff --git a/contributing/samples/session_state_agent/README.md b/contributing/samples/session_state_agent/README.md index bec053648..699517ec5 100644 --- a/contributing/samples/session_state_agent/README.md +++ b/contributing/samples/session_state_agent/README.md @@ -6,7 +6,7 @@ After assigning a state using the context object (e.g. `tool_context.state['log_query_var'] = 'log_query_var_value'`): * The state is available for use in a later callback. -* Once the resulting event is processed by the runner and appneded in the +* Once the resulting event is processed by the runner and appended in the session, the state will be also persisted in the session. This sample agent is for demonstrating the aforementioned behavior. @@ -55,7 +55,7 @@ state is available after writing via the context object ### Current Behavior -The current behavior of pesisting states are: +The current behavior of persisting states are: * for `before_agent_callback`: state delta will be persisted after all callbacks are processed. * for `before_model_callback`: state delta will be persisted with the final LlmResponse, diff --git a/llms-full.txt b/llms-full.txt index 4ce28660f..db2ba8734 100644 --- a/llms-full.txt +++ b/llms-full.txt @@ -7010,7 +7010,7 @@ This approach involves creating individual test files, each representing a singl - `Expected Intermediate Agent Responses`: These are the natural language responses that the agent (or sub-agents) generates as it moves towards generating a final answer. These natural language responses are usually an - artifact of an multi-agent system, where your root agent depends on sub-agents to achieve a goal. These intermediate responses, may or may not be of + artifact of a multi-agent system, where your root agent depends on sub-agents to achieve a goal. 
These intermediate responses, may or may not be of interest to the end user, but for a developer/owner of the system, are of critical importance, as they give you the confidence that the agent went through the right path to generate final response. diff --git a/pylintrc b/pylintrc index 3fc226368..303cbc302 100644 --- a/pylintrc +++ b/pylintrc @@ -257,7 +257,7 @@ single-line-if-stmt=yes max-module-lines=99999 # String used as indentation unit. The internal Google style guide mandates 2 -# spaces. Google's externaly-published style guide says 4, consistent with +# spaces. Google's externally-published style guide says 4, consistent with # PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google # projects (like TensorFlow). indent-string=' ' diff --git a/src/google/adk/a2a/converters/part_converter.py b/src/google/adk/a2a/converters/part_converter.py index dc3532090..3b2050973 100644 --- a/src/google/adk/a2a/converters/part_converter.py +++ b/src/google/adk/a2a/converters/part_converter.py @@ -13,7 +13,7 @@ # limitations under the License. """ -module containing utilities for conversion betwen A2A Part and Google GenAI Part +module containing utilities for conversion between A2A Part and Google GenAI Part """ from __future__ import annotations @@ -84,11 +84,11 @@ def convert_a2a_part_to_genai_part( return None if isinstance(part, a2a_types.DataPart): - # Conver the Data Part to funcall and function reponse. + # Convert the Data Part to funcall and function response. # This is mainly for converting human in the loop and auth request and # response. 
- # TODO once A2A defined how to suervice such information, migrate below - # logic accordinlgy + # TODO once A2A defined how to service such information, migrate below + # logic accordingly if ( part.metadata and _get_adk_metadata_key(A2A_DATA_PART_METADATA_TYPE_KEY) @@ -179,11 +179,11 @@ def convert_genai_part_to_a2a_part( return a2a_types.Part(root=a2a_part) - # Conver the funcall and function reponse to A2A DataPart. + # Convert the funcall and function response to A2A DataPart. # This is mainly for converting human in the loop and auth request and # response. - # TODO once A2A defined how to suervice such information, migrate below - # logic accordinlgy + # TODO once A2A defined how to service such information, migrate below + # logic accordingly if part.function_call: return a2a_types.Part( root=a2a_types.DataPart( diff --git a/src/google/adk/a2a/utils/agent_card_builder.py b/src/google/adk/a2a/utils/agent_card_builder.py index 06e0d55eb..2de71e10f 100644 --- a/src/google/adk/a2a/utils/agent_card_builder.py +++ b/src/google/adk/a2a/utils/agent_card_builder.py @@ -473,7 +473,7 @@ def _get_default_description(agent: BaseAgent) -> str: async def _extract_examples_from_agent( agent: BaseAgent, ) -> Optional[List[Dict]]: - """Extract examples from example_tool if configured, otherwise from agent instruction.""" + """Extract examples from example_tool if configured; otherwise, from agent instruction.""" if not isinstance(agent, LlmAgent): return None diff --git a/src/google/adk/agents/base_agent.py b/src/google/adk/agents/base_agent.py index 98f7b1254..e78f02944 100644 --- a/src/google/adk/agents/base_agent.py +++ b/src/google/adk/agents/base_agent.py @@ -174,7 +174,7 @@ def clone( invalid_fields = set(update) - allowed_fields if invalid_fields: raise ValueError( - f'Cannot update non-existent fields in {self.__class__.__name__}:' + f'Cannot update nonexistent fields in {self.__class__.__name__}:' f' {invalid_fields}' ) @@ -514,7 +514,7 @@ def from_config( 
"""Creates an agent from a config. If sub-classes uses a custom agent config, override `_from_config_kwargs` - method to return an updated kwargs for agent construstor. + method to return an updated kwargs for agent constructor. Args: config: The config to create the agent from. @@ -537,7 +537,7 @@ def _parse_config( ) -> Dict[str, Any]: """Parses the config and returns updated kwargs to construct the agent. - Sub-classes should override this method to use a custome agent config class. + Sub-classes should override this method to use a custom agent config class. Args: config: The config to parse. diff --git a/src/google/adk/agents/common_configs.py b/src/google/adk/agents/common_configs.py index 094b8fb75..e86af5bd0 100644 --- a/src/google/adk/agents/common_configs.py +++ b/src/google/adk/agents/common_configs.py @@ -65,7 +65,7 @@ class CodeConfig(BaseModel): args: Optional[List[ArgumentConfig]] = None """Optional. The arguments for the code when `name` refers to a function or a - class's contructor. + class's constructor. Examples: ``` diff --git a/src/google/adk/agents/config_schemas/AgentConfig.json b/src/google/adk/agents/config_schemas/AgentConfig.json index 08d14cd34..10efbea25 100644 --- a/src/google/adk/agents/config_schemas/AgentConfig.json +++ b/src/google/adk/agents/config_schemas/AgentConfig.json @@ -623,7 +623,7 @@ } ], "default": null, - "description": "Optional. The producer of the content. Must be either 'user' or\n 'model'. Useful to set for multi-turn conversations, otherwise can be\n empty. If role is not specified, SDK will determine the role.", + "description": "Optional. The producer of the content. Must be either 'user' or\n 'model'. Useful to set for multi-turn conversations; otherwise, can be\n empty. If role is not specified, SDK will determine the role.", "title": "Role" } }, @@ -1072,7 +1072,7 @@ } ], "default": null, - "description": "Optional. Display name of the file data. Used to provide a label or filename to distinguish file datas. 
It is not currently used in the Gemini GenerateContent calls.", + "description": "Optional. Display name of the file data. Used to provide a label or filename to distinguish file data. It is not currently used in the Gemini GenerateContent calls.", "title": "Displayname" }, "fileUri": { @@ -1324,7 +1324,7 @@ } ], "default": null, - "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" + "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case-sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" }, "parametersJsonSchema": { "anyOf": [ @@ -4012,7 +4012,7 @@ } ], "default": null, - "description": "Optional. Number of search results to return per query. The default value is 10. The maximumm allowed value is 10.", + "description": "Optional. Number of search results to return per query. The default value is 10. 
The maximum allowed value is 10.", "title": "Maxresults" } }, @@ -4428,7 +4428,7 @@ }, "mcp__types__Tool": { "additionalProperties": true, - "description": "Definition for a tool the client can call.", + "description": "Definition for a tool that the client can call.", "properties": { "name": { "title": "Name", diff --git a/src/google/adk/agents/remote_a2a_agent.py b/src/google/adk/agents/remote_a2a_agent.py index cc9fb75ad..0db04fd1d 100644 --- a/src/google/adk/agents/remote_a2a_agent.py +++ b/src/google/adk/agents/remote_a2a_agent.py @@ -525,7 +525,7 @@ async def _run_live_impl( raise NotImplementedError( f"_run_live_impl for {type(self)} via A2A is not implemented." ) - # This makes the function an async generator but the yield is still unreachable + # This makes the function into an async generator but the yield is still unreachable yield async def cleanup(self) -> None: diff --git a/src/google/adk/auth/auth_handler.py b/src/google/adk/auth/auth_handler.py index 2e2a9a074..7187ea35e 100644 --- a/src/google/adk/auth/auth_handler.py +++ b/src/google/adk/auth/auth_handler.py @@ -137,7 +137,7 @@ def generate_auth_request(self) -> AuthConfig: def generate_auth_uri( self, ) -> AuthCredential: - """Generates an response containing the auth uri for user to sign in. + """Generates a response containing the auth uri for user to sign in. Returns: An AuthCredential object containing the auth URI and state. 
diff --git a/src/google/adk/auth/auth_preprocessor.py b/src/google/adk/auth/auth_preprocessor.py index b06774973..1f8198e48 100644 --- a/src/google/adk/auth/auth_preprocessor.py +++ b/src/google/adk/auth/auth_preprocessor.py @@ -93,7 +93,7 @@ async def run_async( if not tools_to_resume: continue - # found the the system long running request euc function call + # found the system long running request euc function call # looking for original function call that requests euc for j in range(i - 1, -1, -1): event = events[j] diff --git a/src/google/adk/auth/exchanger/base_credential_exchanger.py b/src/google/adk/auth/exchanger/base_credential_exchanger.py index b09adb80a..31106b55e 100644 --- a/src/google/adk/auth/exchanger/base_credential_exchanger.py +++ b/src/google/adk/auth/exchanger/base_credential_exchanger.py @@ -24,7 +24,7 @@ from ..auth_schemes import AuthScheme -class CredentialExchangError(Exception): +class CredentialExchangeError(Exception): """Base exception for credential exchange errors.""" @@ -52,6 +52,6 @@ async def exchange( The exchanged credential. Raises: - CredentialExchangError: If credential exchange fails. + CredentialExchangeError: If credential exchange fails. """ pass diff --git a/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py b/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py index 4231a7c1e..184867993 100644 --- a/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py +++ b/src/google/adk/auth/exchanger/oauth2_credential_exchanger.py @@ -28,7 +28,7 @@ from typing_extensions import override from .base_credential_exchanger import BaseCredentialExchanger -from .base_credential_exchanger import CredentialExchangError +from .base_credential_exchanger import CredentialExchangeError try: from authlib.integrations.requests_client import OAuth2Session @@ -61,10 +61,10 @@ async def exchange( The exchanged credential with access token. Raises: - CredentialExchangError: If auth_scheme is missing. 
+ CredentialExchangeError: If auth_scheme is missing. """ if not auth_scheme: - raise CredentialExchangError( + raise CredentialExchangeError( "auth_scheme is required for OAuth2 credential exchange" ) diff --git a/src/google/adk/cli/adk_web_server.py b/src/google/adk/cli/adk_web_server.py index 1886ec47c..893f42579 100644 --- a/src/google/adk/cli/adk_web_server.py +++ b/src/google/adk/cli/adk_web_server.py @@ -202,7 +202,7 @@ class AdkWebServer: If you pass in a web_assets_dir, the static assets will be served under /dev-ui in addition to the API endpoints created by default. - You can add add additional API endpoints by modifying the FastAPI app + You can add additional API endpoints by modifying the FastAPI app instance returned by get_fast_api_app as this class exposes the agent runners and most other bits of state retained during the lifetime of the server. @@ -245,7 +245,7 @@ def __init__( self.eval_sets_manager = eval_sets_manager self.eval_set_results_manager = eval_set_results_manager self.agents_dir = agents_dir - # Internal propeties we want to allow being modified from callbacks. + # Internal properties we want to allow being modified from callbacks. self.runners_to_clean: set[str] = set() self.current_app_name_ref: SharedValue[str] = SharedValue(value="") self.runner_dict = {} diff --git a/src/google/adk/cli/cli_eval.py b/src/google/adk/cli/cli_eval.py index 2f1d090c1..d32d01659 100644 --- a/src/google/adk/cli/cli_eval.py +++ b/src/google/adk/cli/cli_eval.py @@ -189,7 +189,7 @@ async def _collect_eval_results( @deprecated( - "This method is deprecated and will be removed in fututre release. Please" + "This method is deprecated and will be removed in future release. Please" " use LocalEvalService to define your custom evals." 
) async def run_evals( @@ -288,8 +288,8 @@ async def run_evals( ) final_eval_status = EvalStatus.NOT_EVALUATED - # Go over the all the eval statuses and mark the final eval status as - # passed if all of them pass, otherwise mark the final eval status to + # Go over all the eval statuses and mark the final eval status as + # passed if all of them pass; otherwise, mark the final eval status to # failed. for overall_eval_metric_result in overall_eval_metric_results: overall_eval_status = overall_eval_metric_result.eval_status diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py index 13a0a620a..8c7ae20da 100644 --- a/src/google/adk/cli/cli_tools_click.py +++ b/src/google/adk/cli/cli_tools_click.py @@ -342,7 +342,7 @@ def cli_eval( This will only run eval_1, eval_2 and eval_3 from sample_eval_set_file.json. - *Eval Set Id* + *Eval Set ID* For each eval set, all evals will be run by default. If you want to run only specific evals from a eval set, first create a comma @@ -575,7 +575,7 @@ def wrapper(*args, **kwargs): def deprecated_adk_services_options(): - """Depracated ADK services options.""" + """Deprecated ADK services options.""" def warn(alternative_param, ctx, param, value): if value: diff --git a/src/google/adk/cli/fast_api.py b/src/google/adk/cli/fast_api.py index bc1a75dda..fc5f2588f 100644 --- a/src/google/adk/cli/fast_api.py +++ b/src/google/adk/cli/fast_api.py @@ -84,7 +84,7 @@ def get_fast_api_app( def _parse_agent_engine_resource_name(agent_engine_id_or_resource_name): if not agent_engine_id_or_resource_name: raise click.ClickException( - "Agent engine resource name or resource id can not be empty." + "Agent engine resource name or resource id cannot be empty." 
) # "projects/my-project/locations/us-central1/reasoningEngines/1234567890", @@ -113,7 +113,7 @@ def _parse_agent_engine_resource_name(agent_engine_id_or_resource_name): rag_corpus = memory_service_uri.split("://")[1] if not rag_corpus: - raise click.ClickException("Rag corpus can not be empty.") + raise click.ClickException("Rag corpus cannot be empty.") envs.load_dotenv_for_agent("", agents_dir) memory_service = VertexAiRagMemoryService( rag_corpus=f'projects/{os.environ["GOOGLE_CLOUD_PROJECT"]}/locations/{os.environ["GOOGLE_CLOUD_LOCATION"]}/ragCorpora/{rag_corpus}' diff --git a/src/google/adk/cli/utils/agent_loader.py b/src/google/adk/cli/utils/agent_loader.py index 0bc44abd8..68d14e4a3 100644 --- a/src/google/adk/cli/utils/agent_loader.py +++ b/src/google/adk/cli/utils/agent_loader.py @@ -81,7 +81,7 @@ def _load_from_module_or_package( if e.name == agent_name: logger.debug("Module %s itself not found.", agent_name) else: - # it's the case the module imported by {agent_name}.agent module is not + # the module imported by {agent_name}.agent module is not # found e.msg = f"Fail to load '{agent_name}' module. " + e.msg raise e @@ -120,8 +120,7 @@ def _load_from_submodule(self, agent_name: str) -> Optional[BaseAgent]: if e.name == f"{agent_name}.agent" or e.name == agent_name: logger.debug("Module %s.agent not found.", agent_name) else: - # it's the case the module imported by {agent_name}.agent module is not - # found + # the module imported by {agent_name}.agent module is not found e.msg = f"Fail to load '{agent_name}.agent' module. " + e.msg raise e except Exception as e: diff --git a/src/google/adk/cli/utils/evals.py b/src/google/adk/cli/utils/evals.py index 305d47544..434a47b4d 100644 --- a/src/google/adk/cli/utils/evals.py +++ b/src/google/adk/cli/utils/evals.py @@ -141,7 +141,7 @@ def convert_session_to_eval_invocations(session: Session) -> list[Invocation]: # The content present in this event is the user content. 
user_content = event.content invocation_id = event.invocation_id - invocaton_timestamp = event.timestamp + invocation_timestamp = event.timestamp # Find the corresponding tool usage or response for the query tool_uses: list[genai_types.FunctionCall] = [] @@ -183,7 +183,7 @@ def convert_session_to_eval_invocations(session: Session) -> list[Invocation]: Invocation( user_content=user_content, invocation_id=invocation_id, - creation_timestamp=invocaton_timestamp, + creation_timestamp=invocation_timestamp, intermediate_data=IntermediateData( tool_uses=tool_uses, intermediate_responses=intermediate_responses[:-1], diff --git a/src/google/adk/code_executors/code_execution_utils.py b/src/google/adk/code_executors/code_execution_utils.py index 8a2021837..adff6fb80 100644 --- a/src/google/adk/code_executors/code_execution_utils.py +++ b/src/google/adk/code_executors/code_execution_utils.py @@ -13,6 +13,7 @@ # limitations under the License. """Utility functions for code execution.""" +from __future__ import annotations import base64 import binascii @@ -120,12 +121,12 @@ def extract_code_and_truncate_content( the code blocks. Returns: - The first code block if found, otherwise None. + The first code block if found; otherwise, None. """ if not content or not content.parts: return - # Extract the code from the executable code parts if there're no associated + # Extract the code from the executable code parts if there are no associated # code execution result parts. 
for idx, part in enumerate(content.parts): if part.executable_code and ( diff --git a/src/google/adk/evaluation/_eval_sets_manager_utils.py b/src/google/adk/evaluation/_eval_sets_manager_utils.py index b7e12dd37..737f769e7 100644 --- a/src/google/adk/evaluation/_eval_sets_manager_utils.py +++ b/src/google/adk/evaluation/_eval_sets_manager_utils.py @@ -28,7 +28,7 @@ def get_eval_set_from_app_and_id( eval_sets_manager: EvalSetsManager, app_name: str, eval_set_id: str ) -> EvalSet: - """Returns an EvalSet if found, otherwise raises NotFoundError.""" + """Returns an EvalSet if found; otherwise, raises NotFoundError.""" eval_set = eval_sets_manager.get_eval_set(app_name, eval_set_id) if not eval_set: raise NotFoundError(f"Eval set `{eval_set_id}` not found.") @@ -38,7 +38,7 @@ def get_eval_set_from_app_and_id( def get_eval_case_from_eval_set( eval_set: EvalSet, eval_case_id: str ) -> Optional[EvalCase]: - """Returns an EvalCase if found, otherwise None.""" + """Returns an EvalCase if found; otherwise, None.""" eval_case_to_find = None # Look up the eval case by eval_case_id diff --git a/src/google/adk/evaluation/agent_evaluator.py b/src/google/adk/evaluation/agent_evaluator.py index 150a80c1a..bbce29699 100644 --- a/src/google/adk/evaluation/agent_evaluator.py +++ b/src/google/adk/evaluation/agent_evaluator.py @@ -43,7 +43,7 @@ from .eval_sets_manager import EvalSetsManager from .evaluator import EvalStatus from .in_memory_eval_sets_manager import InMemoryEvalSetsManager -from .local_eval_sets_manager import convert_eval_set_to_pydanctic_schema +from .local_eval_sets_manager import convert_eval_set_to_pydantic_schema logger = logging.getLogger("google_adk." + __name__) @@ -130,7 +130,7 @@ async def evaluate_eval_set( the agent. There is convention in place here, where the code is going to look for 'root_agent' in the loaded module. eval_set: The eval set. 
- criteria: Evauation criterias, a dictionary of metric names to their + criteria: Evaluation criteria, a dictionary of metric names to their respective thresholds. num_runs: Number of times all entries in the eval dataset should be assessed. @@ -267,7 +267,7 @@ def _load_eval_set_from_file( try: eval_set = EvalSet.model_validate_json(content) assert len(initial_session) == 0, ( - "Intial session should be specified as a part of EvalSet file." + "Initial session should be specified as a part of EvalSet file." " Explicit initial session is only needed, when specifying data in" " the older schema." ) @@ -299,7 +299,7 @@ def _get_eval_set_from_old_format( "data": data, "initial_session": initial_session, } - return convert_eval_set_to_pydanctic_schema( + return convert_eval_set_to_pydantic_schema( eval_set_id=str(uuid.uuid4()), eval_set_in_json_format=[eval_data] ) @@ -559,7 +559,7 @@ async def _get_eval_results_by_eval_id( def _get_eval_metric_results_with_invocation( eval_results_per_eval_id: list[EvalCaseResult], ) -> dict[str, list[_EvalMetricResultWithInvocation]]: - """Retruns _EvalMetricResultWithInvocation grouped by metric. + """Returns _EvalMetricResultWithInvocation grouped by metric. EvalCaseResult contain results for each metric per invocation. diff --git a/src/google/adk/evaluation/base_eval_service.py b/src/google/adk/evaluation/base_eval_service.py index 3d576ab2d..a82a46832 100644 --- a/src/google/adk/evaluation/base_eval_service.py +++ b/src/google/adk/evaluation/base_eval_service.py @@ -94,11 +94,11 @@ class InferenceRequest(BaseModel): description="""The name of the app to which the eval case belongs to.""" ) - eval_set_id: str = Field(description="""Id of the eval set.""") + eval_set_id: str = Field(description="""ID of the eval set.""") eval_case_ids: Optional[list[str]] = Field( default=None, - description="""Id of the eval cases for which inferences need to be + description="""ID of the eval cases for which inferences need to be generated. 
All the eval case ids should belong to the EvalSet. @@ -133,10 +133,10 @@ class InferenceResult(BaseModel): description="""The name of the app to which the eval case belongs to.""" ) - eval_set_id: str = Field(description="""Id of the eval set.""") + eval_set_id: str = Field(description="""ID of the eval set.""") eval_case_id: str = Field( - description="""Id of the eval case for which inferences were generated.""", + description="""ID of the eval case for which inferences were generated.""", ) inferences: Optional[list[Invocation]] = Field( @@ -145,7 +145,7 @@ class InferenceResult(BaseModel): ) session_id: Optional[str] = Field( - description="""Id of the inference session.""" + description="""ID of the inference session.""" ) status: InferenceStatus = Field( diff --git a/src/google/adk/evaluation/eval_metrics.py b/src/google/adk/evaluation/eval_metrics.py index d73ce1e6a..45d9b08c3 100644 --- a/src/google/adk/evaluation/eval_metrics.py +++ b/src/google/adk/evaluation/eval_metrics.py @@ -133,7 +133,7 @@ class EvalMetricResultPerInvocation(BaseModel): eval_metric_results: list[EvalMetricResult] = Field( default=[], - description="Eval resutls for each applicable metric.", + description="Eval results for each applicable metric.", ) diff --git a/src/google/adk/evaluation/eval_sets_manager.py b/src/google/adk/evaluation/eval_sets_manager.py index 0c062af56..1790eec3b 100644 --- a/src/google/adk/evaluation/eval_sets_manager.py +++ b/src/google/adk/evaluation/eval_sets_manager.py @@ -46,7 +46,7 @@ def list_eval_sets(self, app_name: str) -> list[str]: def get_eval_case( self, app_name: str, eval_set_id: str, eval_case_id: str ) -> Optional[EvalCase]: - """Returns an EvalCase if found, otherwise None.""" + """Returns an EvalCase if found; otherwise, None.""" @abstractmethod def add_eval_case(self, app_name: str, eval_set_id: str, eval_case: EvalCase): diff --git a/src/google/adk/evaluation/evaluation_generator.py b/src/google/adk/evaluation/evaluation_generator.py index 
7d6436493..530e2c440 100644 --- a/src/google/adk/evaluation/evaluation_generator.py +++ b/src/google/adk/evaluation/evaluation_generator.py @@ -40,7 +40,7 @@ class EvalCaseResponses(BaseModel): """Contains multiple responses associated with an EvalCase. - Multiple responses are a result of repeated requests to genereate inferences. + Multiple responses are a result of repeated requests to generate inferences. """ eval_case: EvalCase diff --git a/src/google/adk/evaluation/evaluator.py b/src/google/adk/evaluation/evaluator.py index bc19313df..a9cdb4e1f 100644 --- a/src/google/adk/evaluation/evaluator.py +++ b/src/google/adk/evaluation/evaluator.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations from abc import ABC from enum import Enum @@ -47,7 +48,7 @@ class EvaluationResult(BaseModel): class Evaluator(ABC): - """A merics evaluator interface.""" + """A metrics evaluator interface.""" def evaluate_invocations( self, diff --git a/src/google/adk/evaluation/gcs_eval_sets_manager.py b/src/google/adk/evaluation/gcs_eval_sets_manager.py index ba7871ce5..002d0d1be 100644 --- a/src/google/adk/evaluation/gcs_eval_sets_manager.py +++ b/src/google/adk/evaluation/gcs_eval_sets_manager.py @@ -101,7 +101,7 @@ def get_eval_set(self, app_name: str, eval_set_id: str) -> Optional[EvalSet]: @override def create_eval_set(self, app_name: str, eval_set_id: str): """Creates an empty EvalSet and saves it to GCS.""" - self._validate_id(id_name="Eval Set Id", id_value=eval_set_id) + self._validate_id(id_name="Eval Set ID", id_value=eval_set_id) new_eval_set_blob_name = self._get_eval_set_blob_name(app_name, eval_set_id) if self.bucket.blob(new_eval_set_blob_name).exists(): raise ValueError( diff --git a/src/google/adk/evaluation/local_eval_service.py b/src/google/adk/evaluation/local_eval_service.py index 
f443bb703..7d95db216 100644 --- a/src/google/adk/evaluation/local_eval_service.py +++ b/src/google/adk/evaluation/local_eval_service.py @@ -120,7 +120,7 @@ async def perform_inference( async def run_inference(eval_case): async with semaphore: - return await self._perform_inference_sigle_eval_item( + return await self._perform_inference_single_eval_item( app_name=inference_request.app_name, eval_set_id=inference_request.eval_set_id, eval_case=eval_case, @@ -176,7 +176,7 @@ async def _evaluate_single_inference_result( """Returns EvalCaseResult for the given inference result. A single inference result can have multiple invocations. For each - invocaiton, this method evaluates the metrics present in evaluate config. + invocation, this method evaluates the metrics present in evaluate config. The EvalCaseResult contains scores for each metric per invocation and the overall score. @@ -199,7 +199,7 @@ async def _evaluate_single_inference_result( # We also keep track of the overall score for a metric, derived from all # invocation. For example, if we were keeping track the metric that compares - # how well is the final resposne as compared to a golden answer, then each + # how well is the final response as compared to a golden answer, then each # invocation will have the value of this metric. We will also have an # overall score using aggregation strategy across all invocations. This # would be the score for the eval case. @@ -233,7 +233,7 @@ async def _evaluate_single_inference_result( expected_invocations=eval_case.conversation, ) - # Track overall scrore across all invocations. + # Track overall score across all invocations. 
overall_eval_metric_results.append( EvalMetricResult( metric_name=eval_metric.metric_name, @@ -325,8 +325,8 @@ def _generate_final_eval_status( self, overall_eval_metric_results: list[EvalMetricResult] ) -> EvalStatus: final_eval_status = EvalStatus.NOT_EVALUATED - # Go over the all the eval statuses and mark the final eval status as - # passed if all of them pass, otherwise mark the final eval status to + # Go over all the eval statuses and mark the final eval status as + # passed if all of them pass; otherwise, mark the final eval status to # failed. for overall_eval_metric_result in overall_eval_metric_results: overall_eval_status = overall_eval_metric_result.eval_status @@ -342,7 +342,7 @@ def _generate_final_eval_status( return final_eval_status - async def _perform_inference_sigle_eval_item( + async def _perform_inference_single_eval_item( self, app_name: str, eval_set_id: str, diff --git a/src/google/adk/evaluation/local_eval_sets_manager.py b/src/google/adk/evaluation/local_eval_sets_manager.py index b26739652..e292719bc 100644 --- a/src/google/adk/evaluation/local_eval_sets_manager.py +++ b/src/google/adk/evaluation/local_eval_sets_manager.py @@ -85,11 +85,11 @@ def _convert_invocation_to_pydantic_schema( ) -def convert_eval_set_to_pydanctic_schema( +def convert_eval_set_to_pydantic_schema( eval_set_id: str, eval_set_in_json_format: list[dict[str, Any]], ) -> EvalSet: - r"""Returns an pydantic EvalSet generated from the json representation. + r"""Returns a pydantic EvalSet generated from the json representation. Args: eval_set_id: Eval set id. @@ -183,7 +183,7 @@ def load_eval_set_from_file( except ValidationError: # We assume that the eval data was specified in the old format and try # to convert it to the new format. 
- return convert_eval_set_to_pydanctic_schema( + return convert_eval_set_to_pydantic_schema( eval_set_id, json.loads(content) ) @@ -207,7 +207,7 @@ def get_eval_set(self, app_name: str, eval_set_id: str) -> Optional[EvalSet]: @override def create_eval_set(self, app_name: str, eval_set_id: str): """Creates an empty EvalSet given the app_name and eval_set_id.""" - self._validate_id(id_name="Eval Set Id", id_value=eval_set_id) + self._validate_id(id_name="Eval Set ID", id_value=eval_set_id) # Define the file path new_eval_set_path = self._get_eval_set_file_path(app_name, eval_set_id) @@ -256,7 +256,7 @@ def list_eval_sets(self, app_name: str) -> list[str]: def get_eval_case( self, app_name: str, eval_set_id: str, eval_case_id: str ) -> Optional[EvalCase]: - """Returns an EvalCase if found, otherwise None.""" + """Returns an EvalCase if found; otherwise, None.""" eval_set = self.get_eval_set(app_name, eval_set_id) if not eval_set: return None diff --git a/src/google/adk/evaluation/response_evaluator.py b/src/google/adk/evaluation/response_evaluator.py index fa6be8bf6..5e35178cd 100644 --- a/src/google/adk/evaluation/response_evaluator.py +++ b/src/google/adk/evaluation/response_evaluator.py @@ -36,7 +36,7 @@ class ResponseEvaluator(Evaluator): This class supports two metrics: 1) response_evaluation_score - This metric evaluates how coherent agent's resposne was. + This metric evaluates how coherent agent's response was. Value range of this metric is [1,5], with values closer to 5 more desirable. @@ -81,7 +81,7 @@ def get_metric_info(metric_name: str) -> MetricInfo: return MetricInfo( metric_name=PrebuiltMetrics.RESPONSE_EVALUATION_SCORE.value, description=( - "This metric evaluates how coherent agent's resposne was. Value" + "This metric evaluates how coherent agent's response was. Value" " range of this metric is [1,5], with values closer to 5 more" " desirable." 
), diff --git a/src/google/adk/evaluation/trajectory_evaluator.py b/src/google/adk/evaluation/trajectory_evaluator.py index 8f7508d44..0061dd08b 100644 --- a/src/google/adk/evaluation/trajectory_evaluator.py +++ b/src/google/adk/evaluation/trajectory_evaluator.py @@ -165,7 +165,7 @@ def evaluate( following keys: 1) query 2) response - 3) acutal_tool_use + 3) actual_tool_use 4) expected_tool_use Here is a sample eval_dataset value with one entry: @@ -183,7 +183,7 @@ def evaluate( } } ], - "acutal_tool_use": [ + "actual_tool_use": [ { "tool_name": "roll_die", "tool_input": { diff --git a/src/google/adk/events/event.py b/src/google/adk/events/event.py index 93c378103..88ad2d218 100644 --- a/src/google/adk/events/event.py +++ b/src/google/adk/events/event.py @@ -95,7 +95,7 @@ def is_final_response(self) -> bool: NOTE: This method is ONLY for use by Agent Development Kit. - Note that when multiple agents participage in one invocation, there could be + Note that when multiple agents participate in one invocation, there could be one event has `is_final_response()` as True for each participating agent. """ if self.actions.skip_summarization or self.long_running_tool_ids: diff --git a/src/google/adk/flows/llm_flows/_code_execution.py b/src/google/adk/flows/llm_flows/_code_execution.py index c2252f972..62620b200 100644 --- a/src/google/adk/flows/llm_flows/_code_execution.py +++ b/src/google/adk/flows/llm_flows/_code_execution.py @@ -194,7 +194,7 @@ async def _run_pre_processor( # [Step 1] Extract data files from the session_history and store them in # memory. Meanwhile, mutate the inline data file to text part in session # history from all turns. 
- all_input_files = _extrac_and_replace_inline_files( + all_input_files = _extract_and_replace_inline_files( code_executor_context, llm_request ) @@ -322,7 +322,7 @@ async def _run_post_processor( llm_response.content = None -def _extrac_and_replace_inline_files( +def _extract_and_replace_inline_files( code_executor_context: CodeExecutorContext, llm_request: LlmRequest, ) -> list[File]: @@ -353,7 +353,7 @@ def _extrac_and_replace_inline_files( text='\nAvailable file: `%s`\n' % file_name ) - # Add the inlne data as input file to the code executor context. + # Add the inline data as input file to the code executor context. file = File( name=file_name, content=CodeExecutionUtils.get_encoded_file_content( diff --git a/src/google/adk/flows/llm_flows/base_llm_flow.py b/src/google/adk/flows/llm_flows/base_llm_flow.py index 90cf0fbcf..ca6006784 100644 --- a/src/google/adk/flows/llm_flows/base_llm_flow.py +++ b/src/google/adk/flows/llm_flows/base_llm_flow.py @@ -243,7 +243,7 @@ async def _send_to_model( invocation_context.transcription_cache = [] if not invocation_context.run_config.input_audio_transcription: # if the live model's input transcription is not enabled, then - # we use our onwn audio transcriber to achieve that. + # we use our own audio transcriber to achieve that. invocation_context.transcription_cache.append( TranscriptionEntry(role='user', data=live_request.blob) ) @@ -285,7 +285,7 @@ def get_author_for_event(llm_response): async for llm_response in llm_connection.receive(): if llm_response.live_session_resumption_update: logger.info( - 'Update session resumption hanlde:' + 'Update session resumption handle:' f' {llm_response.live_session_resumption_update}.' 
) invocation_context.live_session_resumption_handle = ( diff --git a/src/google/adk/flows/llm_flows/basic.py b/src/google/adk/flows/llm_flows/basic.py index 549c6d875..f1539052c 100644 --- a/src/google/adk/flows/llm_flows/basic.py +++ b/src/google/adk/flows/llm_flows/basic.py @@ -52,7 +52,7 @@ async def run_async( ) # Only set output_schema if no tools are specified. as of now, model don't # support output_schema and tools together. we have a workaround to support - # both outoput_schema and tools at the same time. see + # both output_schema and tools at the same time. see # _output_schema_processor.py for details if agent.output_schema and not agent.tools: llm_request.set_output_schema(agent.output_schema) diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py index ae1bd44ad..203dc990d 100644 --- a/src/google/adk/flows/llm_flows/contents.py +++ b/src/google/adk/flows/llm_flows/contents.py @@ -133,7 +133,7 @@ def _rearrange_events_for_latest_function_response( function_responses = events[-1].get_function_responses() if not function_responses: - # No need to process, since the latest event is not fuction_response. + # No need to process, since the latest event is not function_response. return events function_responses_ids = set() diff --git a/src/google/adk/models/base_llm_connection.py b/src/google/adk/models/base_llm_connection.py index 22ca3b360..afce550b1 100644 --- a/src/google/adk/models/base_llm_connection.py +++ b/src/google/adk/models/base_llm_connection.py @@ -30,7 +30,7 @@ async def send_history(self, history: list[types.Content]): """Sends the conversation history to the model. You call this method right after setting up the model connection. - The model will respond if the last content is from user, otherwise it will + The model will respond if the last content is from user; otherwise, it will wait for new user input before responding. 
Args: diff --git a/src/google/adk/models/gemini_llm_connection.py b/src/google/adk/models/gemini_llm_connection.py index 3b46c91ad..d073562b6 100644 --- a/src/google/adk/models/gemini_llm_connection.py +++ b/src/google/adk/models/gemini_llm_connection.py @@ -39,7 +39,7 @@ async def send_history(self, history: list[types.Content]): """Sends the conversation history to the gemini model. You call this method right after setting up the model connection. - The model will respond if the last content is from user, otherwise it will + The model will respond if the last content is from user; otherwise, it will wait for new user input before responding. Args: @@ -220,7 +220,7 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]: ] yield LlmResponse(content=types.Content(role='model', parts=parts)) if message.session_resumption_update: - logger.info('Redeived session reassumption message: %s', message) + logger.info('Received session resumption message: %s', message) yield ( LlmResponse( live_session_resumption_update=message.session_resumption_update diff --git a/src/google/adk/models/google_llm.py b/src/google/adk/models/google_llm.py index b1cad1c54..537978efc 100644 --- a/src/google/adk/models/google_llm.py +++ b/src/google/adk/models/google_llm.py @@ -136,7 +136,7 @@ async def generate_content_async( thought_text = '' text = '' usage_metadata = None - # for sse, similar as bidi (see receive method in gemini_llm_connecton.py), + # for sse, similar as bidi (see receive method in gemini_llm_connection.py), # we need to mark those text content as partial and after all partial # contents are sent, we send an accumulated event which contains all the # previous partial content.
The only difference is bidi rely on diff --git a/src/google/adk/plugins/plugin_manager.py b/src/google/adk/plugins/plugin_manager.py index 217dbb8be..634f90438 100644 --- a/src/google/adk/plugins/plugin_manager.py +++ b/src/google/adk/plugins/plugin_manager.py @@ -102,7 +102,7 @@ def get_plugin(self, plugin_name: str) -> Optional[BasePlugin]: plugin_name: The name of the plugin to retrieve. Returns: - The plugin instance if found, otherwise `None`. + The plugin instance if found; otherwise, `None`. """ return next((p for p in self.plugins if p.name == plugin_name), None) diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py index 51fdb9658..af0e2ac98 100644 --- a/src/google/adk/runners.py +++ b/src/google/adk/runners.py @@ -469,7 +469,7 @@ def _find_agent_to_run( message) """ # If the last event is a function response, should send this response to - # the agent that returned the corressponding function call regardless the + # the agent that returned the corresponding function call regardless the # type of the agent. e.g. a remote a2a agent may surface a credential # request as a special long running function tool call. event = find_matching_function_call(session.events) diff --git a/src/google/adk/telemetry.py b/src/google/adk/telemetry.py index 10ac58399..e54563a14 100644 --- a/src/google/adk/telemetry.py +++ b/src/google/adk/telemetry.py @@ -252,7 +252,7 @@ def _build_llm_request_for_trace(llm_request: LlmRequest) -> dict[str, Any]: Returns: A dictionary representation of the LLM request. """ - # Some fields in LlmRequest are function pointers and can not be serialized. + # Some fields in LlmRequest are function pointers and cannot be serialized. 
result = { 'model': llm_request.model, 'config': llm_request.config.model_dump( diff --git a/src/google/adk/tools/apihub_tool/apihub_toolset.py b/src/google/adk/tools/apihub_tool/apihub_toolset.py index ba4d3f488..fe9e38bd9 100644 --- a/src/google/adk/tools/apihub_tool/apihub_toolset.py +++ b/src/google/adk/tools/apihub_tool/apihub_toolset.py @@ -114,7 +114,7 @@ def __init__( apihub_resource_name: The resource name of the API in API Hub. Example: ``projects/test-project/locations/us-central1/apis/test-api``. access_token: Google Access token. Generate with gcloud cli - ``gcloud auth auth print-access-token``. Used for fetching API Specs from API Hub. + ``gcloud auth print-access-token``. Used for fetching API Specs from API Hub. service_account_json: The service account config as a json string. Required if not using default service credential. It is used for creating the API Hub client and fetching the API Specs from API Hub. diff --git a/src/google/adk/tools/apihub_tool/clients/apihub_client.py b/src/google/adk/tools/apihub_tool/clients/apihub_client.py index 9bee236e3..84bde6029 100644 --- a/src/google/adk/tools/apihub_tool/clients/apihub_client.py +++ b/src/google/adk/tools/apihub_tool/clients/apihub_client.py @@ -37,7 +37,7 @@ class BaseAPIHubClient(ABC): @abstractmethod def get_spec_content(self, resource_name: str) -> str: - """From a given resource name, get the soec in the API Hub.""" + """From a given resource name, get the spec in the API Hub.""" raise NotImplementedError() diff --git a/src/google/adk/tools/apihub_tool/clients/secret_client.py b/src/google/adk/tools/apihub_tool/clients/secret_client.py index d5015b8aa..f4d148615 100644 --- a/src/google/adk/tools/apihub_tool/clients/secret_client.py +++ b/src/google/adk/tools/apihub_tool/clients/secret_client.py @@ -29,7 +29,7 @@ class SecretManagerClient: This class provides a simplified interface for retrieving secrets from Secret Manager, handling authentication using either a service account - JSON 
keyfile (passed as a string) or a pre-existing authorization token. + JSON keyfile (passed as a string) or a preexisting authorization token. Attributes: _credentials: Google Cloud credentials object (ServiceAccountCredentials diff --git a/src/google/adk/tools/authenticated_function_tool.py b/src/google/adk/tools/authenticated_function_tool.py index 67cc5885f..eff994409 100644 --- a/src/google/adk/tools/authenticated_function_tool.py +++ b/src/google/adk/tools/authenticated_function_tool.py @@ -58,7 +58,7 @@ def __init__( the tool doesn't configure any credentials (auth_config.raw_auth_credential is missing) or the credentials configured is not enough to authenticate the tool (e.g. an OAuth - client id and client secrect is configured.) and needs client input + client id and client secret is configured.) and needs client input (e.g. client need to involve the end user in an oauth flow and get back the oauth response.) """ diff --git a/src/google/adk/tools/base_authenticated_tool.py b/src/google/adk/tools/base_authenticated_tool.py index 4858e4953..79960d2e9 100644 --- a/src/google/adk/tools/base_authenticated_tool.py +++ b/src/google/adk/tools/base_authenticated_tool.py @@ -57,7 +57,7 @@ def __init__( the tool doesn't configure any credentials (auth_config.raw_auth_credential is missing) or the credentials configured is not enough to authenticate the tool (e.g. an OAuth - client id and client secrect is configured.) and needs client input + client id and client secret is configured.) and needs client input (e.g. client need to involve the end user in an oauth flow and get back the oauth response.) 
""" diff --git a/src/google/adk/tools/bigquery/bigquery_credentials.py b/src/google/adk/tools/bigquery/bigquery_credentials.py index d0f3abe0e..15857c9b3 100644 --- a/src/google/adk/tools/bigquery/bigquery_credentials.py +++ b/src/google/adk/tools/bigquery/bigquery_credentials.py @@ -70,7 +70,7 @@ class BigQueryCredentialsConfig(BaseModel): To load service account key credentials, use: `google.auth.load_credentials_from_file(...)`. See more details in https://cloud.google.com/iam/docs/service-account-creds#user-managed-keys. - When the deployed environment cannot provide a pre-existing credential, + When the deployed environment cannot provide a preexisting credential, consider setting below client_id, client_secret and scope for end users to go through oauth flow, so that agent can access the user data. """ @@ -146,7 +146,7 @@ async def get_valid_credentials( else None ) - # If credentails are empty use the default credential + # If credentials are empty use the default credential if not creds: creds = self.credentials_config.credentials diff --git a/src/google/adk/tools/bigquery/bigquery_tool.py b/src/google/adk/tools/bigquery/bigquery_tool.py index 0b231edb6..bcad1a1e3 100644 --- a/src/google/adk/tools/bigquery/bigquery_tool.py +++ b/src/google/adk/tools/bigquery/bigquery_tool.py @@ -52,10 +52,10 @@ def __init__( """Initialize the Google API tool. Args: - func: callable that impelments the tool's logic, can accept one + func: callable that implements the tool's logic, can accept one 'credential" parameter credentials_config: credentials config used to call Google API. 
If None, - then we don't hanlde the auth logic + then we don't handle the auth logic """ super().__init__(func=func) self._ignore_params.append("credentials") diff --git a/src/google/adk/tools/bigquery/config.py b/src/google/adk/tools/bigquery/config.py index b2c02cfd2..936591cef 100644 --- a/src/google/adk/tools/bigquery/config.py +++ b/src/google/adk/tools/bigquery/config.py @@ -34,7 +34,7 @@ class WriteMode(Enum): """Only protected write operations are allowed in a BigQuery session. In this mode write operations in the anonymous dataset of a BigQuery session - are allowed. For example, a temporaray table can be created, manipulated and + are allowed. For example, a temporary table can be created, manipulated and deleted in the anonymous dataset during Agent interaction, while protecting permanent tables from being modified or deleted. To learn more about BigQuery sessions, see https://cloud.google.com/bigquery/docs/sessions-intro. diff --git a/src/google/adk/tools/bigquery/data_insights_tool.py b/src/google/adk/tools/bigquery/data_insights_tool.py index a2fdca081..b9718acf5 100644 --- a/src/google/adk/tools/bigquery/data_insights_tool.py +++ b/src/google/adk/tools/bigquery/data_insights_tool.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json from typing import Any @@ -34,8 +35,8 @@ def ask_data_insights( ) -> Dict[str, Any]: """Answers questions about structured data in BigQuery tables using natural language. - This function takes auser's question (which can include conversational - history for context) andreferences to specific BigQuery tables, and sends + This function takes a user's question (which can include conversational + history for context) and references to specific BigQuery tables, and sends them to a stateless conversational API. 
The API uses a GenAI agent to understand the question, generate and execute diff --git a/src/google/adk/tools/bigquery/query_tool.py b/src/google/adk/tools/bigquery/query_tool.py index c44ca67bb..54b645f9a 100644 --- a/src/google/adk/tools/bigquery/query_tool.py +++ b/src/google/adk/tools/bigquery/query_tool.py @@ -101,7 +101,7 @@ def execute_sql( elif config.write_mode == WriteMode.PROTECTED: # In protected write mode, write operation only to a temporary artifact is # allowed. This artifact must have been created in a BigQuery session. In - # such a scenario the session info (session id and the anonymous dataset + # such a scenario, the session info (session id and the anonymous dataset # containing the artifact) is persisted in the tool context. bq_session_info = tool_context.state.get(BIGQUERY_SESSION_INFO_KEY, None) if bq_session_info: @@ -328,7 +328,7 @@ def execute_sql( """ -_execute_sql_protecetd_write_examples = """ +_execute_sql_protected_write_examples = """ Create a temporary table with schema prescribed: >>> execute_sql("my_project", @@ -494,7 +494,7 @@ def get_execute_sql(config: BigQueryToolConfig) -> Callable[..., dict]: # Now, set the new docstring if config.write_mode == WriteMode.PROTECTED: - execute_sql_wrapper.__doc__ += _execute_sql_protecetd_write_examples + execute_sql_wrapper.__doc__ += _execute_sql_protected_write_examples else: execute_sql_wrapper.__doc__ += _execute_sql_write_examples diff --git a/src/google/adk/tools/enterprise_search_tool.py b/src/google/adk/tools/enterprise_search_tool.py index f27b7de67..7980f8f02 100644 --- a/src/google/adk/tools/enterprise_search_tool.py +++ b/src/google/adk/tools/enterprise_search_tool.py @@ -52,7 +52,7 @@ async def process_llm_request( if is_gemini_model(llm_request.model): if is_gemini_1_model(llm_request.model) and llm_request.config.tools: raise ValueError( - 'Enterprise web search tool can not be used with other tools in' + 'Enterprise web search tool cannot be used with other tools in' ' 
Gemini 1.x.' ) llm_request.config = llm_request.config or types.GenerateContentConfig() diff --git a/src/google/adk/tools/function_tool.py b/src/google/adk/tools/function_tool.py index 2687f1200..a30cd9caa 100644 --- a/src/google/adk/tools/function_tool.py +++ b/src/google/adk/tools/function_tool.py @@ -91,7 +91,7 @@ async def run_async( # Before invoking the function, we check for if the list of args passed in # has all the mandatory arguments or not. # If the check fails, then we don't invoke the tool and let the Agent know - # that there was a missing a input parameter. This will basically help + # that there was a missing input parameter. This will basically help # the underlying model fix the issue and retry. mandatory_args = self._get_mandatory_args() missing_mandatory_args = [ @@ -107,7 +107,7 @@ async def run_async( # Functions are callable objects, but not all callable objects are functions # checking coroutine function is not enough. We also need to check whether - # Callable's __call__ function is a coroutine funciton + # Callable's __call__ function is a coroutine function if ( inspect.iscoroutinefunction(self.func) or hasattr(self.func, '__call__') diff --git a/src/google/adk/tools/google_search_tool.py b/src/google/adk/tools/google_search_tool.py index 4f64bc4a1..6737eebca 100644 --- a/src/google/adk/tools/google_search_tool.py +++ b/src/google/adk/tools/google_search_tool.py @@ -51,7 +51,7 @@ async def process_llm_request( if is_gemini_1_model(llm_request.model): if llm_request.config.tools: raise ValueError( - 'Google search tool can not be used with other tools in Gemini 1.x.' + 'Google search tool cannot be used with other tools in Gemini 1.x.' 
) llm_request.config.tools.append( types.Tool(google_search_retrieval=types.GoogleSearchRetrieval()) diff --git a/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py b/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py index 74166b00e..38f4d7ecd 100644 --- a/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py +++ b/src/google/adk/tools/openapi_tool/openapi_spec_parser/tool_auth_handler.py @@ -268,9 +268,9 @@ async def prepare_auth_credentials( ) # here exchangers are doing two different thing: - # for service account the exchanger is doing actualy token exchange - # while for oauth2 it's actually doing the credentail conversion - # from OAuth2 credential to HTTP credentails for setting credential in + # for service account the exchanger is doing actual token exchange + # while for oauth2 it's actually doing the credential conversion + # from OAuth2 credential to HTTP credentials for setting credential in # http header # TODO cleanup the logic: # 1. 
service account token exchanger should happen before we store them in diff --git a/src/google/adk/tools/url_context_tool.py b/src/google/adk/tools/url_context_tool.py index 95d653602..8700ce814 100644 --- a/src/google/adk/tools/url_context_tool.py +++ b/src/google/adk/tools/url_context_tool.py @@ -49,7 +49,7 @@ async def process_llm_request( llm_request.config = llm_request.config or types.GenerateContentConfig() llm_request.config.tools = llm_request.config.tools or [] if is_gemini_1_model(llm_request.model): - raise ValueError('Url context tool can not be used in Gemini 1.x.') + raise ValueError('Url context tool cannot be used in Gemini 1.x.') elif is_gemini_2_model(llm_request.model): llm_request.config.tools.append( types.Tool(url_context=types.UrlContext()) diff --git a/src/google/adk/tools/vertex_ai_search_tool.py b/src/google/adk/tools/vertex_ai_search_tool.py index 1a658ef62..09567e138 100644 --- a/src/google/adk/tools/vertex_ai_search_tool.py +++ b/src/google/adk/tools/vertex_ai_search_tool.py @@ -91,7 +91,7 @@ async def process_llm_request( if is_gemini_model(llm_request.model): if is_gemini_1_model(llm_request.model) and llm_request.config.tools: raise ValueError( - 'Vertex AI search tool can not be used with other tools in Gemini' + 'Vertex AI search tool cannot be used with other tools in Gemini' ' 1.x.' 
) llm_request.config = llm_request.config or types.GenerateContentConfig() diff --git a/tests/integration/fixture/context_variable_agent/agent.py b/tests/integration/fixture/context_variable_agent/agent.py index cef56ccb1..04e19314f 100644 --- a/tests/integration/fixture/context_variable_agent/agent.py +++ b/tests/integration/fixture/context_variable_agent/agent.py @@ -43,7 +43,7 @@ def echo_info(customer_id: str) -> str: def build_global_instruction(invocation_context: InvocationContext) -> str: return ( - 'This is the gloabl agent instruction for invocation:' + 'This is the global agent instruction for invocation:' f' {invocation_context.invocation_id}.' ) diff --git a/tests/integration/fixture/flow_complex_spark/agent.py b/tests/integration/fixture/flow_complex_spark/agent.py index 18ce62ff8..02fbfaeba 100644 --- a/tests/integration/fixture/flow_complex_spark/agent.py +++ b/tests/integration/fixture/flow_complex_spark/agent.py @@ -41,7 +41,7 @@ + Don't ask for clarifications from the user. + Do not ask the user for clarifications or if they have any other questions. + All headers should be bolded. -+ If you have steps in the plan that depend on other information, make sure they are 2 diferent sections in the plan. ++ If you have steps in the plan that depend on other information, make sure they are 2 different sections in the plan. + At the end mention that you will start researching. # Instruction on replying format @@ -68,7 +68,7 @@ # Instruction on replying format -Your reply should be a numbered lsit. +Your reply should be a numbered list. For each question, reply in the following format: "[question_generation_agent]: [generated questions]" @@ -92,7 +92,7 @@ " question." ), instruction="""\ -Inspect all the questions after "[question_generation_agent]: " and asnwer them. +Inspect all the questions after "[question_generation_agent]: " and answer them. 
# Instruction on replying format diff --git a/tests/integration/fixture/flow_complex_spark/sample.session.json b/tests/integration/fixture/flow_complex_spark/sample.session.json index 31575a84b..ed3a200d3 100644 --- a/tests/integration/fixture/flow_complex_spark/sample.session.json +++ b/tests/integration/fixture/flow_complex_spark/sample.session.json @@ -52,7 +52,7 @@ "response": { "status": "ok", "target_agent_name": "research_assistant", - "message": "Transfered to research_assistant" + "message": "Transferred to research_assistant" } } } @@ -165,7 +165,7 @@ "response": { "status": "ok", "target_agent_name": "spark_assistant", - "message": "Transfered to spark_assistant" + "message": "Transferred to spark_assistant" } } } diff --git a/tests/integration/fixture/tool_agent/agent.py b/tests/integration/fixture/tool_agent/agent.py index a89d20899..2f914750a 100644 --- a/tests/integration/fixture/tool_agent/agent.py +++ b/tests/integration/fixture/tool_agent/agent.py @@ -90,17 +90,17 @@ def complex_function_list_dict( raise ValueError("Wrong param") -def repetive_call_1(param: str): - return f"Call repetive_call_2 tool with param {param + '_repetive'}" +def repetitive_call_1(param: str): + return f"Call repetitive_call_2 tool with param {param + '_repetitive'}" -def repetive_call_2(param: str): +def repetitive_call_2(param: str): return param test_case_retrieval = FilesRetrieval( name="test_case_retrieval", - description="General guidence for agent test cases", + description="General guidance for agent test cases", input_dir=os.path.join(os.path.dirname(__file__), "files"), ) @@ -109,7 +109,7 @@ def repetive_call_2(param: str): rag_corpora=[ "projects/1096655024998/locations/us-central1/ragCorpora/4985766262475849728" ], - description="General guidence for agent test cases", + description="General guidance for agent test cases", ) invalid_rag_retrieval = VertexAiRagRetrieval( @@ -131,7 +131,7 @@ def repetive_call_2(param: str): shell_tool = LangchainTool(ShellTool()) 
docs_tool = CrewaiTool( - name="direcotry_read_tool", + name="directory_read_tool", description="use this to find files for you.", tool=DirectoryReadTool(directory="."), ) @@ -194,8 +194,8 @@ def repetive_call_2(param: str): list_str_param_function, return_list_str_function, # complex_function_list_dict, - repetive_call_1, - repetive_call_2, + repetitive_call_1, + repetitive_call_2, test_case_retrieval, valid_rag_retrieval, invalid_rag_retrieval, diff --git a/tests/integration/fixture/trip_planner_agent/agent.py b/tests/integration/fixture/trip_planner_agent/agent.py index ea8a33ab4..5c4a9f298 100644 --- a/tests/integration/fixture/trip_planner_agent/agent.py +++ b/tests/integration/fixture/trip_planner_agent/agent.py @@ -105,6 +105,6 @@ instruction=""" Your goal is to plan the best trip according to information listed above. You describe why did you choose the city, list top 3 - attactions and provide a detailed itinerary for each day.""", + attractions and provide a detailed itinerary for each day.""", sub_agents=[identify_agent, gather_agent, plan_agent], ) diff --git a/tests/integration/fixture/trip_planner_agent/trip_inquiry.test.json b/tests/integration/fixture/trip_planner_agent/trip_inquiry.test.json index 317599c6b..bd74aaa1c 100644 --- a/tests/integration/fixture/trip_planner_agent/trip_inquiry.test.json +++ b/tests/integration/fixture/trip_planner_agent/trip_inquiry.test.json @@ -89,7 +89,7 @@ { "id": null, "args": { - "agent_name": "indentify_agent" + "agent_name": "identify_agent" }, "name": "transfer_to_agent" } diff --git a/tests/integration/models/test_litellm_with_function.py b/tests/integration/models/test_litellm_with_function.py index e4ac787e7..b06c8f826 100644 --- a/tests/integration/models/test_litellm_with_function.py +++ b/tests/integration/models/test_litellm_with_function.py @@ -83,7 +83,7 @@ def llm_request(): @pytest.mark.asyncio -async def test_generate_content_asyn_with_function( +async def test_generate_content_async_with_function( 
oss_llm_with_function, llm_request ): responses = [ @@ -98,7 +98,7 @@ async def test_generate_content_asyn_with_function( @pytest.mark.asyncio -async def test_generate_content_asyn_stream_with_function( +async def test_generate_content_async_stream_with_function( oss_llm_with_function, llm_request ): responses = [ diff --git a/tests/integration/test_evalute_agent_in_fixture.py b/tests/integration/test_evalute_agent_in_fixture.py index 344ba0994..bd09549ee 100644 --- a/tests/integration/test_evalute_agent_in_fixture.py +++ b/tests/integration/test_evalute_agent_in_fixture.py @@ -64,8 +64,8 @@ async def test_evaluate_agents_long_running_4_runs_per_eval_item( await AgentEvaluator.evaluate( agent_module=agent_name, eval_dataset_file_path_or_dir=evalfile, - # Using a slightly higher value helps us manange the variances that may + # Using a slightly higher value helps us manage the variances that may # happen in each eval. - # This, of course, comes at a cost of incrased test run times. + # This, of course, comes at a cost of increased test run times. 
num_runs=4, ) diff --git a/tests/integration/test_tools.py b/tests/integration/test_tools.py index 39662484e..a9f99791b 100644 --- a/tests/integration/test_tools.py +++ b/tests/integration/test_tools.py @@ -106,12 +106,12 @@ def test_complex_function_calls_success(agent_runner: TestRunner): [{"agent": tool_agent.agent.root_agent}], indirect=True, ) -def test_repetive_call_success(agent_runner: TestRunner): +def test_repetitive_call_success(agent_runner: TestRunner): _call_function_and_assert( agent_runner, - "repetive_call_1", + "repetitive_call_1", "test", - "test_repetive", + "test_repetitive", ) diff --git a/tests/unittests/a2a/converters/test_request_converter.py b/tests/unittests/a2a/converters/test_request_converter.py index 699df14c2..e87076366 100644 --- a/tests/unittests/a2a/converters/test_request_converter.py +++ b/tests/unittests/a2a/converters/test_request_converter.py @@ -366,7 +366,7 @@ def test_end_to_end_conversion_with_fallback_user(self, mock_convert_part): assert result is not None assert ( result["user_id"] == "A2A_USER_test_session_456" - ) # Should fallback to context ID + ) # Should fall back to context ID assert result["session_id"] == "test_session_456" assert isinstance(result["new_message"], genai_types.Content) assert result["new_message"].role == "user" diff --git a/tests/unittests/a2a/utils/test_agent_card_builder.py b/tests/unittests/a2a/utils/test_agent_card_builder.py index fb52dd5ce..91d67bc89 100644 --- a/tests/unittests/a2a/utils/test_agent_card_builder.py +++ b/tests/unittests/a2a/utils/test_agent_card_builder.py @@ -360,7 +360,7 @@ def test_replace_pronouns_basic(self): assert result == "I should do my work and it will be mine." def test_replace_pronouns_case_insensitive(self): - """Test _replace_pronouns with case insensitive matching.""" + """Test _replace_pronouns with case-insensitive matching.""" # Arrange text = "YOU should do YOUR work and it will be YOURS." 
@@ -1102,7 +1102,7 @@ def test_extract_examples_from_instruction_with_different_patterns(self): assert result is None def test_extract_examples_from_instruction_case_insensitive(self): - """Test _extract_examples_from_instruction with case insensitive matching.""" + """Test _extract_examples_from_instruction with case-insensitive matching.""" # Arrange instruction = ( 'example query: "What is the weather?" example response: "The weather' diff --git a/tests/unittests/agents/test_agent_clone.py b/tests/unittests/agents/test_agent_clone.py index 7bda2a69c..ac5492cf6 100644 --- a/tests/unittests/agents/test_agent_clone.py +++ b/tests/unittests/agents/test_agent_clone.py @@ -270,7 +270,7 @@ def test_clone_invalid_field(): """Test that cloning with invalid fields raises an error.""" original = LlmAgent(name="test_agent", description="Test agent") - with pytest.raises(ValueError, match="Cannot update non-existent fields"): + with pytest.raises(ValueError, match="Cannot update nonexistent fields"): original.clone(update={"invalid_field": "value"}) diff --git a/tests/unittests/agents/test_remote_a2a_agent.py b/tests/unittests/agents/test_remote_a2a_agent.py index 7ef32de66..5bd6bf7ed 100644 --- a/tests/unittests/agents/test_remote_a2a_agent.py +++ b/tests/unittests/agents/test_remote_a2a_agent.py @@ -273,7 +273,7 @@ async def test_resolve_agent_card_from_file_success(self): @pytest.mark.asyncio async def test_resolve_agent_card_from_file_not_found(self): - """Test agent card resolution from non-existent file raises error.""" + """Test agent card resolution from nonexistent file raises error.""" agent = RemoteA2aAgent( name="test_agent", agent_card="/path/to/nonexistent.json" ) diff --git a/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py b/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py index 66b858232..32c4812c2 100644 --- a/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py +++ 
b/tests/unittests/auth/exchanger/test_credential_exchanger_registry.py @@ -126,7 +126,7 @@ def test_get_exchanger_returns_correct_instance(self): assert isinstance(retrieved_exchanger, BaseCredentialExchanger) def test_get_exchanger_nonexistent_type_returns_none(self): - """Test that get_exchanger returns None for non-existent credential types.""" + """Test that get_exchanger returns None for nonexistent credential types.""" registry = CredentialExchangerRegistry() # Try to get an exchanger that was never registered diff --git a/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py b/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py index 31288511e..811a0748e 100644 --- a/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py +++ b/tests/unittests/auth/exchanger/test_oauth2_credential_exchanger.py @@ -21,7 +21,7 @@ from google.adk.auth.auth_credential import AuthCredentialTypes from google.adk.auth.auth_credential import OAuth2Auth from google.adk.auth.auth_schemes import OpenIdConnectWithConfig -from google.adk.auth.exchanger.base_credential_exchanger import CredentialExchangError +from google.adk.auth.exchanger.base_credential_exchanger import CredentialExchangeError from google.adk.auth.exchanger.oauth2_credential_exchanger import OAuth2CredentialExchanger import pytest @@ -114,7 +114,7 @@ async def test_exchange_missing_auth_scheme(self): try: await exchanger.exchange(credential, None) assert False, "Should have raised ValueError" - except CredentialExchangError as e: + except CredentialExchangeError as e: assert "auth_scheme is required" in str(e) @patch("google.adk.auth.oauth2_credential_util.OAuth2Session") diff --git a/tests/unittests/auth/test_auth_handler.py b/tests/unittests/auth/test_auth_handler.py index 2a65f7795..d0615e402 100644 --- a/tests/unittests/auth/test_auth_handler.py +++ b/tests/unittests/auth/test_auth_handler.py @@ -397,7 +397,7 @@ def test_get_auth_response_exists( assert result == 
oauth2_credentials_with_auth_uri def test_get_auth_response_not_exists(self, auth_config): - """Test retrieving a non-existent auth response from state.""" + """Test retrieving a nonexistent auth response from state.""" handler = AuthHandler(auth_config) state = MockState() diff --git a/tests/unittests/cli/utils/test_agent_loader.py b/tests/unittests/cli/utils/test_agent_loader.py index 81d6baae6..4a8431935 100644 --- a/tests/unittests/cli/utils/test_agent_loader.py +++ b/tests/unittests/cli/utils/test_agent_loader.py @@ -288,7 +288,7 @@ def test_agent_not_found_error(self): loader = AgentLoader(temp_dir) agents_dir = temp_dir # For use in the expected message string - # Try to load non-existent agent + # Try to load nonexistent agent with pytest.raises(ValueError) as exc_info: loader.load_agent("nonexistent_agent") @@ -330,12 +330,12 @@ def __init__(self): assert "No root_agent found for 'broken_agent'" in str(exc_info.value) def test_agent_internal_module_not_found_error(self): - """Test error when an agent tries to import a non-existent module.""" + """Test error when an agent tries to import a nonexistent module.""" with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) agent_name = "importer_agent" - # Create agent that imports a non-existent module + # Create agent that imports a nonexistent module agent_file = temp_path / f"{agent_name}.py" agent_file.write_text(dedent(f""" from google.adk.agents.base_agent import BaseAgent @@ -528,7 +528,7 @@ def test_yaml_agent_not_found_error(self): loader = AgentLoader(temp_dir) agents_dir = temp_dir # For use in the expected message string - # Try to load non-existent YAML agent + # Try to load nonexistent YAML agent with pytest.raises(ValueError) as exc_info: loader.load_agent("nonexistent_yaml_agent") diff --git a/tests/unittests/code_executors/test_code_executor_context.py b/tests/unittests/code_executors/test_code_executor_context.py index 5f3a237d3..6a85b7a81 100644 --- 
a/tests/unittests/code_executors/test_code_executor_context.py +++ b/tests/unittests/code_executors/test_code_executor_context.py @@ -26,7 +26,7 @@ def empty_state() -> State: @pytest.fixture def context_with_data() -> CodeExecutorContext: - """Fixture for a CodeExecutorContext with some pre-populated data.""" + """Fixture for a CodeExecutorContext with some prepopulated data.""" state_data = { "_code_execution_context": { "execution_session_id": "session123", diff --git a/tests/unittests/evaluation/test_gcs_eval_sets_manager.py b/tests/unittests/evaluation/test_gcs_eval_sets_manager.py index b410fd592..51bf037eb 100644 --- a/tests/unittests/evaluation/test_gcs_eval_sets_manager.py +++ b/tests/unittests/evaluation/test_gcs_eval_sets_manager.py @@ -97,7 +97,7 @@ def test_gcs_eval_sets_manager_create_eval_set_invalid_id( app_name = "test_app" eval_set_id = "invalid-id" - with pytest.raises(ValueError, match="Invalid Eval Set Id"): + with pytest.raises(ValueError, match="Invalid Eval Set ID"): gcs_eval_sets_manager.create_eval_set(app_name, eval_set_id) def test_gcs_eval_sets_manager_list_eval_sets_success( diff --git a/tests/unittests/evaluation/test_local_eval_service.py b/tests/unittests/evaluation/test_local_eval_service.py index 49ebead2e..3f01f249c 100644 --- a/tests/unittests/evaluation/test_local_eval_service.py +++ b/tests/unittests/evaluation/test_local_eval_service.py @@ -126,7 +126,7 @@ async def test_perform_inference_success( mock_eval_sets_manager.get_eval_set.return_value = eval_set mock_inference_result = mock.MagicMock() - eval_service._perform_inference_sigle_eval_item = mock.AsyncMock( + eval_service._perform_inference_single_eval_item = mock.AsyncMock( return_value=mock_inference_result ) @@ -146,7 +146,7 @@ async def test_perform_inference_success( mock_eval_sets_manager.get_eval_set.assert_called_once_with( app_name="test_app", eval_set_id="test_eval_set" ) - assert eval_service._perform_inference_sigle_eval_item.call_count == 2 + assert 
eval_service._perform_inference_single_eval_item.call_count == 2 @pytest.mark.asyncio @@ -166,7 +166,7 @@ async def test_perform_inference_with_case_ids( mock_eval_sets_manager.get_eval_set.return_value = eval_set mock_inference_result = mock.MagicMock() - eval_service._perform_inference_sigle_eval_item = mock.AsyncMock( + eval_service._perform_inference_single_eval_item = mock.AsyncMock( return_value=mock_inference_result ) @@ -182,13 +182,13 @@ async def test_perform_inference_with_case_ids( results.append(result) assert len(results) == 2 - eval_service._perform_inference_sigle_eval_item.assert_any_call( + eval_service._perform_inference_single_eval_item.assert_any_call( app_name="test_app", eval_set_id="test_eval_set", eval_case=eval_set.eval_cases[0], root_agent=dummy_agent, ) - eval_service._perform_inference_sigle_eval_item.assert_any_call( + eval_service._perform_inference_single_eval_item.assert_any_call( app_name="test_app", eval_set_id="test_eval_set", eval_case=eval_set.eval_cases[2], @@ -355,7 +355,7 @@ def test_generate_final_eval_status_doesn_t_throw_on(eval_service): # eval case. # We go over all the possible values of EvalStatus one by one and expect - # the _generate_final_eval_status to handle it without throwing an exeception. + # the _generate_final_eval_status to handle it without throwing an exception. 
for status in EvalStatus: eval_metric_result = EvalMetricResult( metric_name="metric1", threshold=0.5, eval_status=status diff --git a/tests/unittests/evaluation/test_local_eval_sets_manager.py b/tests/unittests/evaluation/test_local_eval_sets_manager.py index 20159c66a..c56d45d6f 100644 --- a/tests/unittests/evaluation/test_local_eval_sets_manager.py +++ b/tests/unittests/evaluation/test_local_eval_sets_manager.py @@ -24,7 +24,7 @@ from google.adk.evaluation.eval_case import Invocation from google.adk.evaluation.eval_set import EvalSet from google.adk.evaluation.local_eval_sets_manager import _EVAL_SET_FILE_EXTENSION -from google.adk.evaluation.local_eval_sets_manager import convert_eval_set_to_pydanctic_schema +from google.adk.evaluation.local_eval_sets_manager import convert_eval_set_to_pydantic_schema from google.adk.evaluation.local_eval_sets_manager import load_eval_set_from_file from google.adk.evaluation.local_eval_sets_manager import LocalEvalSetsManager from google.genai import types as genai_types @@ -32,10 +32,10 @@ import pytest -class TestConvertEvalSetToPydancticSchema: - """Tests convert_eval_set_to_pydanctic_schema method.""" +class TestConvertEvalSetToPydanticSchema: + """Tests convert_eval_set_to_pydantic_schema method.""" - def test_convert_eval_set_to_pydanctic_schema_complete(self): + def test_convert_eval_set_to_pydantic_schema_complete(self): eval_set_id = "test_eval_set" eval_set_in_json_format = [{ "name": "roll_17_sided_dice_twice", @@ -71,7 +71,7 @@ def test_convert_eval_set_to_pydanctic_schema_complete(self): }, }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) @@ -93,14 +93,14 @@ def test_convert_eval_set_to_pydanctic_schema_complete(self): == 1 ) - def test_convert_eval_set_to_pydanctic_schema_minimal(self): + def test_convert_eval_set_to_pydantic_schema_minimal(self): eval_set_id = "test_eval_set" eval_set_in_json_format = [{ "name": 
"minimal_case", "data": [{"query": "Hello", "reference": "World"}], }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) @@ -117,7 +117,7 @@ def test_convert_eval_set_to_pydanctic_schema_minimal(self): == "World" ) - def test_convert_eval_set_to_pydanctic_schema_empty_tool_use_and_intermediate_responses( + def test_convert_eval_set_to_pydantic_schema_empty_tool_use_and_intermediate_responses( self, ): eval_set_id = "test_eval_set" @@ -131,7 +131,7 @@ def test_convert_eval_set_to_pydanctic_schema_empty_tool_use_and_intermediate_re }], }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) @@ -150,7 +150,7 @@ def test_convert_eval_set_to_pydanctic_schema_empty_tool_use_and_intermediate_re == 0 ) - def test_convert_eval_set_to_pydanctic_schema_empty_initial_session(self): + def test_convert_eval_set_to_pydantic_schema_empty_initial_session(self): eval_set_id = "test_eval_set" eval_set_in_json_format = [{ "name": "empty_session", @@ -158,14 +158,14 @@ def test_convert_eval_set_to_pydanctic_schema_empty_initial_session(self): "initial_session": {}, }] - eval_set = convert_eval_set_to_pydanctic_schema( + eval_set = convert_eval_set_to_pydantic_schema( eval_set_id, eval_set_in_json_format ) assert eval_set.eval_set_id == eval_set_id assert eval_set.eval_cases[0].session_input is None - def test_convert_eval_set_to_pydanctic_schema_invalid_data(self): + def test_convert_eval_set_to_pydantic_schema_invalid_data(self): # This test implicitly checks for potential validation errors during Pydantic # object creation eval_set_id = "test_eval_set" @@ -190,7 +190,7 @@ def test_convert_eval_set_to_pydanctic_schema_invalid_data(self): }] with pytest.raises(ValidationError): - convert_eval_set_to_pydanctic_schema(eval_set_id, eval_set_in_json_format) + convert_eval_set_to_pydantic_schema(eval_set_id, 
eval_set_in_json_format) class TestLoadEvalSetFromFile: @@ -300,14 +300,14 @@ def test_load_eval_set_from_file_invalid_json(self, tmp_path): def test_load_eval_set_from_file_invalid_data(self, tmp_path, mocker): # Create a dummy file with invalid data that fails both Pydantic validation # and the old format conversion. We mock the - # convert_eval_set_to_pydanctic_schema function to raise a ValueError + # convert_eval_set_to_pydantic_schema function to raise a ValueError # so that we can assert that the exception is raised. file_path = tmp_path / "invalid_data.json" with open(file_path, "w", encoding="utf-8") as f: f.write('{"invalid": "data"}') mocker.patch( - "google.adk.evaluation.local_eval_sets_manager.convert_eval_set_to_pydanctic_schema", + "google.adk.evaluation.local_eval_sets_manager.convert_eval_set_to_pydantic_schema", side_effect=ValueError(), ) @@ -387,7 +387,7 @@ def test_local_eval_sets_manager_create_eval_set_invalid_id( app_name = "test_app" eval_set_id = "invalid-id" - with pytest.raises(ValueError, match="Invalid Eval Set Id"): + with pytest.raises(ValueError, match="Invalid Eval Set ID"): local_eval_sets_manager.create_eval_set(app_name, eval_set_id) def test_local_eval_sets_manager_list_eval_sets_success( diff --git a/tests/unittests/flows/llm_flows/test_agent_transfer.py b/tests/unittests/flows/llm_flows/test_agent_transfer.py index 5268d0ca0..dd52106d1 100644 --- a/tests/unittests/flows/llm_flows/test_agent_transfer.py +++ b/tests/unittests/flows/llm_flows/test_agent_transfer.py @@ -130,7 +130,7 @@ def test_auto_to_auto_to_single(): ] # sub_agent_1 should still be the current agent. sub_agent_1_1 is single so it should - # not be the current agent, otherwise the conversation will be tied to + # not be the current agent; otherwise, the conversation will be tied to # sub_agent_1_1 forever. 
assert testing_utils.simplify_events(runner.run('test2')) == [ ('sub_agent_1', 'response2'), diff --git a/tests/unittests/flows/llm_flows/test_functions_sequential.py b/tests/unittests/flows/llm_flows/test_functions_sequential.py index a88d90f3d..5ae073c61 100644 --- a/tests/unittests/flows/llm_flows/test_functions_sequential.py +++ b/tests/unittests/flows/llm_flows/test_functions_sequential.py @@ -64,13 +64,13 @@ def increase_by_one(x: int) -> int: assert testing_utils.simplify_contents(mockModel.requests[0].contents) == [ ('user', 'test') ] - # 3 items: user content, functaion call / response for the 1st call + # 3 items: user content, function call / response for the 1st call assert testing_utils.simplify_contents(mockModel.requests[1].contents) == [ ('user', 'test'), ('model', function_call({'x': 1})), ('user', function_response({'result': 2})), ] - # 5 items: user content, functaion call / response for two calls + # 5 items: user content, function call / response for two calls assert testing_utils.simplify_contents(mockModel.requests[2].contents) == [ ('user', 'test'), ('model', function_call({'x': 1})), @@ -78,7 +78,7 @@ def increase_by_one(x: int) -> int: ('model', function_call({'x': 2})), ('user', function_response({'result': 3})), ] - # 7 items: user content, functaion call / response for three calls + # 7 items: user content, function call / response for three calls assert testing_utils.simplify_contents(mockModel.requests[3].contents) == [ ('user', 'test'), ('model', function_call({'x': 1})), diff --git a/tests/unittests/flows/llm_flows/test_functions_simple.py b/tests/unittests/flows/llm_flows/test_functions_simple.py index 166800cc1..063b24bae 100644 --- a/tests/unittests/flows/llm_flows/test_functions_simple.py +++ b/tests/unittests/flows/llm_flows/test_functions_simple.py @@ -31,7 +31,7 @@ def test_simple_function(): function_call_1 = types.Part.from_function_call( name='increase_by_one', args={'x': 1} ) - function_respones_2 = 
types.Part.from_function_response( + function_responses_2 = types.Part.from_function_response( name='increase_by_one', response={'result': 2} ) responses: list[types.Content] = [ @@ -53,7 +53,7 @@ def increase_by_one(x: int) -> int: runner = testing_utils.InMemoryRunner(agent) assert testing_utils.simplify_events(runner.run('test')) == [ ('root_agent', function_call_1), - ('root_agent', function_respones_2), + ('root_agent', function_responses_2), ('root_agent', 'response1'), ] @@ -64,7 +64,7 @@ def increase_by_one(x: int) -> int: assert testing_utils.simplify_contents(mock_model.requests[1].contents) == [ ('user', 'test'), ('model', function_call_1), - ('user', function_respones_2), + ('user', function_responses_2), ] # Asserts the function calls. diff --git a/tests/unittests/tools/bigquery/test_bigquery_query_tool.py b/tests/unittests/tools/bigquery/test_bigquery_query_tool.py index f0e673da6..51104263f 100644 --- a/tests/unittests/tools/bigquery/test_bigquery_query_tool.py +++ b/tests/unittests/tools/bigquery/test_bigquery_query_tool.py @@ -750,7 +750,7 @@ def test_execute_sql_non_select_stmt_write_protected_persistent_target( ): """Test execute_sql tool for non-SELECT query when writes are protected. - This is a special case when the destination table is a persistent/permananent + This is a special case when the destination table is a persistent/permanent one and the protected write is enabled. In this case the operation should fail. """ project = "my_project" diff --git a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml index 0cea00362..1cc139a66 100644 --- a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml +++ b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test.yaml @@ -871,7 +871,7 @@ components: type: string iCalUID: description: |- - Event unique identifier as defined in RFC5545. 
It is used to uniquely identify events accross calendaring systems and must be supplied when importing events via the import method. + Event unique identifier as defined in RFC5545. It is used to uniquely identify events across calendaring systems and must be supplied when importing events via the import method. Note that the iCalUID and the id are not identical and only one of them should be supplied at event creation time. One difference in their semantics is that in recurring events, all occurrences of one event have different ids while they all share the same iCalUIDs. To retrieve an event using its iCalUID, call the events.list method using the iCalUID parameter. To retrieve an event using its id, call the events.get method. type: string id: diff --git a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py index e405ce5b8..16b0d3b84 100644 --- a/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py +++ b/tests/unittests/tools/openapi_tool/openapi_spec_parser/test_tool_auth_handler.py @@ -150,11 +150,11 @@ async def test_openid_connect_with_auth_response( tool_context = create_mock_tool_context() mock_auth_handler = MagicMock() - returned_credentail = AuthCredential( + returned_credential = AuthCredential( auth_type=AuthCredentialTypes.OPEN_ID_CONNECT, oauth2=OAuth2Auth(auth_response_uri='test_auth_response_uri'), ) - mock_auth_handler.get_auth_response.return_value = returned_credentail + mock_auth_handler.get_auth_response.return_value = returned_credential mock_auth_handler_path = 'google.adk.tools.tool_context.AuthHandler' monkeypatch.setattr( mock_auth_handler_path, lambda *args, **kwargs: mock_auth_handler @@ -176,7 +176,7 @@ async def test_openid_connect_with_auth_response( stored_credential = credential_store.get_credential( openid_connect_scheme, openid_connect_credential ) - assert stored_credential == 
returned_credentail + assert stored_credential == returned_credential mock_auth_handler.get_auth_response.assert_called_once() diff --git a/tests/unittests/tools/test_build_function_declaration.py b/tests/unittests/tools/test_build_function_declaration.py index edf3c7128..98d917382 100644 --- a/tests/unittests/tools/test_build_function_declaration.py +++ b/tests/unittests/tools/test_build_function_declaration.py @@ -360,7 +360,7 @@ def function_string_return(param: str) -> str: assert function_decl.response.type == types.Type.STRING -def test_fucntion_with_no_response_annotations(): +def test_function_with_no_response_annotations(): """Test a function that has no response annotations.""" def transfer_to_agent(agent_name: str, tool_context: ToolContext): diff --git a/tests/unittests/tools/test_enterprise_web_search_tool.py b/tests/unittests/tools/test_enterprise_web_search_tool.py index 390da4a78..9eabcf0ba 100644 --- a/tests/unittests/tools/test_enterprise_web_search_tool.py +++ b/tests/unittests/tools/test_enterprise_web_search_tool.py @@ -93,6 +93,4 @@ async def test_process_llm_request_failure_with_multiple_tools_gemini_1_models() await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request ) - assert 'can not be used with other tools in Gemini 1.x.' in str( - exc_info.value - ) + assert 'cannot be used with other tools in Gemini 1.x.' 
in str(exc_info.value) diff --git a/tests/unittests/tools/test_function_tool.py b/tests/unittests/tools/test_function_tool.py index 871f58dcb..4d0682bc5 100644 --- a/tests/unittests/tools/test_function_tool.py +++ b/tests/unittests/tools/test_function_tool.py @@ -29,14 +29,14 @@ def function_for_testing_with_no_args(): async def async_function_for_testing_with_1_arg_and_tool_context( arg1, tool_context ): - """Async function for testing with 1 arge and tool context.""" + """Async function for testing with 1 arg and tool context.""" assert arg1 assert tool_context return arg1 async def async_function_for_testing_with_2_arg_and_no_tool_context(arg1, arg2): - """Async function for testing with 2 arge and no tool context.""" + """Async function for testing with 2 args and no tool context.""" assert arg1 assert arg2 return arg1 @@ -55,7 +55,7 @@ async def __call__(self, arg1, arg2): def function_for_testing_with_1_arg_and_tool_context(arg1, tool_context): - """Function for testing with 1 arge and tool context.""" + """Function for testing with 1 arg and tool context.""" assert arg1 assert tool_context return arg1 @@ -71,7 +71,7 @@ async def __call__(self, arg1, tool_context): def function_for_testing_with_2_arg_and_no_tool_context(arg1, arg2): - """Function for testing with 2 arge and no tool context.""" + """Function for testing with 2 args and no tool context.""" assert arg1 assert arg2 return arg1 @@ -273,7 +273,7 @@ async def test_run_async_missing_all_arg_async_func(): @pytest.mark.asyncio async def test_run_async_with_optional_args_not_set_sync_func(): - """Test that run_async calls the function for sync funciton with optional args not set.""" + """Test that run_async calls the function for sync function with optional args not set.""" def func_with_optional_args(arg1, arg2=None, *, arg3, arg4=None, **kwargs): return f"{arg1},{arg3}" @@ -286,7 +286,7 @@ def func_with_optional_args(arg1, arg2=None, *, arg3, arg4=None, **kwargs): @pytest.mark.asyncio async def 
test_run_async_with_optional_args_not_set_async_func(): - """Test that run_async calls the function for async funciton with optional args not set.""" + """Test that run_async calls the function for async function with optional args not set.""" async def async_func_with_optional_args( arg1, arg2=None, *, arg3, arg4=None, **kwargs diff --git a/tests/unittests/tools/test_gemini_schema_util.py b/tests/unittests/tools/test_gemini_schema_util.py index 31057a41a..6cbb7ad34 100644 --- a/tests/unittests/tools/test_gemini_schema_util.py +++ b/tests/unittests/tools/test_gemini_schema_util.py @@ -185,7 +185,7 @@ def test_to_gemini_schema_nested_dict(self): }, } gemini_schema = _to_gemini_schema(openapi_schema) - # Since metadata is not properties nor item, it will call to_gemini_schema recursively. + # Since metadata is neither properties nor item, it will call to_gemini_schema recursively. assert isinstance(gemini_schema.properties["metadata"], Schema) assert ( gemini_schema.properties["metadata"].type == Type.OBJECT @@ -471,7 +471,7 @@ def test_sanitize_schema_formats_for_gemini_nullable(self): "properties": { "case_id": { "description": "The ID of the case.", - "title": "Case Id", + "title": "Case ID", "type": "string", }, "next_page_token": { @@ -492,7 +492,7 @@ def test_sanitize_schema_formats_for_gemini_nullable(self): "properties": { "case_id": { "description": "The ID of the case.", - "title": "Case Id", + "title": "Case ID", "type": "string", }, "next_page_token": { diff --git a/tests/unittests/tools/test_google_search_tool.py b/tests/unittests/tools/test_google_search_tool.py index 9623875aa..2f090abb1 100644 --- a/tests/unittests/tools/test_google_search_tool.py +++ b/tests/unittests/tools/test_google_search_tool.py @@ -186,7 +186,7 @@ async def test_process_llm_request_with_gemini_1_model_and_existing_tools_raises with pytest.raises( ValueError, match=( - 'Google search tool can not be used with other tools in Gemini 1.x' + 'Google search tool cannot be used with 
other tools in Gemini 1.x' ), ): await tool.process_llm_request( @@ -215,7 +215,7 @@ async def test_process_llm_request_with_path_based_gemini_1_model_and_existing_t with pytest.raises( ValueError, match=( - 'Google search tool can not be used with other tools in Gemini 1.x' + 'Google search tool cannot be used with other tools in Gemini 1.x' ), ): await tool.process_llm_request( diff --git a/tests/unittests/tools/test_url_context_tool.py b/tests/unittests/tools/test_url_context_tool.py index cbbbb0c9a..eaa739159 100644 --- a/tests/unittests/tools/test_url_context_tool.py +++ b/tests/unittests/tools/test_url_context_tool.py @@ -146,7 +146,7 @@ async def test_process_llm_request_with_gemini_1_model_raises_error(self): ) with pytest.raises( - ValueError, match='Url context tool can not be used in Gemini 1.x' + ValueError, match='Url context tool cannot be used in Gemini 1.x' ): await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request @@ -166,7 +166,7 @@ async def test_process_llm_request_with_path_based_gemini_1_model_raises_error( ) with pytest.raises( - ValueError, match='Url context tool can not be used in Gemini 1.x' + ValueError, match='Url context tool cannot be used in Gemini 1.x' ): await tool.process_llm_request( tool_context=tool_context, llm_request=llm_request diff --git a/tests/unittests/tools/test_vertex_ai_search_tool.py b/tests/unittests/tools/test_vertex_ai_search_tool.py index 12ee2f60f..0df19288a 100644 --- a/tests/unittests/tools/test_vertex_ai_search_tool.py +++ b/tests/unittests/tools/test_vertex_ai_search_tool.py @@ -207,7 +207,7 @@ async def test_process_llm_request_with_gemini_1_and_other_tools_raises_error( with pytest.raises( ValueError, match=( - 'Vertex AI search tool can not be used with other tools in' + 'Vertex AI search tool cannot be used with other tools in' ' Gemini 1.x' ), ): @@ -237,7 +237,7 @@ async def test_process_llm_request_with_path_based_gemini_1_and_other_tools_rais with pytest.raises( 
ValueError, match=( - 'Vertex AI search tool can not be used with other tools in' + 'Vertex AI search tool cannot be used with other tools in' ' Gemini 1.x' ), ): diff --git a/tests/unittests/utils/test_model_name_utils.py b/tests/unittests/utils/test_model_name_utils.py index 4e4ddd06a..9c0d946c6 100644 --- a/tests/unittests/utils/test_model_name_utils.py +++ b/tests/unittests/utils/test_model_name_utils.py @@ -115,7 +115,7 @@ def test_is_gemini_model_edge_cases(self): assert is_gemini_model('gemini_1_5_flash') is False def test_is_gemini_model_case_sensitivity(self): - """Test that model detection is case sensitive.""" + """Test that model detection is case-sensitive.""" assert is_gemini_model('Gemini-2.5-pro') is False assert is_gemini_model('GEMINI-2.5-pro') is False assert is_gemini_model('gemini-2.5-PRO') is True # Only the start matters