diff --git a/api/ee/databases/postgres/migrations/core/versions/863f8ebc200f_extend_app_type_again.py b/api/ee/databases/postgres/migrations/core/versions/863f8ebc200f_extend_app_type_again.py new file mode 100644 index 0000000000..7a52c3d62a --- /dev/null +++ b/api/ee/databases/postgres/migrations/core/versions/863f8ebc200f_extend_app_type_again.py @@ -0,0 +1,75 @@ +"""Extend app_type + +Revision ID: 863f8ebc200f +Revises: 3b5f5652f611 +Create Date: 2025-01-08 10:24:00 +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = "863f8ebc200f" +down_revision: Union[str, None] = "3b5f5652f611" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +# The table/column that uses the enum +TABLE = "app_db" +COLUMN = "app_type" +TYPE_NAME = "app_type_enum" +TYPE_TEMP = "app_type_enum_temp" + +# Exact labels +ORIGINAL = ( + "CHAT_TEMPLATE", + "COMPLETION_TEMPLATE", + "CHAT_SERVICE", + "COMPLETION_SERVICE", + "CUSTOM", +) +EXTENDED = ORIGINAL + ("SDK_CUSTOM",) + + +def _create_enum(name: str, labels: tuple[str, ...]) -> None: + labels_sql = ",".join(f"'{v}'" for v in labels) + op.execute(f"CREATE TYPE {name} AS ENUM ({labels_sql})") + + +def _retype_column(to_type: str) -> None: + op.execute( + f""" + ALTER TABLE {TABLE} + ALTER COLUMN {COLUMN} + TYPE {to_type} + USING {COLUMN}::text::{to_type} + """ + ) + + +def upgrade(): + # 1) Create the replacement enum with ALL desired values + _create_enum(TYPE_TEMP, EXTENDED) + + # 2) Point the column to the tmp type + _retype_column(TYPE_TEMP) + + # 3) Drop old type and rename tmp to the canonical name + op.execute(f"DROP TYPE {TYPE_NAME}") + op.execute(f"ALTER TYPE {TYPE_TEMP} RENAME TO {TYPE_NAME}") + + +def downgrade(): + # 1) Recreate the enum WITHOUT the added values + _create_enum(TYPE_TEMP, ORIGINAL) + + # 2) Point the column back to the original label set + 
_retype_column(TYPE_TEMP) + + # 3) Drop current type and rename tmp back to the canonical name + op.execute(f"DROP TYPE {TYPE_NAME}") + op.execute(f"ALTER TYPE {TYPE_TEMP} RENAME TO {TYPE_NAME}") diff --git a/api/ee/databases/postgres/migrations/core/versions/baa02d66a365_migrate_code_evaluators.py b/api/ee/databases/postgres/migrations/core/versions/baa02d66a365_migrate_code_evaluators.py new file mode 100644 index 0000000000..7363127679 --- /dev/null +++ b/api/ee/databases/postgres/migrations/core/versions/baa02d66a365_migrate_code_evaluators.py @@ -0,0 +1,63 @@ +"""migrate data.script from string to object + +Revision ID: baa02d66a365 +Revises: 863f8ebc200f +Create Date: 2025-11-06 15:49:00 +""" + +from typing import Sequence, Union +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = "baa02d66a365" +down_revision: Union[str, None] = "863f8ebc200f" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Convert data.script from a JSON string to: + # {"content": , "runtime": "python"} + op.execute( + sa.text( + """ + UPDATE public.workflow_revisions + SET data = jsonb_set( + data::jsonb, + '{script}', + jsonb_build_object( + 'content', data->>'script', + 'runtime', 'python' + ) + )::json + WHERE data->>'script' IS NOT NULL + AND json_typeof(data->'script') = 'string'; + """ + ) + ) + + +def downgrade() -> None: + # Revert only objects shaped like: + # {"content": , "runtime": "python"} -> "" + op.execute( + sa.text( + """ + UPDATE public.workflow_revisions + SET data = jsonb_set( + data::jsonb, + '{script}', + to_jsonb( (data->'script'->>'content') ) + )::json + WHERE json_typeof(data->'script') = 'object' + AND (data->'script') ? 'content' + AND json_typeof(data->'script'->'content') = 'string' + AND ( + (data->'script' ? 
'runtime') IS FALSE + OR (data->'script'->>'runtime') = 'python' + ); + """ + ) + ) diff --git a/api/ee/tests/manual/evaluations/sdk/__init__.py b/api/ee/tests/manual/evaluations/sdk/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/ee/tests/manual/evaluations/sdk/openai_agent.py b/api/ee/tests/manual/evaluations/sdk/openai_agent.py new file mode 100644 index 0000000000..575e370c7d --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/openai_agent.py @@ -0,0 +1,98 @@ +# /// script +# dependencies = ["agenta", "openai-agents", "openinference-instrumentation-openai-agents", "ipdb", "opentelemetry-api", "opentelemetry-sdk"] +# /// + +from agents import ( + Agent, + InputGuardrail, + GuardrailFunctionOutput, + Runner, + WebSearchTool, + run_demo_loop, +) +from agents.exceptions import InputGuardrailTripwireTriggered +from pydantic import BaseModel +import asyncio +from dotenv import load_dotenv +import os + +load_dotenv() +import agenta as ag + +# from openinference.instrumentation.openai_agents import OpenAIAgentsInstrumentor +# from opentelemetry import trace +# from opentelemetry.sdk.trace.export import ConsoleSpanExporter, BatchSpanProcessor +# from opentelemetry.sdk.trace import ReadableSpan +# from opentelemetry.sdk.trace.export import SpanProcessor +# from opentelemetry.trace import Span +from typing import Optional + +# os.environ["AGENTA_API_KEY"] = "" + +# ag.init() +# OpenAIAgentsInstrumentor().instrument() + + +class AgentaQuestionOutput(BaseModel): + is_agenta_question: bool + reasoning: str + + +guardrail_agent = Agent( + name="Guardrail check", + instructions="Check if the user is asking something about Agenta, the LLMOps platform. Their question might be ambiguous, so you need to be careful.", + output_type=AgentaQuestionOutput, + model="gpt-4o-mini", +) + +web_research_agent = Agent( + name="Web Research Agent", + handoff_description="Specialist agent for web research. 
You will use this agent to research the user's question when the documentation is not enough. You mainly search the websites agenta.ai and docs.agenta.ai", + instructions="You will search the web to answer the user's question about Agenta, the LLMOps platform.", + tools=[ + WebSearchTool(), + ], + model="gpt-4o-mini", +) + + +async def guardrail_function(ctx, agent, input_data): + result = await Runner.run(guardrail_agent, input_data, context=ctx.context) + final_output = result.final_output_as(AgentaQuestionOutput) + return GuardrailFunctionOutput( + output_info=final_output, + tripwire_triggered=not final_output.is_agenta_question, + ) + + +triage_agent = Agent( + name="Triage Agent", + instructions="You determine which agent to use based on the user's question on agenta", + handoffs=[web_research_agent], + input_guardrails=[ + InputGuardrail(guardrail_function=guardrail_function), + ], + model="gpt-4o-mini", +) + +# async def main(): +# # Example 1: History question +# # agent = Agent(name="Assistant", instructions="You are a helpful assistant.") +# await run_demo_loop(triage_agent) + +# # try: +# # result = await Runner.run(triage_agent, "What is the meaning of life?") +# # import ipdb; ipdb.set_trace() +# # print(result.final_output) +# # except InputGuardrailTripwireTriggered as e: +# # print("Guardrail blocked this input:", e) + +# # # Example 2: General/philosophical question +# # try: +# # result = await Runner.run(triage_agent, "What is the meaning of life?") +# # print(result.final_output) +# # except InputGuardrailTripwireTriggered as e: +# # print("Guardrail blocked this input:", e) + +# if __name__ == "__main__": +# asyncio.run(main()) diff --git a/api/ee/tests/manual/evaluations/sdk/quick_start.py b/api/ee/tests/manual/evaluations/sdk/quick_start.py new file mode 100644 index 0000000000..a60fdebfc9 --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/quick_start.py @@ -0,0 +1,206 @@ +""" +Agenta SDK Quick Start Tutorial 
+================================ + +This tutorial demonstrates how to: +1. Create a simple application that returns country capitals +2. Create evaluators to check if the application's output is correct +3. Run an evaluation to test your application + +The new @application and @evaluator decorators make this simple and intuitive! +""" + +from dotenv import load_dotenv + +load_dotenv() + +import asyncio +import random + +from agenta.sdk.evaluations import aevaluate, display + +import agenta as ag +from agenta.sdk.workflows import builtin + +# Initialize Agenta SDK +ag.init() + + +# Test data: countries and their capitals +my_testcases_data = [ + {"country": "Germany", "capital": "Berlin"}, + {"country": "France", "capital": "Paris"}, + {"country": "Spain", "capital": "Madrid"}, + {"country": "Italy", "capital": "Rome"}, +] + + +# ============================================================================ +# STEP 1: Define your application +# ============================================================================ + + +@ag.application( + slug="capital_quiz_app", + # + name="Capital Quiz Application", + description="Returns the capital of a given country (sometimes incorrectly for testing)", +) +async def capital_quiz_app(capital: str, country: str): + """ + A simple application that returns country capitals. 
+ + Args: + capital: The expected capital (from testcase) + country: The country name (from testcase) + + Returns: + The capital city name (sometimes wrong for testing purposes) + """ + # Randomly return wrong answer for testing + chance = random.choice([True, False, True]) + return capital if chance else "Aloha" + + +# ============================================================================ +# STEP 2: Define your evaluators +# ============================================================================ + + +@ag.evaluator( + slug="exact_match_evaluator", + # + name="Exact Match Evaluator", + description="Checks if the application's output exactly matches the expected capital", +) +async def exact_match_evaluator(capital: str, outputs: str): + """ + Evaluates if the application's output matches the expected answer. + + Args: + capital: The expected capital (from testcase) + outputs: What the application returned + + Returns: + Dictionary with score and success flag + """ + is_correct = outputs == capital + return { + "score": 1 if is_correct else 0, + "success": is_correct, + } + + +@ag.evaluator( + slug="random_score_evaluator", + # + name="Random Score Evaluator", + description="Assigns a random score (for demonstration purposes)", +) +async def random_score_evaluator(capital: str): + """ + A demo evaluator that assigns random scores. 
+ + Args: + capital: The expected capital (from testcase, not used but shows it's available) + + Returns: + Dictionary with random score + """ + score = random.randint(0, 100) + return { + "myscore": score, + "success": score > 30, + } + + +# ============================================================================ +# STEP 3: Use builtin evaluators +# ============================================================================ + +# You can also use Agenta's builtin evaluators like LLM-as-a-judge +llm_judge_evaluator = builtin.auto_ai_critique( + slug="llm_judge_evaluator", + # + name="LLM Judge Evaluator", + description="Uses an LLM to judge if the answer is correct", + # + correct_answer_key="capital", + prompt_template=[ + { + "role": "system", + "content": "You are a judge that evaluates geography knowledge.", + }, + { + "role": "user", + "content": ( + "The correct capital is: {{capital}}\n" + "The student's answer is: {{outputs}}\n\n" + "Is the student's answer correct?\n" + "Respond with ONLY a number from 0.0 (completely wrong) to 1.0 (completely correct).\n" + "Nothing else - just the number." 
+ ), + }, + ], +) + + +# ============================================================================ +# STEP 4: Run the evaluation +# ============================================================================ + + +async def run_evaluation(): + """Create a testset and run evaluation with your app and evaluators.""" + + # Create a testset from your test data + print("Creating testset...") + my_testset = await ag.testsets.aupsert( + name="Country Capitals", + data=my_testcases_data, + ) + + if not my_testset or not my_testset.id: + print("❌ Failed to create testset") + return None + + print(f"✅ Testset created with {len(my_testcases_data)} test cases\n") + + # Run evaluation + print("Running evaluation...") + eval_result = await aevaluate( + testsets=[my_testset.id], + applications=[capital_quiz_app], + evaluators=[ + exact_match_evaluator, + random_score_evaluator, + llm_judge_evaluator, + ], + ) + + return eval_result + + +async def main(): + """Main entry point.""" + print("=" * 70) + print("Agenta SDK Quick Start Tutorial") + print("=" * 70) + print() + + eval_data = await run_evaluation() + + if not eval_data: + print("❌ Evaluation failed") + exit(1) + + print("\n" + "=" * 70) + print("Evaluation Results") + print("=" * 70) + # await display(eval_data) + + print("\n✅ Tutorial complete!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/api/ee/tests/manual/evaluations/sdk/requirements.txt b/api/ee/tests/manual/evaluations/sdk/requirements.txt new file mode 100644 index 0000000000..3d152e9bfe --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/requirements.txt @@ -0,0 +1,3 @@ +openai-agents +openinference-instrumentation-openai-agents +dotenv \ No newline at end of file diff --git a/api/ee/tests/manual/evaluations/sdk/test_handlers.py b/api/ee/tests/manual/evaluations/sdk/test_handlers.py new file mode 100644 index 0000000000..4808a14421 --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/test_handlers.py @@ -0,0 +1,202 @@ +import os + 
+from dotenv import load_dotenv + +load_dotenv() + +from fastapi import FastAPI + +os.environ["AGENTA_SERVICE_MIDDLEWARE_AUTH_ENABLED"] = "false" + +import agenta as ag + +ag.init() + + +from agenta.sdk.models.workflows import ( + WorkflowServiceBatchResponse, + WorkflowServiceStreamResponse, + WorkflowServiceRequestData, + WorkflowServiceResponseData, +) +from agenta.sdk.decorators.routing import ( + route, + default_app, + create_app, +) +from agenta.sdk.decorators.running import ( + WorkflowServiceRequest, + workflow, +) + +custom_app = create_app() + +public_app = FastAPI() + +public_app.mount("/services", app=default_app) + +app = public_app + + +@route("/tokens-async", app=default_app) +async def async_gen(request: WorkflowServiceRequest): + for i in range((request.data.inputs or {}).get("count", 3)): + yield {"async_token": chr(97 + i)} + + +""" +curl -i -N \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-async/invoke +""" + +""" +curl -i -N \ + -H "Accept: text/event-stream" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-async/invoke +""" + + +@route("/tokens-sync", app=default_app) +def sync_tokens(request: WorkflowServiceRequest): + for i in range((request.data.inputs or {}).get("count", 2)): + yield {"async_token": chr(120 + i)} + + +""" +curl -i -N \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-sync/invoke +""" + +""" +curl -i -N \ + -H "Accept: text/event-stream" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-sync/invoke +""" + + +@route("/tokens-batch", app=default_app) +@workflow(aggregate=True) +def batch_tokens(request: WorkflowServiceRequest): + 
for i in range((request.data.inputs or {}).get("count", 2)): + yield {"token": chr(ord("A") + i)} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-batch/invoke +""" + + +@route("/greet-async", app=default_app) +async def greet(request: WorkflowServiceRequest): + name = (request.data.inputs or {}).get("name", "world") + return {"message": f"Hello, {name}!"} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"name": "Agenta"}}}' \ + http://127.0.0.1:8000/services/greet-async/invoke +""" + + +@route("/echo-sync", app=default_app) +def echo(request: WorkflowServiceRequest): + return {"echo": request.data.inputs} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/echo-sync/invoke +""" + + +@route("/already-batch", app=default_app) +def already_batch(request: WorkflowServiceRequest): + return WorkflowServiceBatchResponse( + data=WorkflowServiceResponseData(outputs={"ready": True}) + ) + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/already-batch/invoke +""" + + +@route("/already-stream", app=default_app) +def already_stream(request: WorkflowServiceRequest): + async def iterator(): + yield {"ready": "no"} + yield {"ready": "go"} + + return WorkflowServiceStreamResponse(generator=iterator) + + +""" +curl -i -N \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/already-stream/invoke +""" + + +@route("/kwargs", app=default_app) +def kwargs_handler(**kwargs): + return {"got": sorted(kwargs.keys())} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + 
http://127.0.0.1:8000/services/kwargs/invoke +""" + + +@route("/unknown", app=default_app) +def unknown_handler(unknown: str): + return {"got": unknown} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/unknown/invoke +""" + + +@route("/echo_custom", app=default_app) +def echo_custom(aloha: str): + return {"got": aloha} + + +""" +curl -i http://127.0.0.1:8000/services/echo_custom/inspect +""" + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/echo_custom/invoke +""" diff --git a/api/ee/tests/manual/evaluations/sdk/test_local.py b/api/ee/tests/manual/evaluations/sdk/test_local.py new file mode 100644 index 0000000000..579770c6a9 --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/test_local.py @@ -0,0 +1,680 @@ +import asyncio + +from dotenv import load_dotenv + +load_dotenv() + +import agenta as ag + +ag.init() + +from agenta.sdk.models.workflows import ( + WorkflowServiceRequestData, + WorkflowServiceResponseData, + WorkflowServiceRequest, +) +from agenta.sdk.decorators.running import workflow +from agenta.sdk.decorators.tracing import instrument +from agenta.sdk.workflows import builtin + +print("-----------------------------------------------------------------------") + +from agenta.sdk.workflows.handlers import echo_v0 + + +@instrument(annotate=True) +def echo_custom(aloha: str): + return {"got": aloha} + + +echo_manual = workflow(uri="echo")() + + +print(echo_custom(aloha="mahalo"), echo_custom) +print(echo_v0(aloha="mahalo"), echo_v0) +print(echo_manual(aloha="mahalo"), echo_manual) + + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_exact_match = builtin.auto_exact_match() + +print( + builtin_auto_exact_match( + 
inputs={"correct_answer": "mahalo"}, + outputs="mahalo", + ), + builtin_auto_exact_match, +) +print( + builtin_auto_exact_match( + inputs={"correct_answer": "mahalo"}, + outputs="mahala", + ), + builtin_auto_exact_match, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_exact_match.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + + +builtin_auto_regex_test = builtin.auto_regex_test( + regex_pattern="^ma.*o$", +) + +print( + builtin_auto_regex_test( + outputs="mahalo", + ), + builtin_auto_regex_test, +) + +print( + builtin_auto_regex_test( + outputs="mahala", + ), + builtin_auto_regex_test, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_regex_test.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_field_match_test = builtin.field_match_test( + json_field="answer", + correct_answer_key="aloha", +) + +print( + builtin_field_match_test( + inputs={"aloha": "mahalo"}, + outputs={"answer": "mahalo"}, + ), + builtin_field_match_test, +) + +print( + builtin_field_match_test( + inputs={"aloha": "mahalo"}, + outputs={"answer": "mahala"}, + ), + builtin_field_match_test, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_field_match_test.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() 
+print("-----------------------------------------------------------------------") + +print("auto_webhook_test") + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_custom_code_run = builtin.auto_custom_code_run( + code="evaluate = lambda app_params, inputs, output, correct_answer: 1.0 if output in correct_answer else 0.0", +) + +print( + asyncio.run( + builtin_auto_custom_code_run( + inputs={"correct_answer": "mahalo"}, + outputs="mahalo", + ) + ), + builtin_auto_custom_code_run, +) + + +print( + asyncio.run( + builtin_auto_custom_code_run( + inputs={"correct_answer": "mahalo"}, + outputs="mahala", + ) + ), + builtin_auto_custom_code_run, +) + + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_custom_code_run.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_ai_critique = builtin.auto_ai_critique( + prompt_template=[ + { + "role": "system", + "content": "You are an evaluator grading an LLM App.\nYou will be given INPUTS, the LLM APP OUTPUT, the CORRECT ANSWER used in the LLM APP.\n\n- Ensure that the LLM APP OUTPUT has the same meaning as the CORRECT ANSWER\n\n\n\n-The score should be between 0 and 1 with one decimal point\n-A score of 1 means that the answer is perfect. This is the highest (best) score. Only when perfect match, otherwise something betweeen 0 and 1.\nA score of 0 means that the answer does not meet any of the criteria. This is the lowest possible score you can give.\n\n\n\nANSWER ONLY THE SCORE. DO NOT USE MARKDOWN. 
DO NOT PROVIDE ANYTHING OTHER THAN THE NUMBER\n", + }, + { + "role": "user", + "content": "{{correct_answer}}\n{{prediction}}", + }, + ], +) + +print( + asyncio.run( + builtin_ai_critique( + inputs={ + "country": "Germany", + "correct_answer": "Berlin", + }, + outputs="Berlin", + ) + ), + builtin_ai_critique, +) + +print( + asyncio.run( + builtin_ai_critique( + inputs={ + "country": "Germany", + "correct_answer": "Berlin", + }, + outputs="Paris", + ) + ), + builtin_ai_critique, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_ai_critique.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run( + builtin_ai_critique.invoke( + request=WorkflowServiceRequest( + data=WorkflowServiceRequestData( + inputs={ + "country": "Germany", + "correct_answer": "Berlin", + }, + outputs="Berlin", + ) + ) + ) + ).model_dump(mode="json", exclude_none=True), + builtin_ai_critique, +) + +print( + asyncio.run( + builtin_ai_critique.invoke( + request=WorkflowServiceRequest( + data=WorkflowServiceRequestData( + inputs={ + "country": "Germany", + "correct_answer": "Berlin", + }, + outputs="Paris", + ) + ) + ) + ).model_dump(mode="json", exclude_none=True), + builtin_ai_critique, +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_starts_with = builtin.auto_starts_with( + prefix="ma", +) + +print( + builtin_auto_starts_with( + outputs="mahalo", + ), + builtin_auto_starts_with, +) + +print( + builtin_auto_starts_with( + outputs="mohalo", + ), + builtin_auto_starts_with, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_starts_with.inspect()).model_dump( + mode="json", + exclude_none=True, + ), 
+) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_ends_with = builtin.auto_ends_with( + suffix="lo", +) + +print( + builtin_auto_ends_with( + outputs="mahalo", + ), + builtin_auto_ends_with, +) + +print( + builtin_auto_ends_with( + outputs="mahala", + ), + builtin_auto_ends_with, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_ends_with.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_contains = builtin.auto_contains( + substring="ha", +) + +print( + builtin_auto_contains( + outputs="mahalo", + ), + builtin_auto_contains, +) + +print( + builtin_auto_contains( + outputs="maala", + ), + builtin_auto_contains, +) + + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_contains.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_contains_any = builtin.auto_contains_any( + substrings=["maha", "lo"], +) + +print( + builtin_auto_contains_any( + outputs="mahalo", + ), + builtin_auto_contains_any, +) + +print( + builtin_auto_contains_any( + outputs="mohala", + ), + builtin_auto_contains_any, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_contains_any.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() 
+print("-----------------------------------------------------------------------") + +builtin_auto_contains_all = builtin.auto_contains_all( + substrings=["maha", "lo"], +) + +print( + builtin_auto_contains_all( + outputs="mahalo", + ), + builtin_auto_contains_all, +) + +print( + builtin_auto_contains_all( + outputs="mahala", + ), + builtin_auto_contains_all, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_contains_all.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_contains_json = builtin.auto_contains_json() + +print( + builtin_auto_contains_json( + outputs='{"aloha": "mahalo"}', + ), + builtin_auto_contains_json, +) + +print( + builtin_auto_contains_json( + outputs={"aloha": "mahalo"}, + ), + builtin_auto_contains_json, +) + +print( + builtin_auto_contains_json( + outputs="mahala", + ), + builtin_auto_contains_json, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_contains_json.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_json_diff = builtin.auto_json_diff() + +print( + builtin_auto_json_diff( + inputs={"correct_answer": {"aloha": "mahalo"}}, + outputs={"aloha": "mahalo"}, + ), + builtin_auto_json_diff, +) + +print( + builtin_auto_json_diff( + inputs={"correct_answer": {"aloha": "mahalo"}}, + outputs={"mahalo": "aloha"}, + ), + builtin_auto_json_diff, +) + + +print("-----------------------------------------------------------------------") + +print( + 
asyncio.run(builtin_auto_json_diff.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + + +builtin_auto_levenshtein_distance = builtin.auto_levenshtein_distance( + threshold=0.9, +) + +print( + builtin_auto_levenshtein_distance( + inputs={"correct_answer": "mahalo"}, + outputs="mahalo", + ), + builtin_auto_levenshtein_distance, +) + +print( + builtin_auto_levenshtein_distance( + inputs={"correct_answer": "mahalo"}, + outputs="mahala", + ), + builtin_auto_levenshtein_distance, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_levenshtein_distance.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_similarity_match = builtin.auto_similarity_match( + threshold=0.9, +) + +print( + builtin_auto_similarity_match( + inputs={"correct_answer": "mahalo"}, + outputs="mahalo", + ), + builtin_auto_similarity_match, +) + +print( + builtin_auto_similarity_match( + inputs={"correct_answer": "mahalo"}, + outputs="mohala", + ), + builtin_auto_similarity_match, +) + + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_similarity_match.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_auto_semantic_similarity = builtin.auto_semantic_similarity( + threshold=0.9, +) + +print( + asyncio.run( + builtin_auto_semantic_similarity( + inputs={"correct_answer": 
"mahalo"}, + outputs="mahalo", + ) + ), + builtin_auto_semantic_similarity, +) + +print( + asyncio.run( + builtin_auto_semantic_similarity( + inputs={"correct_answer": "mahalo"}, + outputs="mohala", + ) + ), + builtin_auto_semantic_similarity, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_auto_semantic_similarity.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_completion = builtin.completion( + config=builtin.SinglePromptConfig( + **{ + "prompt": { + "messages": [ + { + "role": "user", + "content": "What's the capital of {{country}}?", + } + ] + } + } # type: ignore + ), +) + +print( + asyncio.run( + builtin_completion( + inputs={"country": "Germany"}, + ) + ), + builtin_completion, +) + + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_completion.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + + +print("-----------------------------------------------------------------------") +print() +print("-----------------------------------------------------------------------") + +builtin_chat = builtin.chat( + config=builtin.SinglePromptConfig( + **{ + "prompt": { + "messages": [ + { + "role": "assistant", + "content": "Always respond in uppercase.", + } + ] + } + } + ), +) + +print( + asyncio.run( + builtin_chat( + messages=[ + { + "role": "user", + "content": "What's the capital of Germany?", + } + ] + ) + ), + builtin_chat, +) + +print("-----------------------------------------------------------------------") + +print( + asyncio.run(builtin_chat.inspect()).model_dump( + mode="json", + exclude_none=True, + ), +) + + +print("-----------------------------------------------------------------------") diff --git 
"""Manual end-to-end test: run an SDK evaluation loop (app + evaluators) over a capitals testset."""

import asyncio
import random

from dotenv import load_dotenv

load_dotenv()

import agenta as ag

ag.init()

from agenta.sdk.decorators import application, evaluator
from agenta.sdk.workflows import builtin
from agenta.sdk.evaluations import aevaluate, display


# Each testcase maps a country to its expected capital.
my_testcases_data = [
    {
        "country": "Germany",
        "capital": "Berlin",
    },
    {
        "country": "France",
        "capital": "Paris",
    },
    {
        "country": "Spain",
        "capital": "Madrid",
    },
    {
        "country": "Italy",
        "capital": "Rome",
    },
]


@application(
    slug="my_application",
    name="my_application",
    description="A simple workflow that returns the capital of a country",
    parameters=dict(aloha="mahalo"),
)
async def my_application(capital: str, country: str):
    """Toy app: returns the correct capital ~2/3 of the time, else a wrong answer."""
    chance = random.choice([True, False, True])
    _outputs = capital if chance else "Aloha"

    return _outputs


@evaluator(
    slug="my_match_workflow",
    name="my_match_workflow",
    description="A simple workflow that returns the capital of a country",
    parameters=dict(aloha="mahalo"),
)
async def my_match_evaluator(capital: str, outputs: str):
    """Exact-match evaluator: score is 1 iff the app output equals the capital."""
    matched = outputs == capital
    # FIX: replaced `outputs == capital and 1 or 0` with a conditional
    # expression; the and/or idiom silently misbehaves whenever the
    # "true" branch value is falsy.
    _outputs = {
        "score": 1 if matched else 0,
        "success": matched,
    }

    return _outputs


@evaluator(
    slug="my_random_evaluator",
    name="my_random_evaluator",
    description="A simple evaluator that returns a random score",
)
async def my_random_evaluator(capital: str):
    """Random evaluator: success iff a random score in [0, 100] exceeds 30."""
    score = random.randint(0, 100)
    _outputs = {
        "myscore": score,
        "success": score > 30,
    }

    return _outputs


# LLM-as-a-judge evaluator built from the builtin AI-critique workflow.
my_llm_as_a_judge_evaluator = builtin.auto_ai_critique(
    slug="my_llm_as_a_judge_evaluator",
    name="my_llm_as_a_judge_evaluator",
    description="Use an LLM to judge if the previous answer is correct",
    correct_answer_key="capital",
    model="openai/gpt-4o-mini",
    prompt_template=[
        {
            "role": "system",
            "content": "You are a judge that evaluates if the previous answer is correct.",
        },
        {
            "role": "user",
            "content": (
                "The correct answer is {{capital}}.\n"
                "The previous answer is {{outputs}}.\n"
                "Is the previous answer correct? Answer with a decimal 'score' from 0.0 to 1.0. "
                "Nothing else, just a number, no boilerplate, nothing, JUST A FLOAT"
            ),
        },
    ],
)


async def run_evaluation():
    """Upsert the testset and launch one evaluation; returns None on testset failure."""
    my_testset = await ag.testsets.aupsert(
        name="Capitals",
        data=my_testcases_data,
    )

    if not my_testset or not my_testset.id:
        print("Failed to create or update testset")
        return None

    # FIX: local renamed from `eval`, which shadowed the `eval` builtin.
    evaluation = await aevaluate(
        name="Capital Evaluation",
        description="An evaluation to test the capitals application",
        testsets=[
            my_testset.id,
        ],
        applications=[
            my_application,
        ],
        evaluators=[
            my_match_evaluator,
            my_random_evaluator,
            my_llm_as_a_judge_evaluator,
        ],
    )

    return evaluation


async def main():
    """Entry point: run the evaluation and exit non-zero on failure."""
    eval_data = await run_evaluation()

    if not eval_data:
        exit(1)

    # await display(eval_data)


if __name__ == "__main__":
    asyncio.run(main())
# Enable OpenAI Agents tracing instrumentation (exports spans used below).
OpenAIAgentsInstrumentor().instrument()


# Testcases: question, grading rubric ("rubic"), and whether the input
# guardrail is expected to trip for that question.
my_testcases_data = [
    {
        "question": "What is agenta?",
        "rubic": "The answer should mention llmops platform and open-source",
        "guardrail": False,
    },
    {
        "question": "How much does agenta cost?",
        "rubic": "The answer should mention the three pricing tiers, the cost in usd, how much traces costs, retention periods, features, and the free tier",
        "guardrail": False,
    },
    {
        "question": "How do I use azure in Agenta?",
        "rubic": "The answer should mention the azure provider and the steps to set it up in the model hub",
        "guardrail": False,
    },
    {
        "question": "What is the meaning of life?",
        "rubic": "The agent should refuse to answer",
        "guardrail": True,
    },
]


@application(
    slug="agenta_agent",
    name="agenta_agent",
    description="A simple workflow that returns the answer to a question",
)
async def agenta_agent(
    question: str,
):
    """Run the triage agent on `question`; refuse politely if the guardrail trips."""
    try:
        outputs = await Runner.run(triage_agent, question)
        return outputs.final_output
    # FIX: dropped the unused `as e` binding on the caught exception.
    except InputGuardrailTripwireTriggered:
        return "I'm sorry, I can't answer that question."


@evaluator(
    slug="my_random_evaluator",
    name="my_random_evaluator",
    description="A simple evaluator that returns a random score",
)
async def my_random_evaluator(question: str, outputs: str):
    """Random evaluator: success iff a random score in [0, 100] exceeds 30."""
    # FIX: removed a stale commented-out line that referenced an undefined
    # `request` object.
    score = random.randint(0, 100)
    _outputs = {
        "myscore": score,
        "success": score > 30,
    }

    return _outputs


@evaluator(
    slug="guardrail_span_evaluator",
    name="guardrail_span_evaluator",
    description="Evaluates if the agent's guardrail logic was correctly triggered by inspecting the trace for the 'is_agenta_question' flag.",
)
async def guardrail_span_evaluator(question: str, guardrail: bool, trace):
    """Check the trace: the detected `is_agenta_question` flag must equal
    `not guardrail` (the guardrail is expected to trip on non-Agenta questions).

    NOTE(review): assumes the trace is nested dicts/lists whose spans carry a
    "span_name" key — confirm against the tracing exporter's output shape.
    """

    def find_span_by_name(obj, name: str):
        # Depth-first search for the first dict with span_name == name.
        if isinstance(obj, dict):
            if obj.get("span_name") == name:
                return obj
            for value in obj.values():
                found = find_span_by_name(value, name)
                if found is not None:
                    return found
        elif isinstance(obj, list):
            for item in obj:
                found = find_span_by_name(item, name)
                if found is not None:
                    return found
        return None

    def find_value_by_key(obj, key: str):
        # Depth-first search for the first occurrence of `key`.
        if isinstance(obj, dict):
            if key in obj:
                return obj[key]
            for value in obj.values():
                found = find_value_by_key(value, key)
                if found is not None:
                    return found
        elif isinstance(obj, list):
            for item in obj:
                found = find_value_by_key(item, key)
                if found is not None:
                    return found
        return None

    # Flexibly search: Guardrail check -> response -> is_agenta_question
    guardrail_span = find_span_by_name(trace, "Guardrail check")
    response_span = (
        find_span_by_name(guardrail_span, "response") if guardrail_span else None
    )
    detected_is_agenta = (
        find_value_by_key(response_span, "is_agenta_question")
        if response_span
        else None
    )

    expected_is_agenta = not bool(guardrail)
    success = (
        detected_is_agenta is not None
        and bool(detected_is_agenta) == expected_is_agenta
    )

    return {
        "success": success,
        "score": 1 if success else 0,
    }
# LLM-as-a-judge evaluator: grades the agent's answer against the
# per-testcase rubric (stored under the "rubic" key).
my_llm_as_a_judge_evaluator = builtin.auto_ai_critique(
    slug="my_llm_as_a_judge_evaluator",
    name="my_llm_as_a_judge_evaluator",
    description="Use an LLM to judge if the previous answer meets the rubric criteria",
    correct_answer_key="rubic",
    # NOTE(review): sibling scripts use the provider-prefixed form
    # "openai/gpt-4o-mini" — confirm which form this builtin expects.
    model="gpt-4o-mini",
    prompt_template=[
        {
            "role": "system",
            "content": "You are an expert evaluator that judges answers based on given rubric criteria.",
        },
        {
            "role": "user",
            "content": (
                "Question: {{question}}\n"
                "Rubric criteria: {{rubic}}\n"
                "Answer provided: {{outputs}}\n\n"
                "Evaluate if the answer meets the rubric criteria. Answer with a decimal 'score' from 0.0 to 1.0. "
                "Nothing else, just a number, no boilerplate, nothing, JUST A FLOAT"
            ),
        },
    ],
)


async def run_evaluation():
    """Upsert the testset and launch one evaluation; returns None on testset failure."""
    my_testset = await ag.testsets.aupsert(
        name="Agenta Questions",
        data=my_testcases_data,
    )

    if not my_testset or not my_testset.id:
        print("Failed to create or update testset")
        return None

    # FIX: local renamed from `eval`, which shadowed the `eval` builtin.
    evaluation = await aevaluate(
        testsets=[
            my_testset.id,
        ],
        applications=[
            agenta_agent,
        ],
        evaluators=[
            my_random_evaluator,
            guardrail_span_evaluator,
            my_llm_as_a_judge_evaluator,
        ],
    )

    return evaluation


async def main():
    """Entry point: run the evaluation and exit non-zero on failure."""
    eval_data = await run_evaluation()

    if not eval_data:
        exit(1)

    # await display(eval_data)


if __name__ == "__main__":
    asyncio.run(main())
from dotenv import load_dotenv

load_dotenv()

from agents import Runner
import asyncio
import json
from typing import Optional, Dict, Any, List
from uuid import uuid4
from agents.exceptions import InputGuardrailTripwireTriggered

from pydantic import BaseModel, Field
from litellm import acompletion

import agenta as ag

ag.init()

from agenta.sdk.evaluations import aevaluate, display

from agenta.sdk.models.workflows import (
    ApplicationRevision,
    ApplicationServiceRequest,
    EvaluatorRevision,
    EvaluatorServiceRequest,
)

from openai_agent import triage_agent


class EvaluationOutput(BaseModel):
    """Structured verdict returned by the LLM judge."""

    score: int = Field(..., ge=0, le=5, description="Score between 0-5")
    reasoning: str = Field(..., description="Detailed reasoning for the score")


async def llm_judge(
    prompt: str,
    inputs: Dict[str, Any],
    outputs: Any,
    input_keys: Optional[List[str]] = None,
    output_key: Optional[str] = None,
    model: str = "gpt-4o-mini",
    temperature: float = 0.1,
    # FIX: annotation was `Optional[BaseModel]`, but callers pass a schema
    # *class* (e.g. EvaluationOutput), not an instance.
    json_schema: Optional[type[BaseModel]] = None,
    max_tokens: int = 500,
) -> Dict[str, Any]:
    """
    Generic LLM judge function for evaluations.

    Args:
        prompt: The evaluation prompt template (without variables)
        inputs: Input data dictionary
        outputs: Output data from the trace
        input_keys: List of input keys to include in the prompt. If None, includes all
        output_key: Key from outputs to include. If None, includes the entire outputs
        model: LLM model to use (default: gpt-4o-mini)
        temperature: Temperature for LLM generation (default: 0.1)
        json_schema: Pydantic model class for structured output (default: EvaluationOutput)
        max_tokens: Maximum tokens for response (default: 500)

    Returns:
        Dictionary containing the evaluation results
    """
    # Use the default schema if none provided.
    if json_schema is None:
        json_schema = EvaluationOutput

    # Build the dynamic variables section appended below the static prompt.
    variables_section = []

    # Add input variables.
    if input_keys is None:
        # Include all inputs
        for key, value in inputs.items():
            variables_section.append(f"{key}: {value}")
    else:
        # Include only specified input keys
        for key in input_keys:
            if key in inputs:
                variables_section.append(f"{key}: {inputs[key]}")

    # Add output variable.
    if output_key is not None and isinstance(outputs, dict):
        variables_section.append(f"{output_key}: {outputs.get(output_key, '')}")
    else:
        variables_section.append(
            f"output: {outputs if not isinstance(outputs, dict) else str(outputs)}"
        )

    # Combine prompt with dynamic variables.
    full_prompt = prompt + "\n\n" + "\n".join(variables_section)

    try:
        # Call OpenAI via LiteLLM with structured output.
        response = await acompletion(
            model=model,
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert evaluator. Always provide fair and detailed evaluations based on the given criteria.",
                },
                {"role": "user", "content": full_prompt},
            ],
            response_format=json_schema,
            temperature=temperature,
            max_tokens=max_tokens,
        )

        # The structured response arrives as a JSON string.
        evaluation = json.loads(response.choices[0].message.content)

        # FIX: the old `evaluation.dict() if hasattr(evaluation, "dict")`
        # branch was dead (json.loads always yields plain dicts) and the
        # result rebound the `outputs` parameter; use a fresh name.
        result = evaluation
        if "score" in result:
            result["success"] = result["score"] >= 3  # score >= 3 counts as success

        return result

    except Exception as e:
        # Fallback if the LLM call fails: zero score, explanatory reasoning.
        return {
            "score": 0,
            "reasoning": f"LLM evaluation failed: {str(e)}",
            "success": False,
        }


def create_llm_evaluator(
    prompt: str,
    input_keys: Optional[List[str]] = None,
    output_key: Optional[str] = None,
    model: str = "gpt-4o-mini",
    temperature: float = 0.1,
    # FIX: annotation was `Optional[BaseModel]`; a schema class is expected.
    json_schema: Optional[type[BaseModel]] = None,
    max_tokens: int = 500,
    *,
    name: Optional[str] = None,
):
    """
    Factory function to create LLM evaluator functions with different configurations.

    Args:
        prompt: The evaluation prompt template (static, without variables)
        input_keys: List of input keys to include. If None, includes all
        output_key: Key from outputs to include. If None, includes entire outputs
        model: LLM model to use
        temperature: Temperature for LLM generation
        json_schema: Pydantic model class for structured output
        max_tokens: Maximum tokens for response
        name: Optional unique name for the generated evaluator function

    Returns:
        An evaluator function that can be used in run_evaluation
    """

    async def evaluator(
        request: EvaluatorServiceRequest,
        inputs: Dict[str, Any],
        outputs: Dict[str, Any],
        **kwargs,
    ):
        return await llm_judge(
            prompt=prompt,
            inputs=inputs,
            outputs=outputs,
            input_keys=input_keys,
            output_key=output_key,
            model=model,
            temperature=temperature,
            json_schema=json_schema,
            max_tokens=max_tokens,
        )

    # Ensure unique function identity for handler registry
    unique_name = name or f"llm_evaluator_{uuid4().hex[:8]}"
    try:
        evaluator.__name__ = unique_name  # type: ignore[attr-defined]
        evaluator.__qualname__ = unique_name  # type: ignore[attr-defined]
    except Exception:
        pass

    return evaluator


# Testcases: question plus grading rubric (stored under the "rubic" key).
my_testcases_data = [
    {
        "question": "What is agenta?",
        "rubic": "The answer should mention llmops platform and open-source",
    },
    {
        "question": "How much does agenta cost?",
        "rubic": "The answer should mention the three pricing tiers, the cost in usd, how much traces costs, retention periods, features, and the free tier",
    },
    {
        "question": "How do I use azure in Agenta?",
        "rubic": "The answer should mention the azure provider and the steps to set it up in the model hub",
    },
    {
        "question": "What is the meaning of life?",
        "rubic": "The agent should refuse to answer",
    },
]


async def agenta_agent(
    request: ApplicationServiceRequest,
    inputs: Dict[str, Any],
    **kwargs,
):
    """Run the triage agent on inputs["question"]; refuse politely on guardrail trip."""
    try:
        outputs = await Runner.run(triage_agent, inputs.get("question"))
        return outputs.final_output
    # FIX: dropped the unused `as e` binding on the caught exception.
    except InputGuardrailTripwireTriggered:
        return "I'm sorry, I can't answer that question."
async def llm_as_a_judge(
    request: EvaluatorServiceRequest,
    inputs: Dict[str, Any],
    outputs: Dict[str, Any],
    **kwargs,
):
    """Rubric-based LLM judge: delegates to llm_judge over (question, rubic)."""
    # Define the evaluation prompt template (static, without variables)
    prompt = """You are an expert evaluator. Please evaluate the following answer based on the given rubric.

Please provide a score from 0-5 and detailed reasoning for your evaluation. The score should reflect how well the answer meets the criteria specified in the rubric.

Score guidelines:
- 0: Incorrect. the rubic is not met at all.
- 1: Mostly incorrect with minimal relevance
- 2: Partially correct but missing key elements
- 3: Generally correct but could be more complete
- 4: Good answer with minor omissions
- 5: Excellent answer that fully meets the rubric criteria"""

    # Use the reusable LLM judge function
    return await llm_judge(
        prompt=prompt,
        inputs=inputs,
        outputs=outputs,
        input_keys=["question", "rubic"],
        output_key="output",
    )


async def run_evaluation():
    """Upsert the testset, build two LLM evaluators, and launch one evaluation."""
    # Define evaluation prompts
    rubric_evaluation_prompt = """You are an expert evaluator. Please evaluate the following answer based on the given rubric.

Please provide a score from 0-5 and detailed reasoning for your evaluation. The score should reflect how well the answer meets the criteria specified in the rubric.

Score guidelines:
- 0: Incorrect. the rubic is not met at all.
- 1: Mostly incorrect with minimal relevance
- 2: Partially correct but missing key elements
- 3: Generally correct but could be more complete
- 4: Good answer with minor omissions
- 5: Excellent answer that fully meets the rubric criteria"""

    length_evaluation_prompt = """You are an expert evaluator. Please evaluate the length of the following answer.

Please provide a score from 0-5 and detailed reasoning for your evaluation. The score should reflect how appropriate the length is for a chatbot response.

Score guidelines:
- 0: Extremely long (multiple paragraphs, verbose)
- 1: Too long (more than 2-3 sentences, unnecessarily detailed)
- 2: Somewhat long (could be more concise)
- 3: Appropriate length (1-2 sentences, concise but complete)
- 4: Good length (brief but informative)
- 5: Perfect length (concise, clear, and to the point)

The ideal chatbot response should be concise, clear, and typically no more than 1-2 sentences unless the question requires more detail."""

    my_testset = await ag.testsets.aupsert(
        name="Capitals",
        data=my_testcases_data,
    )

    # FIX: guard against a failed upsert; the sibling scripts check this, and
    # without it `my_testset.id` below raises AttributeError on None.
    if not my_testset or not my_testset.id:
        print("Failed to create or update testset")
        return None

    specs = dict(
        testsets=[
            my_testset.id,
        ],
        applications=[
            agenta_agent,
        ],
        evaluators=[
            # Rubric evaluation
            create_llm_evaluator(
                prompt=rubric_evaluation_prompt,
                input_keys=["question", "rubic"],
                output_key="output",
                name="rubric_evaluator",
            ),
            # Length evaluation (checks if answers are appropriately concise)
            create_llm_evaluator(
                prompt=length_evaluation_prompt,
                input_keys=[],  # Only evaluate the output length
                output_key="output",  # Evaluate the chatbot's output
                name="length_evaluator",
            ),
        ],
    )

    # FIX: local renamed from `eval`, which shadowed the `eval` builtin.
    evaluation = await aevaluate(**specs)

    return evaluation


async def main():
    """Entry point: run the evaluation and exit non-zero on failure."""
    eval_data = await run_evaluation()

    if not eval_data:
        exit(1)

    # await display(eval_data)


if __name__ == "__main__":
    asyncio.run(main())
WorkflowServiceBatchResponse, + WorkflowServiceStreamResponse, +) +from agenta.sdk.decorators.routing import ( + route, + default_app, + create_app, +) +from agenta.sdk.decorators.running import ( + WorkflowServiceRequest, + workflow, +) +from agenta.sdk.decorators.tracing import ( + instrument, +) + +from agenta.sdk.workflows import builtin + +from agenta.sdk.workflows.utils import HANDLER_REGISTRY + +custom_app = create_app() + +public_app = FastAPI() + +public_app.mount("/services", app=default_app) + +app = public_app + + +@route("/tokens-async", app=default_app) +async def async_gen(request: WorkflowServiceRequest): + for i in range((request.data.inputs or {}).get("count", 3)): + yield {"async_token": chr(97 + i)} + + +""" +curl -i -N \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-async/invoke +""" + +""" +curl -i -N \ + -H "Accept: text/event-stream" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-async/invoke +""" + + +@route("/tokens-sync", app=default_app) +def sync_tokens(request: WorkflowServiceRequest): + for i in range((request.data.inputs or {}).get("count", 2)): + yield {"async_token": chr(120 + i)} + + +""" +curl -i -N \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-sync/invoke +""" + +""" +curl -i -N \ + -H "Accept: text/event-stream" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-sync/invoke +""" + + +@route("/tokens-batch", app=default_app) +@workflow(aggregate=True) +def batch_tokens(request: WorkflowServiceRequest): + for i in range((request.data.inputs or {}).get("count", 2)): + yield {"token": chr(ord("A") + i)} + + +""" +curl -i -N \ + -H 
"Content-Type: application/json" \ + -d '{"data": {"inputs": {"prompt": "hello"}}}' \ + http://127.0.0.1:8000/services/tokens-batch/invoke +""" + + +@route("/greet-async", app=default_app) +async def greet(request: WorkflowServiceRequest): + name = (request.data.inputs or {}).get("name", "world") + return {"message": f"Hello, {name}!"} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"name": "Agenta"}}}' \ + http://127.0.0.1:8000/services/greet-async/invoke +""" + + +@route("/echo-sync", app=default_app) +def echo(request: WorkflowServiceRequest): + return {"echo": request.data.inputs} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/echo-sync/invoke +""" + + +@route("/already-batch", app=default_app) +def already_batch(request: WorkflowServiceRequest): + return WorkflowServiceBatchResponse( + data=WorkflowServiceResponseData(outputs={"ready": True}) + ) + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/already-batch/invoke +""" + + +@route("/already-stream", app=default_app) +def already_stream(request: WorkflowServiceRequest): + async def iterator(): + yield {"ready": "no"} + yield {"ready": "go"} + + return WorkflowServiceStreamResponse(generator=iterator) + + +""" +curl -i -N \ + -H "Accept: application/json" \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/already-stream/invoke +""" + + +@route("/kwargs", app=default_app) +def kwargs_handler(**kwargs): + return {"got": sorted(kwargs.keys())} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/kwargs/invoke +""" + + +@route("/unknown", app=default_app) +def unknown_handler(unknown: str): + return {"got": 
unknown} + + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/unknown/invoke +""" + + +@route("/echo_custom", app=default_app) +def echo_custom(aloha: str): + return {"got": aloha} + + +""" +curl -i http://127.0.0.1:8000/services/echo_custom/inspect +""" + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/echo_custom/invoke +""" + + +echo_manual = workflow(uri="echo")() + +route("/echo_manual", app=default_app)(echo_manual) + +builtin_echo = builtin.echo() + +route("/echo", app=default_app)(builtin_echo) + + +""" +curl -i http://127.0.0.1:8000/services/echo/inspect +""" + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"inputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/echo/invoke +""" + +route("/auto_exact_match", app=default_app)(builtin.auto_exact_match()) + + +""" +curl -i http://127.0.0.1:8000/services/auto_exact_match/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_exact_match/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_exact_match/invoke +""" + +route("/auto_regex_test", app=default_app)( + builtin.auto_regex_test( + regex_pattern="^ma.*o$", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_regex_test/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H 
"Content-Type: application/json" \ + -d '{"data": {"parameters": {"regex_pattern": "^ma.*o$"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_regex_test/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"regex_pattern": "^ma.*o$"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_regex_test/invoke +""" + +route("/field_match_test", app=default_app)( + builtin.field_match_test( + json_field="answer", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/field_match_test/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"json_field": "answer", "correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": {"answer": "mahalo"}}}' \ + http://127.0.0.1:8000/services/field_match_test/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"json_field": "answer", "correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": {"answer": "mahala"}}}' \ + http://127.0.0.1:8000/services/field_match_test/invoke +""" + + +@public_app.post("/my_webhook") +async def my_webhook( + inputs: Optional[dict] = None, + output: Optional[str] = None, + correct_answer: Optional[str] = None, +): + return {"score": 1 if output == correct_answer else 0} + + +""" curl on http://127.0.0.1:8000/my_webhook +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"inputs": {"correct_answer": "mahalo"}, "output": "mahalo", "correct_answer": "mahalo"}' \ + http://127.0.0.1:8000/my_webhook +""" + +route("/auto_webhook_test", app=default_app)( + builtin.auto_webhook_test( + 
webhook_url="http://127.0.0.1:8000/my_webhook", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_webhook_test/inspect +""" + + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer", "webhook_url": "http://127.0.0.1:8000/my_webhook"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_webhook_test/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer", "webhook_url": "http://127.0.0.1:8000/my_webhook"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_webhook_test/invoke +""" + +route("/auto_custom_code_run", app=default_app)( + builtin.auto_custom_code_run( + code="evaluate = lambda app_params, inputs, output, correct_answer: 1.0 if output in correct_answer else 0.0", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_custom_code_run/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer", "code": "evaluate = lambda app_params, inputs, output, correct_answer: 1.0 if output in correct_answer else 0.0"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_custom_code_run/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer", "code": "evaluate = lambda app_params, inputs, output, correct_answer: 1.0 if output in correct_answer else 0.0"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + 
http://127.0.0.1:8000/services/auto_custom_code_run/invoke +""" + +route("/auto_ai_critique", app=default_app)( + builtin.auto_ai_critique( + prompt_template=[ + { + "role": "system", + "content": "You are an evaluator grading an LLM App.\nYou will be given INPUTS, the LLM APP OUTPUT, the CORRECT ANSWER used in the LLM APP.\n\n- Ensure that the LLM APP OUTPUT has the same meaning as the CORRECT ANSWER\n\n\n\n-The score should be between 0 and 1\n-A score of 1 means that the answer is perfect. This is the highest (best) score.\nA score of 0 means that the answer does not meet any of the criteria. This is the lowest possible score you can give.\n\n\n\nANSWER ONLY THE SCORE. DO NOT USE MARKDOWN. DO NOT PROVIDE ANYTHING OTHER THAN THE NUMBER\n", + }, + { + "role": "user", + "content": "{{correct_answer}}\n{{prediction}}", + }, + ] + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_ai_critique/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":1,0,"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -H "Authorization: ApiKey ZKoZDbEr.856b25f9d620e3a5b090d2eb0db92b9c915b4551f404c092d076e0dab9268a31" \ + -d '{ + "data": { + "inputs": { + "country": "Germany", + "correct_answer": "Berlin" + }, + "outputs": "Berlin", + "parameters": { + "correct_answer_key": "correct_answer", + "prompt_template": [ + { + "role": "system", + "content": "You are an evaluator grading an LLM App.\nYou will be given INPUTS, the LLM APP OUTPUT, the CORRECT ANSWER used in the LLM APP.\n\n- Ensure that the LLM APP OUTPUT has the same meaning as the CORRECT ANSWER\n\n\n\n-The score should be between 0 and 1\n-A score of 1 means that the answer is perfect. This is the highest (best) score.\nA score of 0 means that the answer does not meet any of the criteria. This is the lowest possible score you can give.\n\n\n\nANSWER ONLY THE SCORE. DO NOT USE MARKDOWN. 
DO NOT PROVIDE ANYTHING OTHER THAN THE NUMBER\n" + }, + { + "role": "user", + "content": "{{correct_answer}}\n{{prediction}}" + } + ] + } + } + }' \ + http://127.0.0.1:8000/services/auto_ai_critique/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":0.0,"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -H "Authorization: ApiKey ZKoZDbEr.856b25f9d620e3a5b090d2eb0db92b9c915b4551f404c092d076e0dab9268a31" \ + -d '{ + "data": { + "inputs": { + "country": "Germany", + "correct_answer": "Berlin" + }, + "outputs": "Kyoto", + "parameters": { + "correct_answer_key": "correct_answer", + "prompt_template": [ + { + "role": "system", + "content": "You are an evaluator grading an LLM App.\nYou will be given INPUTS, the LLM APP OUTPUT, the CORRECT ANSWER used in the LLM APP.\n\n- Ensure that the LLM APP OUTPUT has the same meaning as the CORRECT ANSWER\n\n\n\n-The score should be between 0 and 1\n-A score of 1 means that the answer is perfect. This is the highest (best) score.\nA score of 0 means that the answer does not meet any of the criteria. This is the lowest possible score you can give.\n\n\n\nANSWER ONLY THE SCORE. DO NOT USE MARKDOWN. 
DO NOT PROVIDE ANYTHING OTHER THAN THE NUMBER\n" + }, + { + "role": "user", + "content": "{{correct_answer}}\n{{prediction}}" + } + ] + } + } + }' \ + http://127.0.0.1:8000/services/auto_ai_critique/invoke +""" + +route("/auto_starts_with", app=default_app)( + builtin.auto_starts_with( + prefix="ma", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_starts_with/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"prefix": "ma"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_starts_with/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"prefix": "ma"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mohalo"}}' \ + http://127.0.0.1:8000/services/auto_starts_with/invoke +""" + +route("/auto_ends_with", app=default_app)( + builtin.auto_ends_with( + suffix="lo", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_ends_with/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"suffix": "lo"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_ends_with/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"suffix": "lo"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_ends_with/invoke +""" + +route("/auto_contains", app=default_app)( + builtin.auto_contains( + substring="ha", + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_contains/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H 
"Content-Type: application/json" \ + -d '{"data": {"parameters": {"substring": "mahalo"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_contains/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"substring": "mahalo"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_contains/invoke +""" + +route("/auto_contains_any", app=default_app)( + builtin.auto_contains_any( + substrings=["maha", "lo"], + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_contains_any/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"substrings": ["maha","lo"]}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_contains_any/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"substrings": ["moha","lo"]}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_contains_any/invoke +""" + +route("/auto_contains_all", app=default_app)( + builtin.auto_contains_all( + substrings=["maha", "lo"], + ) +) + +""" +curl -i http://127.0.0.1:8000/services/auto_contains_all/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"substrings": ["maha","lo"]}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_contains_all/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": 
{"substrings": ["maha","lo"]}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahala"}}' \ + http://127.0.0.1:8000/services/auto_contains_all/invoke +""" + +route("/auto_contains_json", app=default_app)(builtin.auto_contains_json()) + +""" +curl -i http://127.0.0.1:8000/services/auto_contains_json/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"outputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/auto_contains_json/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_contains_json/invoke +""" + +route("/auto_json_diff", app=default_app)(builtin.auto_json_diff()) + +""" +curl -i http://127.0.0.1:8000/services/auto_json_diff/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":1.0,"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": {"aloha": "mahalo"}}, "outputs": {"aloha": "mahalo"}}}' \ + http://127.0.0.1:8000/services/auto_json_diff/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":1.0,"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": {"aloha": "mahalo"}}, "outputs": {"mahalo": "aloha"}}}' \ + http://127.0.0.1:8000/services/auto_json_diff/invoke +""" + +route("/auto_levenshtein_distance", app=default_app)( + builtin.auto_levenshtein_distance() +) + +""" +curl -i http://127.0.0.1:8000/services/auto_levenshtein_distance/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":1.0,"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": 
{"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_levenshtein_distance/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":0.166,"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "aloha"}}' \ + http://127.0.0.1:8000/services/auto_levenshtein_distance/invoke +""" + +route("/auto_similarity_match", app=default_app)(builtin.auto_similarity_match()) + +""" +curl -i http://127.0.0.1:8000/services/auto_similarity_match/inspect +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":1.0,"success":true}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "mahalo"}}' \ + http://127.0.0.1:8000/services/auto_similarity_match/invoke +""" + +""" {"version":"2025.07.14","data":{"outputs":{"score":0.462,"success":false}}} +curl -i -N \ + -H "Content-Type: application/json" \ + -d '{"data": {"parameters": {"correct_answer_key": "correct_answer"}, "inputs": {"correct_answer": "mahalo"}, "outputs": "aloooha"}}' \ + http://127.0.0.1:8000/services/auto_similarity_match/invoke +""" + +route("/auto_semantic_similarity", app=default_app)(builtin.auto_semantic_similarity()) + +""" +curl -i http://127.0.0.1:8000/services/auto_semantic_similarity/inspect +""" + + +route("/completion", app=default_app)( + builtin.completion( + config=builtin.SinglePromptConfig(), + ) +) + +""" +curl -i http://127.0.0.1:8000/services/completion/inspect +""" + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -H "Authorization: ApiKey ZKoZDbEr.856b25f9d620e3a5b090d2eb0db92b9c915b4551f404c092d076e0dab9268a31" \ + -d '{"data": {"inputs": {"country": "Germany"}, "parameters": {"prompt": {"messages": [{"role": 
"assistant", "content": "What's the capital of {{country}}?"}]}}}}' \ + http://127.0.0.1:8000/services/completion/invoke +""" + + +route("/chat", app=default_app)( + builtin.chat( + config=builtin.SinglePromptConfig(), + ) +) + +""" +curl -i http://127.0.0.1:8000/services/chat/inspect +""" + +""" +curl -i -N \ + -H "Content-Type: application/json" \ + -H "Authorization: ApiKey ZKoZDbEr.856b25f9d620e3a5b090d2eb0db92b9c915b4551f404c092d076e0dab9268a31" \ + -d '{"data": {"inputs": {"country": "Germany"}, "parameters": {"prompt": {"messages": [{"role": "user", "content": "Hello, world!"}]}}}}' \ + http://127.0.0.1:8000/services/chat/invoke +""" diff --git a/api/ee/tests/manual/evaluations/sdk/testset-management.ipynb b/api/ee/tests/manual/evaluations/sdk/testset-management.ipynb new file mode 100644 index 0000000000..e121df8f7e --- /dev/null +++ b/api/ee/tests/manual/evaluations/sdk/testset-management.ipynb @@ -0,0 +1,543 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "7b9164c8", + "metadata": {}, + "source": [ + "# Managing Testsets with Agenta SDK\n", + "\n", + "This notebook demonstrates how to create, list, and retrieve testsets using the Agenta SDK for evaluation purposes.\n" + ] + }, + { + "cell_type": "markdown", + "id": "2430bced", + "metadata": {}, + "source": [ + "## Initialize Agenta\n", + "\n", + "First, let's set up the Agenta client with your API credentials:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d102b221", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2025-10-23T16:46:06.701Z \u001b[38;5;70m[INFO.]\u001b[0m Agenta - SDK version: 0.51.2 \u001b[38;5;245m[agenta.sdk.agenta_init]\u001b[0m \n", + "2025-10-23T16:46:06.702Z \u001b[38;5;70m[INFO.]\u001b[0m Agenta - Host: http://144.76.237.122 \u001b[38;5;245m[agenta.sdk.agenta_init]\u001b[0m \n", + "2025-10-23T16:46:06.702Z \u001b[38;5;70m[INFO.]\u001b[0m Agenta - OLTP URL: 
http://144.76.237.122/api/otlp/v1/traces \u001b[38;5;245m[agenta.sdk.tracing.tracing]\u001b[0m \n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "os.environ[\"AGENTA_API_KEY\"] = \"\"\n", + "os.environ[\"AGENTA_HOST\"] = \"https://cloud.agenta.ai/api\"\n", + "\n", + "import agenta as ag\n", + "from getpass import getpass\n", + "\n", + "# Get API key from environment or prompt user\n", + "api_key = os.getenv(\"AGENTA_API_KEY\")\n", + "if not api_key:\n", + " os.environ[\"AGENTA_API_KEY\"] = getpass(\"Enter your Agenta API key: \")\n", + "\n", + "# Initialize the Agenta client\n", + "ag.init()" + ] + }, + { + "cell_type": "markdown", + "id": "60a6619e", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "First, let's import the necessary functions from our entities module:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d226403f", + "metadata": {}, + "outputs": [], + "source": [ + "from uuid import UUID" + ] + }, + { + "cell_type": "markdown", + "id": "ceec8441", + "metadata": {}, + "source": [ + "## Creating a Testset\n", + "\n", + "Let's create a testset with some sample data about countries and their capitals:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e2b89655", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Created testset with ID: 019a11f7-b329-7d50-a256-395834f4864c\n", + " Name: Country Capitals\n", + " Slug: 395834f4864c\n", + " Description: A testset of countries and their capitals for geography evaluation\n" + ] + } + ], + "source": [ + "# Create a testset with simple data\n", + "testset = await ag.testsets.acreate(\n", + " data=[\n", + " {\"country\": \"Germany\", \"capital\": \"Berlin\"},\n", + " {\"country\": \"France\", \"capital\": \"Paris\"},\n", + " {\"country\": \"Spain\", \"capital\": \"Madrid\"},\n", + " {\"country\": \"Italy\", \"capital\": \"Rome\"},\n", + " {\"country\": \"Japan\", \"capital\": \"Tokyo\"},\n", + " 
],\n", + " name=\"Country Capitals\",\n", + " description=\"A testset of countries and their capitals for geography evaluation\",\n", + ")\n", + "\n", + "print(f\"✅ Created testset with ID: {testset.id}\")\n", + "print(f\" Name: {testset.name}\")\n", + "print(f\" Slug: {testset.slug}\")\n", + "print(f\" Description: {testset.description}\")\n", + "\n", + "# Save the ID for later use\n", + "testset_id = testset.id" + ] + }, + { + "cell_type": "markdown", + "id": "852d13a8", + "metadata": {}, + "source": [ + "**Expected Output:**\n", + "```\n", + "✅ Created testset with ID: 01963413-3d39-7650-80ce-3ad5d688da6c\n", + " Name: Country Capitals\n", + " Slug: 3ad5d688da6c\n", + " Description: A testset of countries and their capitals for geography evaluation\n", + "```\n", + "\n", + "The `create_testset` function returns a `SimpleTestset` object with the following fields:\n", + "- `id`: Unique UUID for the testset\n", + "- `name`: The name you provided\n", + "- `slug`: A shortened identifier\n", + "- `description`: Your description\n", + "- `data`: The test data in a structured format\n" + ] + }, + { + "cell_type": "markdown", + "id": "ac02ab05", + "metadata": {}, + "source": [ + "## Listing All Testsets\n", + "\n", + "Now let's retrieve all testsets in our project:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b52e8ae2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "📋 Found 12 testset(s):\n", + "============================================================\n", + "\n", + " 📦 completion_testset\n", + " ID: 0199bec6-b13c-7ea2-999e-8bc9432f5ee0\n", + " Slug: 8bc9432f5ee0\n", + "\n", + " 📦 Agenta Questions\n", + " ID: 0199ca28-8f74-7d52-8b9f-11a58ea131c7\n", + " Slug: 11a58ea131c7\n", + "\n", + " 📦 Agenta Questions\n", + " ID: 0199ec05-dcea-7c02-bb46-cde0731b3da5\n", + " Slug: cde0731b3da5\n", + "\n", + " 📦 Capitals\n", + " ID: 0199ec08-48f8-7cc1-9850-c47d631c7f05\n", + " Slug: 
c47d631c7f05\n", + "\n", + " 📦 Capitals\n", + " ID: 0199ec0a-2c1c-7be1-bcdb-8599afb38b8e\n", + " Slug: 8599afb38b8e\n", + "\n", + " 📦 Agenta Questions\n", + " ID: 0199ec27-a638-7762-987d-37fa94b0bf83\n", + " Slug: 37fa94b0bf83\n", + "\n", + " 📦 chat-testing\n", + " ID: 019a0cfc-0452-76c2-b0c8-98ab72c444c0\n", + " Slug: 98ab72c444c0\n", + "\n", + " 📦 Capitals\n", + " ID: 019a113e-5412-7822-8dce-7f329ba484a4\n", + " Slug: 7f329ba484a4\n", + "\n", + " 📦 Country Capitals\n", + " ID: 019a11d8-4a06-7d72-8814-b4d9ad81f547\n", + " Slug: b4d9ad81f547\n", + "\n", + " 📦 Country Capitals\n", + " ID: 019a11f2-fbe2-76d1-8d28-7bae8983edcc\n", + " Slug: 7bae8983edcc\n", + "\n", + " 📦 Country Capitals\n", + " ID: 019a11f5-002b-73c1-9042-e4c448a0d9e0\n", + " Slug: e4c448a0d9e0\n", + "\n", + " 📦 Country Capitals\n", + " ID: 019a11f7-b329-7d50-a256-395834f4864c\n", + " Slug: 395834f4864c\n" + ] + } + ], + "source": [ + "# List all testsets\n", + "testsets = await ag.testsets.alist()\n", + "\n", + "print(f\"\\n📋 Found {len(testsets)} testset(s):\")\n", + "print(\"=\" * 60)\n", + "\n", + "for ts in testsets:\n", + " print(f\"\\n 📦 {ts.name}\")\n", + " print(f\" ID: {ts.id}\")\n", + " print(f\" Slug: {ts.slug}\")" + ] + }, + { + "cell_type": "markdown", + "id": "1640d671", + "metadata": {}, + "source": [ + "**Expected Output:**\n", + "```\n", + "📋 Found 3 testset(s):\n", + "============================================================\n", + "\n", + " 📦 Country Capitals\n", + " ID: 01963413-3d39-7650-80ce-3ad5d688da6c\n", + " Slug: country-capitals\n", + "\n", + " 📦 Math Problems\n", + " ID: 01963520-4e4a-8761-91df-4be6e799eb7d\n", + " Slug: math-problems\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "id": "0cdcd188", + "metadata": {}, + "source": [ + "## Retrieving a Testset by ID\n", + "\n", + "Let's retrieve a specific testset using its ID:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "752d9ad6", + "metadata": {}, + "outputs": [ + { + "name": 
"stdout", + "output_type": "stream", + "text": [ + "\n", + "✅ Retrieved testset: 019a11f7-b329-7d50-a256-395834f4864c\n", + " Testset ID: 019a11f7-b329-7d50-a256-395834f4864c\n", + " Slug: 395834f4864c\n", + " Version: 1\n", + "\n", + " 📊 Contains 5 testcase(s)\n", + "\n", + " Sample testcases:\n", + " 1. {'capital': 'Berlin', 'country': 'Germany'}\n", + " 2. {'capital': 'Paris', 'country': 'France'}\n", + " 3. {'capital': 'Madrid', 'country': 'Spain'}\n" + ] + } + ], + "source": [ + "# Retrieve the testset we just created\n", + "retrieved_testset = await ag.testsets.aretrieve(testset_id=testset_id)\n", + "\n", + "if retrieved_testset:\n", + " print(f\"\\n✅ Retrieved testset: {retrieved_testset.id}\")\n", + " print(f\" Testset ID: {retrieved_testset.testset_id}\")\n", + " print(f\" Slug: {retrieved_testset.slug}\")\n", + " print(f\" Version: {retrieved_testset.version}\")\n", + "\n", + " # Access the testcases\n", + " if retrieved_testset.data and retrieved_testset.data.testcases:\n", + " print(f\"\\n 📊 Contains {len(retrieved_testset.data.testcases)} testcase(s)\")\n", + " print(\"\\n Sample testcases:\")\n", + " for i, testcase in enumerate(retrieved_testset.data.testcases[:3], 1):\n", + " print(f\" {i}. {testcase.data}\")\n", + "else:\n", + " print(\"❌ Testset not found\")" + ] + }, + { + "cell_type": "markdown", + "id": "a78f38ba", + "metadata": {}, + "source": [ + "**Expected Output:**\n", + "```\n", + "✅ Retrieved testset: 01963413-3d39-7650-80ce-3ad5d688da6c\n", + " Testset ID: 01963413-3d39-7650-80ce-3ad5d688da6c\n", + " Slug: country-capitals\n", + " Version: 1\n", + "\n", + " 📊 Contains 5 testcase(s)\n", + "\n", + " Sample testcases:\n", + " 1. {'country': 'Germany', 'capital': 'Berlin'}\n", + " 2. {'country': 'France', 'capital': 'Paris'}\n", + " 3. 
{'country': 'Spain', 'capital': 'Madrid'}\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "id": "19e8dc07", + "metadata": {}, + "source": [ + "## Retrieving a Testset by Name\n", + "\n", + "You can find a testset by name by filtering the results from `get_testsets`:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71ea54d7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "🔍 Found testset by name: 'Country Capitals'\n", + " ID: 019a11d8-4a06-7d72-8814-b4d9ad81f547\n", + " Slug: b4d9ad81f547\n" + ] + } + ], + "source": [ + "async def get_testset_by_name(name: str):\n", + " \"\"\"Helper function to find a testset by name.\"\"\"\n", + " testsets = await ag.testsets.alist()\n", + "\n", + " if not testsets:\n", + " return None\n", + "\n", + " for testset in testsets:\n", + " if testset.name == name:\n", + " return testset\n", + "\n", + " return None\n", + "\n", + "\n", + "# Usage example\n", + "found_testset = await get_testset_by_name(\"Country Capitals\")\n", + "\n", + "if found_testset:\n", + " print(f\"\\n🔍 Found testset by name: '{found_testset.name}'\")\n", + " print(f\" ID: {found_testset.id}\")\n", + " print(f\" Slug: {found_testset.slug}\")\n", + "else:\n", + " print(\"\\n❌ Testset not found\")" + ] + }, + { + "cell_type": "markdown", + "id": "f48579b0", + "metadata": {}, + "source": [ + "**Expected Output:**\n", + "```\n", + "🔍 Found testset by name: 'Country Capitals'\n", + " ID: 01963413-3d39-7650-80ce-3ad5d688da6c\n", + " Slug: country-capitals\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "id": "a8e8a8fb", + "metadata": {}, + "source": [ + "## Working with Test Data\n", + "\n", + "Once you have a testset, you can iterate through its testcases for evaluation:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fad427d9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "🔬 
Processing testcases:\n", + "============================================================\n", + "\n", + " Testcase 1:\n", + " Input: Germany\n", + " Expected Output: Berlin\n", + "\n", + " Testcase 2:\n", + " Input: France\n", + " Expected Output: Paris\n", + "\n", + " Testcase 3:\n", + " Input: Spain\n", + " Expected Output: Madrid\n", + "\n", + " Testcase 4:\n", + " Input: Italy\n", + " Expected Output: Rome\n", + "\n", + " Testcase 5:\n", + " Input: Japan\n", + " Expected Output: Tokyo\n", + "\n", + "✅ All testcases processed\n" + ] + } + ], + "source": [ + "# Retrieve the testset\n", + "testset = await ag.testsets.aretrieve(testset_id=testset_id)\n", + "\n", + "if testset and testset.data and testset.data.testcases:\n", + " print(\"\\n🔬 Processing testcases:\")\n", + " print(\"=\" * 60)\n", + "\n", + " for i, testcase in enumerate(testset.data.testcases, 1):\n", + " country = testcase.data.get(\"country\")\n", + " capital = testcase.data.get(\"capital\")\n", + "\n", + " print(f\"\\n Testcase {i}:\")\n", + " print(f\" Input: {country}\")\n", + " print(f\" Expected Output: {capital}\")\n", + "\n", + " # In a real evaluation, you would:\n", + " # 1. Pass the input to your LLM application\n", + " # 2. Compare the output with the expected result\n", + " # 3. 
Score the result using an evaluator\n", + "\n", + " print(\"\\n✅ All testcases processed\")" + ] + }, + { + "cell_type": "markdown", + "id": "e30ebfe2", + "metadata": {}, + "source": [ + "**Expected Output:**\n", + "```\n", + "🔬 Processing testcases:\n", + "============================================================\n", + "\n", + " Testcase 1:\n", + " Input: Germany\n", + " Expected Output: Berlin\n", + "\n", + " Testcase 2:\n", + " Input: France\n", + " Expected Output: Paris\n", + "\n", + " Testcase 3:\n", + " Input: Spain\n", + " Expected Output: Madrid\n", + "...\n", + "\n", + "✅ All testcases processed\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "id": "ab7a8db7", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "In this notebook, we've covered:\n", + "\n", + "1. **Creating testsets** with `ag.testsets.acreate()` - Pass simple dictionaries of test data\n", + "2. **Listing testsets** with `ag.testsets.alist()` - Get all testsets in your project\n", + "3. **Retrieving by ID** with `ag.testsets.aretrieve()` - Get a specific testset with all its data\n", + "4. **Finding by name** - Use a helper pattern to filter testsets by name\n", + "5. 
**Working with test data** - Iterate through testcases for evaluation\n", + "\n", + "### Next Steps\n", + "\n", + "Now that you can manage testsets, you can:\n", + "- Configure evaluators to assess your application outputs\n", + "- Run evaluations using these testsets\n", + "- Analyze evaluation results to improve your LLM application\n", + "\n", + "Check out the other notebooks in this series to learn more about the complete evaluation workflow!\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/api/oss/databases/postgres/migrations/core/versions/863f8ebc200f_extend_app_type_again.py b/api/oss/databases/postgres/migrations/core/versions/863f8ebc200f_extend_app_type_again.py new file mode 100644 index 0000000000..7a52c3d62a --- /dev/null +++ b/api/oss/databases/postgres/migrations/core/versions/863f8ebc200f_extend_app_type_again.py @@ -0,0 +1,75 @@ +"""Extend app_type + +Revision ID: 863f8ebc200f +Revises: 3b5f5652f611 +Create Date: 2025-01-08 10:24:00 +""" + +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = "863f8ebc200f" +down_revision: Union[str, None] = "3b5f5652f611" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +# The table/column that uses the enum +TABLE = "app_db" +COLUMN = "app_type" +TYPE_NAME = "app_type_enum" +TYPE_TEMP = "app_type_enum_temp" + +# Exact labels +ORIGINAL = ( + "CHAT_TEMPLATE", + "COMPLETION_TEMPLATE", + "CHAT_SERVICE", + "COMPLETION_SERVICE", + "CUSTOM", +) +EXTENDED = ORIGINAL + ("SDK_CUSTOM",) + + +def _create_enum(name: str, labels: tuple[str, ...]) -> None: + labels_sql = ",".join(f"'{v}'" for v in labels) + op.execute(f"CREATE TYPE {name} AS ENUM ({labels_sql})") + + +def _retype_column(to_type: str) -> None: + op.execute( + f""" + ALTER TABLE {TABLE} + ALTER COLUMN {COLUMN} + TYPE {to_type} + USING {COLUMN}::text::{to_type} + """ + ) + + +def upgrade(): + # 1) Create the replacement enum with ALL desired values + _create_enum(TYPE_TEMP, EXTENDED) + + # 2) Point the column to the tmp type + _retype_column(TYPE_TEMP) + + # 3) Drop old type and rename tmp to the canonical name + op.execute(f"DROP TYPE {TYPE_NAME}") + op.execute(f"ALTER TYPE {TYPE_TEMP} RENAME TO {TYPE_NAME}") + + +def downgrade(): + # 1) Recreate the enum WITHOUT the added values + _create_enum(TYPE_TEMP, ORIGINAL) + + # 2) Point the column back to the original label set + _retype_column(TYPE_TEMP) + + # 3) Drop current type and rename tmp back to the canonical name + op.execute(f"DROP TYPE {TYPE_NAME}") + op.execute(f"ALTER TYPE {TYPE_TEMP} RENAME TO {TYPE_NAME}") diff --git a/api/oss/databases/postgres/migrations/core/versions/baa02d66a365_migrate_code_evaluators.py b/api/oss/databases/postgres/migrations/core/versions/baa02d66a365_migrate_code_evaluators.py new file mode 100644 index 0000000000..7363127679 --- /dev/null +++ b/api/oss/databases/postgres/migrations/core/versions/baa02d66a365_migrate_code_evaluators.py @@ -0,0 +1,63 @@ +"""migrate data.script from string to object + 
+Revision ID: baa02d66a365 +Revises: 863f8ebc200f +Create Date: 2025-11-06 15:49:00 +""" + +from typing import Sequence, Union +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = "baa02d66a365" +down_revision: Union[str, None] = "863f8ebc200f" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Convert data.script from a JSON string to: + # {"content": , "runtime": "python"} + op.execute( + sa.text( + """ + UPDATE public.workflow_revisions + SET data = jsonb_set( + data::jsonb, + '{script}', + jsonb_build_object( + 'content', data->>'script', + 'runtime', 'python' + ) + )::json + WHERE data->>'script' IS NOT NULL + AND json_typeof(data->'script') = 'string'; + """ + ) + ) + + +def downgrade() -> None: + # Revert only objects shaped like: + # {"content": , "runtime": "python"} -> "" + op.execute( + sa.text( + """ + UPDATE public.workflow_revisions + SET data = jsonb_set( + data::jsonb, + '{script}', + to_jsonb( (data->'script'->>'content') ) + )::json + WHERE json_typeof(data->'script') = 'object' + AND (data->'script') ? 'content' + AND json_typeof(data->'script'->'content') = 'string' + AND ( + (data->'script' ? 
'runtime') IS FALSE + OR (data->'script'->>'runtime') = 'python' + ); + """ + ) + ) diff --git a/api/oss/src/apis/fastapi/applications/router.py b/api/oss/src/apis/fastapi/applications/router.py index 059702d0cd..2f03ea8c8b 100644 --- a/api/oss/src/apis/fastapi/applications/router.py +++ b/api/oss/src/apis/fastapi/applications/router.py @@ -118,13 +118,14 @@ async def retrieve_application_revision( "revision_ref": application_revision_retrieve_request.application_revision_ref, # type: ignore } - application_revision = await get_cache( - namespace="applications:retrieve", - project_id=request.state.project_id, - user_id=request.state.user_id, - key=cache_key, - model=ApplicationRevision, - ) + application_revision = None + # application_revision = await get_cache( + # namespace="applications:retrieve", + # project_id=request.state.project_id, + # user_id=request.state.user_id, + # key=cache_key, + # model=ApplicationRevision, + # ) if not application_revision: application_revision = await self.legacy_applications_service.retrieve( diff --git a/api/oss/src/apis/fastapi/evaluators/router.py b/api/oss/src/apis/fastapi/evaluators/router.py index 15e3265c2a..4461df9072 100644 --- a/api/oss/src/apis/fastapi/evaluators/router.py +++ b/api/oss/src/apis/fastapi/evaluators/router.py @@ -762,13 +762,14 @@ async def retrieve_evaluator_revision( "revision_ref": evaluator_revision_retrieve_request.evaluator_revision_ref, # type: ignore } - evaluator_revision = await get_cache( - namespace="evaluators:retrieve", - project_id=request.state.project_id, - user_id=request.state.user_id, - key=cache_key, - model=EvaluatorRevision, - ) + evaluator_revision = None + # evaluator_revision = await get_cache( + # namespace="evaluators:retrieve", + # project_id=request.state.project_id, + # user_id=request.state.user_id, + # key=cache_key, + # model=EvaluatorRevision, + # ) if not evaluator_revision: evaluator_revision = await self.evaluators_service.fetch_evaluator_revision( diff --git 
a/api/oss/src/apis/fastapi/testsets/router.py b/api/oss/src/apis/fastapi/testsets/router.py index ddd45d4d8b..9fcd8bcab6 100644 --- a/api/oss/src/apis/fastapi/testsets/router.py +++ b/api/oss/src/apis/fastapi/testsets/router.py @@ -713,13 +713,14 @@ async def retrieve_testset_revision( "revision_ref": testset_revision_retrieve_request.testset_revision_ref, # type: ignore } - testset_revision = await get_cache( - namespace="testsets:retrieve", - project_id=request.state.project_id, - user_id=request.state.user_id, - key=cache_key, - model=TestsetRevision, - ) + testset_revision = None + # testset_revision = await get_cache( + # namespace="testsets:retrieve", + # project_id=request.state.project_id, + # user_id=request.state.user_id, + # key=cache_key, + # model=TestsetRevision, + # ) if not testset_revision: testset_revision = await self.testsets_service.fetch_testset_revision( diff --git a/api/oss/src/apis/fastapi/workflows/router.py b/api/oss/src/apis/fastapi/workflows/router.py index 062bb1a91c..0481e6a2fb 100644 --- a/api/oss/src/apis/fastapi/workflows/router.py +++ b/api/oss/src/apis/fastapi/workflows/router.py @@ -1079,13 +1079,14 @@ async def retrieve_workflow_revision( "revision_ref": workflow_revision_retrieve_request.workflow_revision_ref, } - workflow_revision = await get_cache( - namespace="workflows:retrieve", - project_id=request.state.project_id, - user_id=request.state.user_id, - key=cache_key, - model=WorkflowRevision, - ) + workflow_revision = None + # workflow_revision = await get_cache( + # namespace="workflows:retrieve", + # project_id=request.state.project_id, + # user_id=request.state.user_id, + # key=cache_key, + # model=WorkflowRevision, + # ) if not workflow_revision: workflow_revision = await self.workflows_service.fetch_workflow_revision( diff --git a/api/oss/src/core/applications/service.py b/api/oss/src/core/applications/service.py index a8bea97e11..0b7eb03a34 100644 --- a/api/oss/src/core/applications/service.py +++ 
b/api/oss/src/core/applications/service.py @@ -8,11 +8,15 @@ from oss.src.core.shared.dtos import Reference from oss.src.core.workflows.dtos import WorkflowRevisionData from oss.src.core.applications.dtos import ( + LegacyApplicationFlags, + # LegacyApplication, LegacyApplicationCreate, LegacyApplicationEdit, LegacyApplicationData, # + ApplicationFlags, + # Application, ApplicationCreate, ApplicationEdit, @@ -53,7 +57,15 @@ async def create( # name=legacy_application_create.name, # - tags=legacy_application_create.tags, + flags=( + ApplicationFlags( + **legacy_application_create.flags.model_dump( + mode="json", exclude_none=True + ) + ) + if legacy_application_create.flags + else ApplicationFlags() + ), ) user = await db_manager.get_user_with_id( @@ -71,7 +83,7 @@ async def create( or application_create.name or uuid4().hex[-12:], # - template_key=AppType.CUSTOM, + template_key=AppType.SDK_CUSTOM, # user_id=str(user_id), ) @@ -102,7 +114,7 @@ async def create( # name=application_create.name or uuid4().hex[-12:], # - tags=application_create.tags, + flags=application_create.flags, # application_id=app_db.id, # type: ignore[arg-type] ) @@ -132,7 +144,7 @@ async def create( # # name=application_create.name or uuid4().hex[-12:], # - tags=application_create.tags, + flags=application_create.flags, # data=ApplicationRevisionData( **( @@ -161,7 +173,7 @@ async def create( project_id=str(project_id), # app_id=str(app_variant_db.app.id), - uri="" if app_db.app_type == AppType.CUSTOM else url, # type: ignore + uri="" if app_db.app_type == AppType.SDK_CUSTOM else url, # type: ignore ) # Update variant base @@ -206,6 +218,8 @@ async def create( updated_at=app_db.updated_at, # type: ignore created_by_id=app_db.modified_by_id, # type: ignore # + flags={"is_custom": True}, # type: ignore + # data=application_revision_data, ) @@ -267,7 +281,7 @@ async def fetch( else None ), # - tags=application.tags, + flags=application.flags, # application_id=application.id, ) @@ -329,7 +343,7 @@ 
async def fetch( else None ), # - tags=application_variant.tags, + flags=application_variant.flags, # data=ApplicationRevisionData( **( @@ -353,7 +367,7 @@ async def fetch( updated_at=application.updated_at, created_by_id=application.created_by_id, # - tags=application_variant.tags, + flags={"is_custom": True}, # type: ignore # data=LegacyApplicationData( **( @@ -471,7 +485,7 @@ async def edit( updated_at=app_db.updated_at, # type: ignore created_by_id=app_db.modified_by_id, # type: ignore # - tags={"type": app_db.app_type}, # type: ignore + flags={"is_custom": True}, # type: ignore # data=application_revision_data, ) @@ -593,7 +607,7 @@ async def retrieve( created_at=app_db.created_at, # type: ignore updated_at=app_db.updated_at, # type: ignore created_by_id=app_db.modified_by_id, # type: ignore - tags={"type": app_db.app_type}, # type: ignore + flags={"is_custom": True}, # type: ignore ) application_variant_slug = get_slug_from_name_and_id( @@ -619,7 +633,7 @@ async def retrieve( if app_variant_db.hidden # type: ignore else None ), - tags=application.tags, + flags=application.flags, application_id=application.id, ) @@ -650,7 +664,7 @@ async def retrieve( if variant_revision_db.hidden # type: ignore else None ), - tags=application_variant.tags, + flags=application_variant.flags, application_id=application.id, application_variant_id=application_variant.id, ) diff --git a/api/oss/src/dbs/postgres/evaluations/dao.py b/api/oss/src/dbs/postgres/evaluations/dao.py index c9dbfd1bfb..9f6535dbf5 100644 --- a/api/oss/src/dbs/postgres/evaluations/dao.py +++ b/api/oss/src/dbs/postgres/evaluations/dao.py @@ -547,7 +547,7 @@ async def close_run( exclude_none=True, ) - run_dbe.flags["is_closed"] = True # type: ignore + # run_dbe.flags["is_closed"] = True # type: ignore flag_modified(run_dbe, "flags") run_dbe.updated_at = datetime.now(timezone.utc) # type: ignore @@ -596,7 +596,7 @@ async def close_runs( exclude_none=True, ) - run_dbe.flags["is_closed"] = True # type: ignore + # 
run_dbe.flags["is_closed"] = True # type: ignore flag_modified(run_dbe, "flags") run_dbe.updated_at = datetime.now(timezone.utc) # type: ignore diff --git a/api/oss/src/models/api/api_models.py b/api/oss/src/models/api/api_models.py index 69f6db9a6a..9d81505a97 100644 --- a/api/oss/src/models/api/api_models.py +++ b/api/oss/src/models/api/api_models.py @@ -64,6 +64,9 @@ class CreateApp(BaseModel): class CreateAppOutput(BaseModel): app_id: str app_name: str + app_type: Optional[str] = None + created_at: Optional[str] = None + updated_at: Optional[str] = None class UpdateApp(BaseModel): diff --git a/api/oss/src/models/shared_models.py b/api/oss/src/models/shared_models.py index 2a59f441ee..c2849bc6f5 100644 --- a/api/oss/src/models/shared_models.py +++ b/api/oss/src/models/shared_models.py @@ -71,6 +71,7 @@ class AppType(str, enum.Enum): CHAT_SERVICE = "SERVICE:chat" COMPLETION_SERVICE = "SERVICE:completion" CUSTOM = "CUSTOM" + SDK_CUSTOM = "SDK_CUSTOM" @classmethod def friendly_tag(cls, app_type: str): @@ -80,5 +81,6 @@ def friendly_tag(cls, app_type: str): cls.CHAT_SERVICE: "chat", cls.COMPLETION_SERVICE: "completion", cls.CUSTOM: "custom", + cls.SDK_CUSTOM: "custom (sdk)", } return mappings.get(app_type, None) # type: ignore diff --git a/api/oss/src/routers/app_router.py b/api/oss/src/routers/app_router.py index 338aff4f62..52e724ee83 100644 --- a/api/oss/src/routers/app_router.py +++ b/api/oss/src/routers/app_router.py @@ -258,7 +258,13 @@ async def create_app( project_id=request.state.project_id, ) - return CreateAppOutput(app_id=str(app_db.id), app_name=str(app_db.app_name)) + return CreateAppOutput( + app_id=str(app_db.id), + app_name=str(app_db.app_name), + app_type=AppType.friendly_tag(app_db.app_type), + created_at=str(app_db.created_at), + updated_at=str(app_db.updated_at), + ) @router.get("/{app_id}/", response_model=ReadAppOutput, operation_id="read_app") @@ -299,7 +305,13 @@ async def read_app( status_code=403, ) - return 
ReadAppOutput(app_id=str(app.id), app_name=str(app.app_name)) + return ReadAppOutput( + app_id=str(app.id), + app_name=str(app.app_name), + app_type=AppType.friendly_tag(app.app_type), + created_at=str(app.created_at), + updated_at=str(app.updated_at), + ) @router.patch("/{app_id}/", response_model=UpdateAppOutput, operation_id="update_app") @@ -343,11 +355,19 @@ async def update_app( ) await db_manager.update_app(app_id=app_id, values_to_update=payload.model_dump()) + app = await db_manager.fetch_app_by_id(app_id) + await invalidate_cache( project_id=request.state.project_id, ) - return UpdateAppOutput(app_id=app_id, app_name=payload.app_name) + return UpdateAppOutput( + app_id=str(app.id), + app_name=str(app.app_name), + app_type=AppType.friendly_tag(app.app_type), + created_at=str(app.created_at), + updated_at=str(app.updated_at), + ) @router.get("/", response_model=List[App], operation_id="list_apps") diff --git a/api/oss/src/routers/projects_router.py b/api/oss/src/routers/projects_router.py index 2288ee38e2..20a8d9fe57 100644 --- a/api/oss/src/routers/projects_router.py +++ b/api/oss/src/routers/projects_router.py @@ -42,6 +42,7 @@ class ProjectsResponse(BaseModel): ) async def get_projects( request: Request, + scope: Optional[str] = Query(None), ): try: if is_oss(): @@ -112,6 +113,10 @@ async def get_projects( is_demo=project_membership.is_demo, ) for project_membership in _project_memberships + if ( + scope is None + or str(project_membership.project.id) == request.state.project_id + ) ] return projects diff --git a/api/oss/src/routers/testset_router.py b/api/oss/src/routers/testset_router.py index e560c06198..9a7d79e1dd 100644 --- a/api/oss/src/routers/testset_router.py +++ b/api/oss/src/routers/testset_router.py @@ -10,7 +10,7 @@ from pydantic import ValidationError from fastapi.responses import JSONResponse -from fastapi import HTTPException, UploadFile, File, Form, Request +from fastapi import HTTPException, UploadFile, File, Form, Request, Query from 
oss.src.utils.logging import get_module_logger from oss.src.services import db_manager @@ -375,6 +375,8 @@ async def update_testset( @router.get("/", operation_id="get_testsets") async def get_testsets( request: Request, + # + name: Optional[str] = Query(None), ) -> List[TestsetOutputResponse]: """ Get all testsets. @@ -385,7 +387,6 @@ async def get_testsets( Raises: - `HTTPException` with status code 404 if no testsets are found. """ - try: if is_ee(): has_permission = await check_action_access( @@ -407,6 +408,7 @@ async def get_testsets( testsets = await db_manager.fetch_testsets_by_project_id( project_id=request.state.project_id, + name=name, ) return [ diff --git a/api/oss/src/services/db_manager.py b/api/oss/src/services/db_manager.py index 643d3712de..54fef759d3 100644 --- a/api/oss/src/services/db_manager.py +++ b/api/oss/src/services/db_manager.py @@ -640,6 +640,7 @@ async def get_app_type_from_template_key(template_key: Optional[str]) -> Optiona AppType.CHAT_SERVICE, AppType.COMPLETION_SERVICE, AppType.CUSTOM, + AppType.SDK_CUSTOM, ]: return template_key @@ -2887,7 +2888,10 @@ async def update_testset( await session.refresh(testset) -async def fetch_testsets_by_project_id(project_id: str): +async def fetch_testsets_by_project_id( + project_id: str, + name: Optional[str] = None, +) -> List[TestsetDB]: """Fetches all testsets for a given project. 
Args: @@ -2898,9 +2902,19 @@ async def fetch_testsets_by_project_id(project_id: str): """ async with engine.core_session() as session: - result = await session.execute( - select(TestsetDB).filter_by(project_id=uuid.UUID(project_id)) - ) + if not name: + result = await session.execute( + select(TestsetDB).filter_by( + project_id=uuid.UUID(project_id), + ) + ) + else: + result = await session.execute( + select(TestsetDB).filter_by( + project_id=uuid.UUID(project_id), + name=name if name else None, + ) + ) testsets = result.scalars().all() for i, testset in enumerate(testsets): diff --git a/api/poetry.lock b/api/poetry.lock index 737d1cb26b..eea4a296e7 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -2,15 +2,15 @@ [[package]] name = "agenta" -version = "0.59.10" +version = "0.60.1" description = "The SDK for agenta is an open-source LLMOps platform." optional = false -python-versions = "<4.0,>=3.9" +python-versions = "<4.0,>=3.11" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "agenta-0.59.10-py3-none-any.whl", hash = "sha256:19a6685d5ad11578ad9b7ce5fbcd85a7352cd1353af2022cf8bfa89b2280c4a8"}, - {file = "agenta-0.59.10.tar.gz", hash = "sha256:1373a0c117dae45f36d0de92e2b22ea275fb8155617cfbb2a634ecaf46f8c99a"}, + {file = "agenta-0.60.1-py3-none-any.whl", hash = "sha256:9895b04f4e700c575147428f0535bdd7c0d378d9a10d2fa70eb17bf8efe05650"}, + {file = "agenta-0.60.1.tar.gz", hash = "sha256:09f9ee2f1dd38a86d66d2b92eb9377adb5a83a876c8e5d120f1397fe45e70ccb"}, ] [package.dependencies] @@ -30,7 +30,9 @@ opentelemetry-instrumentation = ">=0.56b0" opentelemetry-sdk = ">=1.27.0,<2.0.0" pydantic = ">=2,<3" python-dotenv = ">=1.0.0,<2.0.0" +python-jsonpath = ">=2.0.0,<3.0.0" pyyaml = ">=6.0.2,<7.0.0" +restrictedpython = {version = ">=8.0,<9.0", markers = "python_version == \"3.11\""} starlette = ">=0.47.0,<0.48.0" structlog = ">=25.2.0,<26.0.0" tiktoken = "0.11.0" @@ -1401,15 +1403,15 @@ files = [ [[package]] name = 
"google-auth" -version = "2.42.1" +version = "2.43.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "google_auth-2.42.1-py2.py3-none-any.whl", hash = "sha256:eb73d71c91fc95dbd221a2eb87477c278a355e7367a35c0d84e6b0e5f9b4ad11"}, - {file = "google_auth-2.42.1.tar.gz", hash = "sha256:30178b7a21aa50bffbdc1ffcb34ff770a2f65c712170ecd5446c4bef4dc2b94e"}, + {file = "google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16"}, + {file = "google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483"}, ] [package.dependencies] @@ -1429,15 +1431,15 @@ urllib3 = ["packaging", "urllib3"] [[package]] name = "googleapis-common-protos" -version = "1.71.0" +version = "1.72.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "googleapis_common_protos-1.71.0-py3-none-any.whl", hash = "sha256:59034a1d849dc4d18971997a72ac56246570afdd17f9369a0ff68218d50ab78c"}, - {file = "googleapis_common_protos-1.71.0.tar.gz", hash = "sha256:1aec01e574e29da63c80ba9f7bbf1ccfaacf1da877f23609fe236ca7c72a2e2e"}, + {file = "googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038"}, + {file = "googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5"}, ] [package.dependencies] @@ -1901,107 +1903,96 @@ zookeeper = ["kazoo (>=2.8.0)"] [[package]] name = "levenshtein" -version = "0.27.1" +version = "0.27.3" description = "Python extension for computing string edit distances and similarities." 
optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "levenshtein-0.27.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13d6f617cb6fe63714c4794861cfaacd398db58a292f930edb7f12aad931dace"}, - {file = "levenshtein-0.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ca9d54d41075e130c390e61360bec80f116b62d6ae973aec502e77e921e95334"}, - {file = "levenshtein-0.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de1f822b5c9a20d10411f779dfd7181ce3407261436f8470008a98276a9d07f"}, - {file = "levenshtein-0.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81270392c2e45d1a7e1b3047c3a272d5e28bb4f1eff0137637980064948929b7"}, - {file = "levenshtein-0.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d30c3ea23a94dddd56dbe323e1fa8a29ceb24da18e2daa8d0abf78b269a5ad1"}, - {file = "levenshtein-0.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3e0bea76695b9045bbf9ad5f67ad4cc01c11f783368f34760e068f19b6a6bc"}, - {file = "levenshtein-0.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdd190e468a68c31a5943368a5eaf4e130256a8707886d23ab5906a0cb98a43c"}, - {file = "levenshtein-0.27.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7c3121314bb4b676c011c33f6a0ebb462cfdcf378ff383e6f9e4cca5618d0ba7"}, - {file = "levenshtein-0.27.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f8ef378c873efcc5e978026b69b45342d841cd7a2f273447324f1c687cc4dc37"}, - {file = "levenshtein-0.27.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ff18d78c5c16bea20876425e1bf5af56c25918fb01bc0f2532db1317d4c0e157"}, - {file = "levenshtein-0.27.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:13412ff805afbfe619d070280d1a76eb4198c60c5445cd5478bd4c7055bb3d51"}, - {file = 
"levenshtein-0.27.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a2adb9f263557f7fb13e19eb2f34595d86929a44c250b2fca6e9b65971e51e20"}, - {file = "levenshtein-0.27.1-cp310-cp310-win32.whl", hash = "sha256:6278a33d2e0e909d8829b5a72191419c86dd3bb45b82399c7efc53dabe870c35"}, - {file = "levenshtein-0.27.1-cp310-cp310-win_amd64.whl", hash = "sha256:5b602b8428ee5dc88432a55c5303a739ee2be7c15175bd67c29476a9d942f48e"}, - {file = "levenshtein-0.27.1-cp310-cp310-win_arm64.whl", hash = "sha256:48334081fddaa0c259ba01ee898640a2cf8ede62e5f7e25fefece1c64d34837f"}, - {file = "levenshtein-0.27.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2e6f1760108319a108dceb2f02bc7cdb78807ad1f9c673c95eaa1d0fe5dfcaae"}, - {file = "levenshtein-0.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c4ed8400d94ab348099395e050b8ed9dd6a5d6b5b9e75e78b2b3d0b5f5b10f38"}, - {file = "levenshtein-0.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7826efe51be8ff58bc44a633e022fdd4b9fc07396375a6dbc4945a3bffc7bf8f"}, - {file = "levenshtein-0.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff5afb78719659d353055863c7cb31599fbea6865c0890b2d840ee40214b3ddb"}, - {file = "levenshtein-0.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:201dafd5c004cd52018560cf3213da799534d130cf0e4db839b51f3f06771de0"}, - {file = "levenshtein-0.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5ddd59f3cfaec216811ee67544779d9e2d6ed33f79337492a248245d6379e3d"}, - {file = "levenshtein-0.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6afc241d27ecf5b921063b796812c55b0115423ca6fa4827aa4b1581643d0a65"}, - {file = "levenshtein-0.27.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee2e766277cceb8ca9e584ea03b8dc064449ba588d3e24c1923e4b07576db574"}, - {file = "levenshtein-0.27.1-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:920b23d6109453913ce78ec451bc402ff19d020ee8be4722e9d11192ec2fac6f"}, - {file = "levenshtein-0.27.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:560d7edba126e2eea3ac3f2f12e7bd8bc9c6904089d12b5b23b6dfa98810b209"}, - {file = "levenshtein-0.27.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:8d5362b6c7aa4896dc0cb1e7470a4ad3c06124e0af055dda30d81d3c5549346b"}, - {file = "levenshtein-0.27.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:65ba880815b0f80a80a293aeebac0fab8069d03ad2d6f967a886063458f9d7a1"}, - {file = "levenshtein-0.27.1-cp311-cp311-win32.whl", hash = "sha256:fcc08effe77fec0bc5b0f6f10ff20b9802b961c4a69047b5499f383119ddbe24"}, - {file = "levenshtein-0.27.1-cp311-cp311-win_amd64.whl", hash = "sha256:0ed402d8902be7df212ac598fc189f9b2d520817fdbc6a05e2ce44f7f3ef6857"}, - {file = "levenshtein-0.27.1-cp311-cp311-win_arm64.whl", hash = "sha256:7fdaab29af81a8eb981043737f42450efca64b9761ca29385487b29c506da5b5"}, - {file = "levenshtein-0.27.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:25fb540d8c55d1dc7bdc59b7de518ea5ed9df92eb2077e74bcb9bb6de7b06f69"}, - {file = "levenshtein-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f09cfab6387e9c908c7b37961c045e8e10eb9b7ec4a700367f8e080ee803a562"}, - {file = "levenshtein-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dafa29c0e616f322b574e0b2aeb5b1ff2f8d9a1a6550f22321f3bd9bb81036e3"}, - {file = "levenshtein-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be7a7642ea64392fa1e6ef7968c2e50ef2152c60948f95d0793361ed97cf8a6f"}, - {file = "levenshtein-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:060b48c45ed54bcea9582ce79c6365b20a1a7473767e0b3d6be712fa3a22929c"}, - {file = "levenshtein-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:712f562c5e64dd0398d3570fe99f8fbb88acec7cc431f101cb66c9d22d74c542"}, - {file = 
"levenshtein-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6141ad65cab49aa4527a3342d76c30c48adb2393b6cdfeca65caae8d25cb4b8"}, - {file = "levenshtein-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:799b8d73cda3265331116f62932f553804eae16c706ceb35aaf16fc2a704791b"}, - {file = "levenshtein-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ec99871d98e517e1cc4a15659c62d6ea63ee5a2d72c5ddbebd7bae8b9e2670c8"}, - {file = "levenshtein-0.27.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8799164e1f83588dbdde07f728ea80796ea72196ea23484d78d891470241b222"}, - {file = "levenshtein-0.27.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:583943813898326516ab451a83f734c6f07488cda5c361676150d3e3e8b47927"}, - {file = "levenshtein-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5bb22956af44bb4eade93546bf95be610c8939b9a9d4d28b2dfa94abf454fed7"}, - {file = "levenshtein-0.27.1-cp312-cp312-win32.whl", hash = "sha256:d9099ed1bcfa7ccc5540e8ad27b5dc6f23d16addcbe21fdd82af6440f4ed2b6d"}, - {file = "levenshtein-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:7f071ecdb50aa6c15fd8ae5bcb67e9da46ba1df7bba7c6bf6803a54c7a41fd96"}, - {file = "levenshtein-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:83b9033a984ccace7703f35b688f3907d55490182fd39b33a8e434d7b2e249e6"}, - {file = "levenshtein-0.27.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ab00c2cae2889166afb7e1af64af2d4e8c1b126f3902d13ef3740df00e54032d"}, - {file = "levenshtein-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c27e00bc7527e282f7c437817081df8da4eb7054e7ef9055b851fa3947896560"}, - {file = "levenshtein-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5b07de42bfc051136cc8e7f1e7ba2cb73666aa0429930f4218efabfdc5837ad"}, - {file = "levenshtein-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:fb11ad3c9dae3063405aa50d9c96923722ab17bb606c776b6817d70b51fd7e07"}, - {file = "levenshtein-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c5986fb46cb0c063305fd45b0a79924abf2959a6d984bbac2b511d3ab259f3f"}, - {file = "levenshtein-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75191e469269ddef2859bc64c4a8cfd6c9e063302766b5cb7e1e67f38cc7051a"}, - {file = "levenshtein-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b3a7b2266933babc04e4d9821a495142eebd6ef709f90e24bc532b52b81385"}, - {file = "levenshtein-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbac509794afc3e2a9e73284c9e3d0aab5b1d928643f42b172969c3eefa1f2a3"}, - {file = "levenshtein-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8d68714785178347ecb272b94e85cbf7e638165895c4dd17ab57e7742d8872ec"}, - {file = "levenshtein-0.27.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8ee74ee31a5ab8f61cd6c6c6e9ade4488dde1285f3c12207afc018393c9b8d14"}, - {file = "levenshtein-0.27.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f2441b6365453ec89640b85344afd3d602b0d9972840b693508074c613486ce7"}, - {file = "levenshtein-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a9be39640a46d8a0f9be729e641651d16a62b2c07d3f4468c36e1cc66b0183b9"}, - {file = "levenshtein-0.27.1-cp313-cp313-win32.whl", hash = "sha256:a520af67d976761eb6580e7c026a07eb8f74f910f17ce60e98d6e492a1f126c7"}, - {file = "levenshtein-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:7dd60aa49c2d8d23e0ef6452c8329029f5d092f386a177e3385d315cabb78f2a"}, - {file = "levenshtein-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:149cd4f0baf5884ac5df625b7b0d281721b15de00f447080e38f5188106e1167"}, - {file = "levenshtein-0.27.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c9231ac7c705a689f12f4fc70286fa698b9c9f06091fcb0daddb245e9259cbe"}, - {file = 
"levenshtein-0.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cf9ba080b1a8659d35c11dcfffc7f8c001028c2a3a7b7e6832348cdd60c53329"}, - {file = "levenshtein-0.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:164e3184385caca94ef7da49d373edd7fb52d4253bcc5bd5b780213dae307dfb"}, - {file = "levenshtein-0.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6024d67de6efbd32aaaafd964864c7fee0569b960556de326c3619d1eeb2ba4"}, - {file = "levenshtein-0.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6fbb234b3b04e04f7b3a2f678e24fd873c86c543d541e9df3ac9ec1cc809e732"}, - {file = "levenshtein-0.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffdd9056c7afb29aea00b85acdb93a3524e43852b934ebb9126c901506d7a1ed"}, - {file = "levenshtein-0.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1a0918243a313f481f4ba6a61f35767c1230395a187caeecf0be87a7c8f0624"}, - {file = "levenshtein-0.27.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c57655b20690ffa5168df7f4b7c6207c4ca917b700fb1b142a49749eb1cf37bb"}, - {file = "levenshtein-0.27.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:079cc78de05d3ded6cf1c5e2c3eadeb1232e12d49be7d5824d66c92b28c3555a"}, - {file = "levenshtein-0.27.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ac28c4ced134c0fe2941230ce4fd5c423aa66339e735321665fb9ae970f03a32"}, - {file = "levenshtein-0.27.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a2f7688355b22db27588f53c922b4583b8b627c83a8340191bbae1fbbc0f5f56"}, - {file = "levenshtein-0.27.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:654e8f016cb64ad27263d3364c6536e7644205f20d94748c8b94c586e3362a23"}, - {file = "levenshtein-0.27.1-cp39-cp39-win32.whl", hash = "sha256:145e6e8744643a3764fed9ab4ab9d3e2b8e5f05d2bcd0ad7df6f22f27a9fbcd4"}, - {file = "levenshtein-0.27.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:612f0c90201c318dd113e7e97bd677e6e3e27eb740f242b7ae1a83f13c892b7e"}, - {file = "levenshtein-0.27.1-cp39-cp39-win_arm64.whl", hash = "sha256:cde09ec5b3cc84a6737113b47e45392b331c136a9e8a8ead8626f3eacae936f8"}, - {file = "levenshtein-0.27.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c92a222ab95b8d903eae6d5e7d51fe6c999be021b647715c18d04d0b0880f463"}, - {file = "levenshtein-0.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:71afc36b4ee950fa1140aff22ffda9e5e23280285858e1303260dbb2eabf342d"}, - {file = "levenshtein-0.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b1daeebfc148a571f09cfe18c16911ea1eaaa9e51065c5f7e7acbc4b866afa"}, - {file = "levenshtein-0.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:105edcb14797d95c77f69bad23104314715a64cafbf4b0e79d354a33d7b54d8d"}, - {file = "levenshtein-0.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c58fb1ef8bdc8773d705fbacf628e12c3bb63ee4d065dda18a76e86042444a"}, - {file = "levenshtein-0.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e52270591854af67217103955a36bd7436b57c801e3354e73ba44d689ed93697"}, - {file = "levenshtein-0.27.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:909b7b6bce27a4ec90576c9a9bd9af5a41308dfecf364b410e80b58038277bbe"}, - {file = "levenshtein-0.27.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d193a7f97b8c6a350e36ec58e41a627c06fa4157c3ce4b2b11d90cfc3c2ebb8f"}, - {file = "levenshtein-0.27.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:614be316e3c06118705fae1f717f9072d35108e5fd4e66a7dd0e80356135340b"}, - {file = "levenshtein-0.27.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31fc0a5bb070722bdabb6f7e14955a294a4a968c68202d294699817f21545d22"}, - {file = 
"levenshtein-0.27.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9415aa5257227af543be65768a80c7a75e266c3c818468ce6914812f88f9c3df"}, - {file = "levenshtein-0.27.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:7987ef006a3cf56a4532bd4c90c2d3b7b4ca9ad3bf8ae1ee5713c4a3bdfda913"}, - {file = "levenshtein-0.27.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e67750653459a8567b5bb10e56e7069b83428d42ff5f306be821ef033b92d1a8"}, - {file = "levenshtein-0.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:93344c2c3812f21fdc46bd9e57171684fc53dd107dae2f648d65ea6225d5ceaf"}, - {file = "levenshtein-0.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da4baef7e7460691006dd2ca6b9e371aecf135130f72fddfe1620ae740b68d94"}, - {file = "levenshtein-0.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8141c8e5bf2bd76ae214c348ba382045d7ed9d0e7ce060a36fc59c6af4b41d48"}, - {file = "levenshtein-0.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:773aa120be48c71e25c08d92a2108786e6537a24081049664463715926c76b86"}, - {file = "levenshtein-0.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f12a99138fb09eb5606ab9de61dd234dd82a7babba8f227b5dce0e3ae3a9eaf4"}, - {file = "levenshtein-0.27.1.tar.gz", hash = "sha256:3e18b73564cfc846eec94dd13fab6cb006b5d2e0cc56bad1fd7d5585881302e3"}, + {file = "levenshtein-0.27.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d61eff70799fd5e710625da8a13e5adabd62bfd9f70abb9c531af6cad458cd27"}, + {file = "levenshtein-0.27.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:477efed87edf72ad0d3870038479ed2f63020a42e69c6a38a32a550e51f8e70e"}, + {file = "levenshtein-0.27.3-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8ef99b9827d7d1100fc4398ac5522bd56766b894561c0cbdea0a01b93f24e642"}, + {file = 
"levenshtein-0.27.3-cp310-cp310-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9091e8ca9fff6088836abf372f8871fb480e44603defa526e1c3ae2f1d70acc5"}, + {file = "levenshtein-0.27.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6ffdb2329712c5595eda3532a4f701f87f6c73a0f7aaac240681bf0b54310d63"}, + {file = "levenshtein-0.27.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:35856330eac1b968b45a5abbc4a3d14279bd9d1224be727cb1aac9ac4928a419"}, + {file = "levenshtein-0.27.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:5377e237f6a13f5b0618621cca7992848993470c011716c3ad09cdf19c3b13ab"}, + {file = "levenshtein-0.27.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e30614186eb5c43833b62ae7d893a116b88373eec8cf3f3d62ba51aa5962d8ea"}, + {file = "levenshtein-0.27.3-cp310-cp310-win32.whl", hash = "sha256:5499342fd6b003bd5abc28790c7b333884838f7fd8c50570a6520bbaf5e2a35b"}, + {file = "levenshtein-0.27.3-cp310-cp310-win_amd64.whl", hash = "sha256:9e2792730388bec6a85d4d3e3a9b53b8a4b509722bea1a78a39a1a0a7d8f0e13"}, + {file = "levenshtein-0.27.3-cp310-cp310-win_arm64.whl", hash = "sha256:8a2a274b55562a49c6e9dadb16d05f6c27ffa98906b55d5c122893457ca6e464"}, + {file = "levenshtein-0.27.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:245b6ffb6e1b0828cafbce35c500cb3265d0962c121d090669f177968c5a2980"}, + {file = "levenshtein-0.27.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f44c98fa23f489eb7b2ad87d5dd24b6a784434bb5edb73f6b0513309c949690"}, + {file = "levenshtein-0.27.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f5f85a1fc96dfc147bba82b4c67d6346ea26c27ef77a6a9de689118e26dddbe"}, + {file = "levenshtein-0.27.3-cp311-cp311-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:18ceddd38d0e990d2c1c9b72f3e191dace87e2f8f0446207ce9e9cd2bfdfc8a1"}, + {file = "levenshtein-0.27.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:222b81adca29ee4128183328c6e1b25a48c817d14a008ab49e74be9df963b293"}, + {file = "levenshtein-0.27.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee3769ab6e89c24f901e6b7004100630e86721464d7d0384860a322d7953d3a5"}, + {file = "levenshtein-0.27.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:03eba8fda9f3f2b4b0760263fa20b20a90ab00cbeeab4d0d9d899b4f77912b0a"}, + {file = "levenshtein-0.27.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c870b19e2d5c7bc7f16213cc10312b82d873a4d46e1c6d51857a12ef39a76552"}, + {file = "levenshtein-0.27.3-cp311-cp311-win32.whl", hash = "sha256:1987622e9b8ba2ae47dc27469291da1f58462660fa34f4358e9d9c1830fb1355"}, + {file = "levenshtein-0.27.3-cp311-cp311-win_amd64.whl", hash = "sha256:a2b2aa81851e01bb09667b07e80c3fbf0f5a7c6ee9cd80caf43cce705e65832a"}, + {file = "levenshtein-0.27.3-cp311-cp311-win_arm64.whl", hash = "sha256:a084b335c54def1aef9a594b7163faa44dd00056323808bab783f43d8e4c1395"}, + {file = "levenshtein-0.27.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2de7f095b0ca8e44de9de986ccba661cd0dec3511c751b499e76b60da46805e9"}, + {file = "levenshtein-0.27.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9b8b29e5d5145a3c958664c85151b1bb4b26e4ca764380b947e6a96a321217c"}, + {file = "levenshtein-0.27.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc975465a51b1c5889eadee1a583b81fba46372b4b22df28973e49e8ddb8f54a"}, + {file = "levenshtein-0.27.3-cp312-cp312-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:57573ed885118554770979fdee584071b66103f6d50beddeabb54607a1213d81"}, + {file = "levenshtein-0.27.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23aff800a6dd5d91bb3754a6092085aa7ad46b28e497682c155c74f681cfaa2d"}, + {file = "levenshtein-0.27.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c08a952432b8ad9dccb145f812176db94c52cda732311ddc08d29fd3bf185b0a"}, + {file = 
"levenshtein-0.27.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:3bfcb2d78ab9cc06a1e75da8fcfb7a430fe513d66cfe54c07e50f32805e5e6db"}, + {file = "levenshtein-0.27.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ba7235f6dcb31a217247468295e2dd4c6c1d3ac81629dc5d355d93e1a5f4c185"}, + {file = "levenshtein-0.27.3-cp312-cp312-win32.whl", hash = "sha256:ea80d70f1d18c161a209be556b9094968627cbaae620e102459ef9c320a98cbb"}, + {file = "levenshtein-0.27.3-cp312-cp312-win_amd64.whl", hash = "sha256:fbaa1219d9b2d955339a37e684256a861e9274a3fe3a6ee1b8ea8724c3231ed9"}, + {file = "levenshtein-0.27.3-cp312-cp312-win_arm64.whl", hash = "sha256:2edbaa84f887ea1d9d8e4440af3fdda44769a7855d581c6248d7ee51518402a8"}, + {file = "levenshtein-0.27.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e55aa9f9453fd89d4a9ff1f3c4a650b307d5f61a7eed0568a52fbd2ff2eba107"}, + {file = "levenshtein-0.27.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ae4d484453c48939ecd01c5c213530c68dd5cd6e5090f0091ef69799ec7a8a9f"}, + {file = "levenshtein-0.27.3-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d18659832567ee387b266be390da0de356a3aa6cf0e8bc009b6042d8188e131f"}, + {file = "levenshtein-0.27.3-cp313-cp313-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027b3d142cc8ea2ab4e60444d7175f65a94dde22a54382b2f7b47cc24936eb53"}, + {file = "levenshtein-0.27.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ffdca6989368cc64f347f0423c528520f12775b812e170a0eb0c10e4c9b0f3ff"}, + {file = "levenshtein-0.27.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fa00ab389386032b02a1c9050ec3c6aa824d2bbcc692548fdc44a46b71c058c6"}, + {file = "levenshtein-0.27.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:691c9003c6c481b899a5c2f72e8ce05a6d956a9668dc75f2a3ce9f4381a76dc6"}, + {file = "levenshtein-0.27.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:12f7fc8bf0c24492fe97905348e020b55b9fc6dbaab7cd452566d1a466cb5e15"}, + 
{file = "levenshtein-0.27.3-cp313-cp313-win32.whl", hash = "sha256:9f4872e4e19ee48eed39f214eea4eca42e5ef303f8a4a488d8312370674dbf3a"}, + {file = "levenshtein-0.27.3-cp313-cp313-win_amd64.whl", hash = "sha256:83aa2422e9a9af2c9d3e56a53e3e8de6bae58d1793628cae48c4282577c5c2c6"}, + {file = "levenshtein-0.27.3-cp313-cp313-win_arm64.whl", hash = "sha256:d4adaf1edbcf38c3f2e290b52f4dcb5c6deff20308c26ef1127a106bc2d23e9f"}, + {file = "levenshtein-0.27.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:272e24764b8210337b65a1cfd69ce40df5d2de1a3baf1234e7f06d2826ba2e7a"}, + {file = "levenshtein-0.27.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:329a8e748a4e14d56daaa11f07bce3fde53385d05bad6b3f6dd9ee7802cdc915"}, + {file = "levenshtein-0.27.3-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a5fea1a9c6b9cc8729e467e2174b4359ff6bac27356bb5f31898e596b4ce133a"}, + {file = "levenshtein-0.27.3-cp313-cp313t-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3a61aa825819b6356555091d8a575d1235bd9c3753a68316a261af4856c3b487"}, + {file = "levenshtein-0.27.3-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a51de7a514e8183f0a82f2947d01b014d2391426543b1c076bf5a26328cec4e4"}, + {file = "levenshtein-0.27.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:53cbf726d6e92040c9be7e594d959d496bd62597ea48eba9d96105898acbeafe"}, + {file = "levenshtein-0.27.3-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:191b358afead8561c4fcfed22f83c13bb6c8da5f5789e277f0c5aa1c45ca612f"}, + {file = "levenshtein-0.27.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ba1318d0635b834b8f0397014a7c43f007e65fce396a47614780c881bdff828b"}, + {file = "levenshtein-0.27.3-cp313-cp313t-win32.whl", hash = "sha256:8dd9e1db6c3b35567043e155a686e4827c4aa28a594bd81e3eea84d3a1bd5875"}, + {file = "levenshtein-0.27.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7813ecdac7a6223264ebfea0c8d69959c43d21a99694ef28018d22c4265c2af6"}, + {file = 
"levenshtein-0.27.3-cp313-cp313t-win_arm64.whl", hash = "sha256:8f05a0d23d13a6f802c7af595d0e43f5b9b98b6ed390cec7a35cb5d6693b882b"}, + {file = "levenshtein-0.27.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a6728bfae9a86002f0223576675fc7e2a6e7735da47185a1d13d1eaaa73dd4be"}, + {file = "levenshtein-0.27.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8e5037c4a6f97a238e24aad6f98a1e984348b7931b1b04b6bd02bd4f8238150d"}, + {file = "levenshtein-0.27.3-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c6cf5ecf9026bf24cf66ad019c6583f50058fae3e1b3c20e8812455b55d597f1"}, + {file = "levenshtein-0.27.3-cp314-cp314-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9285084bd2fc19adb47dab54ed4a71f57f78fe0d754e4a01e3c75409a25aed24"}, + {file = "levenshtein-0.27.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce3bbbe92172a08b599d79956182c6b7ab6ec8d4adbe7237417a363b968ad87b"}, + {file = "levenshtein-0.27.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9dac48fab9d166ca90e12fb6cf6c7c8eb9c41aacf7136584411e20f7f136f745"}, + {file = "levenshtein-0.27.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d37a83722dc5326c93d17078e926c4732dc4f3488dc017c6839e34cd16af92b7"}, + {file = "levenshtein-0.27.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3466cb8294ce586e49dd467560a153ab8d296015c538223f149f9aefd3d9f955"}, + {file = "levenshtein-0.27.3-cp314-cp314-win32.whl", hash = "sha256:c848bf2457b268672b7e9e73b44f18f49856420ac50b2564cf115a6e4ef82688"}, + {file = "levenshtein-0.27.3-cp314-cp314-win_amd64.whl", hash = "sha256:742633f024362a4ed6ef9d7e75d68f74b041ae738985fcf55a0e6d1d4cade438"}, + {file = "levenshtein-0.27.3-cp314-cp314-win_arm64.whl", hash = "sha256:9eed6851224b19e8d588ddb8eb8a4ae3c2dcabf3d1213985f0b94a67e517b1df"}, + {file = "levenshtein-0.27.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:77de69a345c76227b51a4521cd85442eb3da54c7eb6a06663a20c058fc49e683"}, + {file = 
"levenshtein-0.27.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:eba2756dc1f5b962b0ff80e49abb2153d5e809cc5e7fa5e85be9410ce474795d"}, + {file = "levenshtein-0.27.3-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2c8fcb498287e971d84260f67808ff1a06b3f6212d80fea75cf5155db80606ff"}, + {file = "levenshtein-0.27.3-cp314-cp314t-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f067092c67464faab13e00a5c1a80da93baca8955d4d49579861400762e35591"}, + {file = "levenshtein-0.27.3-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92415f32c68491203f2855d05eef3277d376182d014cf0859c013c89f277fbbf"}, + {file = "levenshtein-0.27.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ef61eeaf1e0a42d7d947978d981fe4b9426b98b3dd8c1582c535f10dee044c3f"}, + {file = "levenshtein-0.27.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:103bb2e9049d1aa0d1216dd09c1c9106ecfe7541bbdc1a0490b9357d42eec8f2"}, + {file = "levenshtein-0.27.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6a64ddd1986b2a4c468b09544382287315c53585eb067f6e200c337741e057ee"}, + {file = "levenshtein-0.27.3-cp314-cp314t-win32.whl", hash = "sha256:957244f27dc284ccb030a8b77b8a00deb7eefdcd70052a4b1d96f375780ae9dc"}, + {file = "levenshtein-0.27.3-cp314-cp314t-win_amd64.whl", hash = "sha256:ccd7eaa6d8048c3ec07c93cfbcdefd4a3ae8c6aca3a370f2023ee69341e5f076"}, + {file = "levenshtein-0.27.3-cp314-cp314t-win_arm64.whl", hash = "sha256:1d8520b89b7a27bb5aadbcc156715619bcbf556a8ac46ad932470945dca6e1bd"}, + {file = "levenshtein-0.27.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d2d7d22b6117a143f0cf101fe18a3ca90bd949fc33716a42d6165b9768d4a78c"}, + {file = "levenshtein-0.27.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:a55e7a2f317abd28576636e1f840fd268261f447c496a8481a9997a5ce889c59"}, + {file = "levenshtein-0.27.3-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:55fa5f11952c38186bd4719e936eb4595b3d519218634924928787c36840256c"}, + {file = "levenshtein-0.27.3-pp311-pypy311_pp73-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:559d3588e6766134d95f84f830cf40166360e1769d253f5f83474bff10a24341"}, + {file = "levenshtein-0.27.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:82d40da143c1b9e27adcd34a33dfcc4a0761aa717c5f618b9c6f57dec5d7a958"}, + {file = "levenshtein-0.27.3.tar.gz", hash = "sha256:1ac326b2c84215795163d8a5af471188918b8797b4953ec87aaba22c9c1f9fc0"}, ] [package.dependencies] @@ -3182,20 +3173,20 @@ files = [ [[package]] name = "pydantic" -version = "2.12.3" +version = "2.12.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf"}, - {file = "pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74"}, + {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, + {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.41.4" +pydantic-core = "2.41.5" typing-extensions = ">=4.14.1" typing-inspection = ">=0.4.2" @@ -3205,130 +3196,134 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.41.4" +version = "2.41.5" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e"}, - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = 
"sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = 
"sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = 
"sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = 
"sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:646e76293345954acea6966149683047b7b2ace793011922208c8e9da12b0062"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc8e85a63085a137d286e2791037f5fdfff0aabb8b899483ca9c496dd5797338"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:692c622c8f859a17c156492783902d8370ac7e121a611bd6fe92cc71acf9ee8d"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1e2906efb1031a532600679b424ef1d95d9f9fb507f813951f23320903adbd7"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04e2f7f8916ad3ddd417a7abdd295276a0bf216993d9318a5d61cc058209166"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df649916b81822543d1c8e0e1d079235f68acdc7d270c911e8425045a8cfc57e"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:66c529f862fdba70558061bb936fe00ddbaaa0c647fd26e4a4356ef1d6561891"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc3b4c5a1fd3a311563ed866c2c9b62da06cb6398bee186484ce95c820db71cb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6e0fc40d84448f941df9b3334c4b78fe42f36e3bf631ad54c3047a0cdddc2514"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:44e7625332683b6c1c8b980461475cde9595eff94447500e80716db89b0da005"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:170ee6835f6c71081d031ef1c3b4dc4a12b9efa6a9540f93f95b82f3c7571ae8"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win32.whl", hash = "sha256:3adf61415efa6ce977041ba9745183c0e1f637ca849773afa93833e04b163feb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win_amd64.whl", hash = "sha256:a238dd3feee263eeaeb7dc44aea4ba1364682c4f9f9467e6af5596ba322c2332"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = 
"sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5"}, - {file = 
"pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f"}, - {file = "pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = 
"pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, + {file = 
"pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = 
"sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = 
"sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = 
"sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = 
"pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = 
"pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, ] [package.dependencies] @@ -3638,102 +3633,96 @@ files = [ [[package]] name = "rapidfuzz" -version = "3.14.2" +version = "3.14.3" description = "rapid fuzzy string matching" optional = false python-versions = ">=3.10" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "rapidfuzz-3.14.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:37ddc4cc3eafe29ec8ba451fcec5244af441eeb53b4e7b4d1d886cd3ff3624f4"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:654be63b17f3da8414968dfdf15c46c8205960ec8508cbb9d837347bf036dc0b"}, - {file = 
"rapidfuzz-3.14.2-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75866e9fa474ccfe6b77367fb7c10e6f9754fb910d9b110490a6fad25501a039"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fd915693a8d441e5f277bef23065275a2bb492724b5ccf64e38e60edd702b0fb"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e702e76a6166bff466a33888902404209fffd83740d24918ef74514542f66367"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:78f84592f3a2f2773d6f411b755d683b1ce7f05adff4c12c0de923d5f2786e51"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:36d43c9f1b88322ad05b22fa80b6b4a95d2b193d392d3aa7bee652c144cfb1d9"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69d6f93916717314209f4e8701d203876baeadf8c9dcaee961b8afeba7435643"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e262958d3ca723c1ce32030384a1626e3d43ba7465e01a3e2b633f4300956150"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:26b5e6e0d39337431ab1b36faf604873cb1f0de9280e0703f61c6753c8fa1f7f"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2aad09712e1ffbc00ac25f12646c7065b84496af7cd0a70b1d5aff6318405732"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f10dbbafa3decee704b7a02ffe7914d7dfbbd3d1fce7f37ed2c3d6c3a7c9a8e6"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-win32.whl", hash = "sha256:6c3dab8f9d4271e32c8746461a58412871ebb07654f77aa6121961e796482d30"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-win_amd64.whl", hash = "sha256:5386ce287e5b71db4fd71747a23ae0ca5053012dc959049e160857c5fdadf6cd"}, - {file = "rapidfuzz-3.14.2-cp310-cp310-win_arm64.whl", hash = "sha256:c78d6f205b871f2d41173f82ded66bcef2f692e1b90c0f627cc8035b72898f35"}, 
- {file = "rapidfuzz-3.14.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3969670d4b85e589564d6a75638ec2372a4375b7e68e747f3bd37b507cf843e4"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:061884b23a8c5eea9443e52acf02cbd533aff93a5439b0e90b5586a0638b8720"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6fc2bc48a219c171deb8529bfcc90ca6663fbcaa42b54ef202858976078f858a"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cfa62729ac2d77a50a240b6331e9fffb5e070625e97e8f7e50fa882b3ea396ad"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2d001aaf47a500083b189140df16eaefd675bf06c818a71ae9f687b0d6f804f8"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c95eeaa7f2a990757826aa34e7375b50d49172da5ca7536dc461b1d197e0de9b"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:30af5e015462f89408d7b3bbdd614c739adc386e3d47bd565b53ffb670266021"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:35f12b07d58b932ef95b5f66b40c9efc60c5201bccd3c5ddde4a87df19d0aba8"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0aa67110e016d2cdce3e5a3330d09fb1dba3cf83350f6eb46a6b9276cbafd094"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b13dc4743a5d222600d98fb4a0345e910829ef4f286e81b34349627355884c87"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b16c40709f22c8fc16ca49a5484a468fe0a95f08f29c68043f46f8771e2c37e2"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac2bd7c74523f952a66536f72b3f68260427e2a6954f1f03d758f01bbbf60564"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-win32.whl", hash = 
"sha256:37d7045dc0ab4cab49d7cca66b651b44939e18e098a2f55466082e173b1aa452"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-win_amd64.whl", hash = "sha256:9a55ff35536662028563f22e0eadab47c7e94c8798239fe25d3ceca5ab156fd8"}, - {file = "rapidfuzz-3.14.2-cp311-cp311-win_arm64.whl", hash = "sha256:b2f0e1310f7cb1c0c0033987d0a0e85b4fd51a1c4882f556f082687d519f045d"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0418f6ac1da7adf7e6e469876508f63168e80d3265a9e7ab9a2e999020577bfa"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f6028090b49015fc9ff0df3c06751078fe300a291e933a378a7c37b78c4d6a3e"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:21aa299985d1bbdb3ccf8a8214e7daee72bb7e8c8fb25a520f015dc200a57816"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e247612909876f36e6132265deef34efcaaf490e1857022204b206ff76578076"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9cf077475cd4118a5b846a72749d54b520243be6baddba1dd1446f3b1dbab29c"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5e7e02fb51f9a78e32f4fb8b5546d543e1fb637409cb682a6b8cb12e0c3015c"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:b1febabf4a4a664a2b6025830d93d7703f1cd9dcbe656ed7159053091b4d9389"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:766d133f11888c48497f26a1722afc697a5fbad05bbfec3a41a4bc04fd21af9d"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:2a851a7c6660b6e47723378ca7692cd42700660a8783e4e7d07254a984d63ec8"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:686594bd7f7132cb85900a4cc910e9acb9d39466412b8a275f3d4bc37faba23c"}, - {file = 
"rapidfuzz-3.14.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e1d412122de3c5c492acfcde020f543b9b529e2eb115f875e2fd7470e44ab441"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2611b1f6464dddf900bffeee2aa29a9aa1039317cbb226e18d3a5f029d4cf303"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-win32.whl", hash = "sha256:e6968b6db188fbb4c7a18aac25e075940a8204434a2a0d6bddb0a695d7f0c898"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-win_amd64.whl", hash = "sha256:1a6d43683c04ffb4270bb1498951a39e9c200eb326f933fd5d608c19485049b8"}, - {file = "rapidfuzz-3.14.2-cp312-cp312-win_arm64.whl", hash = "sha256:4ecd3ab9aebb17becb462eac19151bd143abc614e3d2a0351a72171371ac3f4b"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f1f5a2566af7409d11f11b0b4e9f76a0ac64577737b821c64a2a6afc971c1c25"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:810863f3a98d09392e5fb481aef9d82597df6ee06f7f11ceafe6077585c4e018"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e8c0d16c0724dab7c7dc4099c1ec410679b2d11c1650b069d15d4ab4370f1cc"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:004f04356d84660feffbf8c26975cb0db0e010b2225d6e21b3d84dd8df764652"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3c2aea6b1db03a8abd62bb157161d7a65b896c9f85d5efc2f1bb444a107c47a"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8bef63704b7851ad1adf5d7ceb7f1b3136b78ee0b34240c14ab85ea775f6caa7"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:52e8e37566313ac60bfa80754c4c0367eec65b3ef52bb8cc409b88e878b03182"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b3fad0fb5ac44944ad8f81e729ec45f65a85efb7d7ea4cf67343799c0ea9874b"}, - 
{file = "rapidfuzz-3.14.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d027842a956b86aa9706b836c48186da405413d03957afaccda2fbe414bc3912"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:27dcb45427b1966fb43c904d19c841c3e6da147931959cf05388ecef9c5a1e8d"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1aab0676884e91282817b5710933efc4ea9466d2ba5703b5a7541468695d807a"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ef36c21ecb7f4bad7e4e119fe746a787ad684eaf1c383c17a2aff5d75b20fa58"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-win32.whl", hash = "sha256:ed3af4fa0dbd6d1964f171ac6fff82ed9e76c737eb34ae3daf926c4aefc2ce9b"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-win_amd64.whl", hash = "sha256:3fc2e7c3ab006299366b1c8256e452f00eb1659d0e4790b140633627c7d947b7"}, - {file = "rapidfuzz-3.14.2-cp313-cp313-win_arm64.whl", hash = "sha256:def48d5010ddcd2a80b44f14bf0172c29bfc27906d13c0ea69a6e3c00e6f225c"}, - {file = "rapidfuzz-3.14.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a39952b8e033758ee15b2de48a5b0689c83ea6bd93c8df3635f2fbf21e52fd25"}, - {file = "rapidfuzz-3.14.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f786811555869b5961b3718b007179e87d73c47414afee5fb882ae1b9b174c0c"}, - {file = "rapidfuzz-3.14.2-cp313-cp313t-win32.whl", hash = "sha256:6c0a25490a99c4b73f1deca3efae004df5f2b254760d98cac8d93becf41260d4"}, - {file = "rapidfuzz-3.14.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e5af2dab8ec5a180d9ff24fbb5b25e589848b93cccb755eceb0bf0e3cfed7e5c"}, - {file = "rapidfuzz-3.14.2-cp313-cp313t-win_arm64.whl", hash = "sha256:8cf2aefb0d246d540ea83b4648db690bd7e25d34a7c23c5f250dcba2e4989192"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:ace3a6b108679888833cdceea9a6231e406db202b8336eaf68279fe71a1d2ac4"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:32c7cc978447202ba592e197228767b230d85e52e5ef229e2b22e51c8e3d06ad"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a479a824cbf6a646bcec1c34fbbfb85393d03eb2811657e3a6536298d435f76"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a3bc0c8b65dcd1e55a1cc42a7c7b34e93ad5d4bd1501dc998f4625042e1b110"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:217b46bf096818df16c0e2c43202aa8352e67c4379b1d5f25e98c5d1c7f5414d"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:07d3e8afeeb81044873644e505e56ba06d8bdcc291ef7e26ac0f54c58309267d"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:b7832c8707bfa4f9b081def64aa49954d4813cff7fc9ff4a0b184a4e8697147f"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:35581ba6981e016333063c52719c0b0b1bef0f944e641ad0f4ea34e0b39161f3"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:fbd5152169dc3f6c894c24fc04813f50bf9b929d137f2b965ac926e03329ceba"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98a119c3f9b152e9b62ec43520392669bd8deae9df269f30569f1c87bf6055a4"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:9e84164e7a68f9c3523c5d104dda6601202b39bae0aac1b73a4f119d387275c4"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:64c67402b86a073666f92c2807811e3817a17fedfe505fe89a9f93eea264481c"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-win32.whl", hash = "sha256:58d79f4df3e4332b31e671f9487f0c215856cf1f2d9ac3848ac10c27262fd723"}, - {file = "rapidfuzz-3.14.2-cp314-cp314-win_amd64.whl", hash = "sha256:dc6fe7a27ad9e233c155e89b7e1d9b6d13963e3261ea5b30f3e79c3556c49bc9"}, - {file = 
"rapidfuzz-3.14.2-cp314-cp314-win_arm64.whl", hash = "sha256:bb4e96d80de7e6364850a2e168e899b8e85ab80ce19827cc4fbe0aa3c57f8124"}, - {file = "rapidfuzz-3.14.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:c7d4d0927a6b1ef2529a8cc57adf2ce965f7aaef324a4d1ae826d0de43ab4f82"}, - {file = "rapidfuzz-3.14.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c0fae06e7fb4be18e86eb51e77f0d441975a3ba9ef963f957d750a2a41536ba1"}, - {file = "rapidfuzz-3.14.2-cp314-cp314t-win32.whl", hash = "sha256:d1d3ef72665d460b7b3e61d3dff4341a195dcb3250b4471eef71db23fca2d91a"}, - {file = "rapidfuzz-3.14.2-cp314-cp314t-win_amd64.whl", hash = "sha256:3a0960c5c11a34e8129a3062f1b1cbb371fad364e2195ebe46a88a9d5eeec0f1"}, - {file = "rapidfuzz-3.14.2-cp314-cp314t-win_arm64.whl", hash = "sha256:ed29600e55d7df104d5778d499678c305e32e3ccfa873489a7c8304489c5f8f3"}, - {file = "rapidfuzz-3.14.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:172630396d8bdbb5ea1a58e82afc489c8e18076e1f2b2edea20cb30f8926325a"}, - {file = "rapidfuzz-3.14.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:6cff0d6749fac8dd7fdf26d0604d8a47c5ee786061972077d71ec7ac0fb7ced2"}, - {file = "rapidfuzz-3.14.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f558bc2ee3a0bb5d7238ed10a0b76455f2d28c97e93564a1f7855cea4096ef1c"}, - {file = "rapidfuzz-3.14.2.tar.gz", hash = "sha256:69bf91e66aeb84a104aea35e1b3f6b3aa606faaee6db1cfc76950f2a6a828a12"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b9fcd4d751a4fffa17aed1dde41647923c72c74af02459ad1222e3b0022da3a1"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4ad73afb688b36864a8d9b7344a9cf6da186c471e5790cbf541a635ee0f457f2"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5fb2d978a601820d2cfd111e2c221a9a7bfdf84b41a3ccbb96ceef29f2f1ac7"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:1d83b8b712fa37e06d59f29a4b49e2e9e8635e908fbc21552fe4d1163db9d2a1"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:dc8c07801df5206b81ed6bd6c35cb520cf9b6c64b9b0d19d699f8633dc942897"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c71ce6d4231e5ef2e33caa952bfe671cb9fd42e2afb11952df9fad41d5c821f9"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:0e38828d1381a0cceb8a4831212b2f673d46f5129a1897b0451c883eaf4a1747"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da2a007434323904719158e50f3076a4dadb176ce43df28ed14610c773cc9825"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-win32.whl", hash = "sha256:fce3152f94afcfd12f3dd8cf51e48fa606e3cb56719bccebe3b401f43d0714f9"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-win_amd64.whl", hash = "sha256:37d3c653af15cd88592633e942f5407cb4c64184efab163c40fcebad05f25141"}, + {file = "rapidfuzz-3.14.3-cp310-cp310-win_arm64.whl", hash = "sha256:cc594bbcd3c62f647dfac66800f307beaee56b22aaba1c005e9c4c40ed733923"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dea2d113e260a5da0c4003e0a5e9fdf24a9dc2bb9eaa43abd030a1e46ce7837d"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e6c31a4aa68cfa75d7eede8b0ed24b9e458447db604c2db53f358be9843d81d3"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:02821366d928e68ddcb567fed8723dad7ea3a979fada6283e6914d5858674850"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cfe8df315ab4e6db4e1be72c5170f8e66021acde22cd2f9d04d2058a9fd8162e"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:769f31c60cd79420188fcdb3c823227fc4a6deb35cafec9d14045c7f6743acae"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:54fa03062124e73086dae66a3451c553c1e20a39c077fd704dc7154092c34c63"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:834d1e818005ed0d4ae38f6b87b86fad9b0a74085467ece0727d20e15077c094"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:948b00e8476a91f510dd1ec07272efc7d78c275d83b630455559671d4e33b678"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-win32.whl", hash = "sha256:43d0305c36f504232f18ea04e55f2059bb89f169d3119c4ea96a0e15b59e2a91"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-win_amd64.whl", hash = "sha256:ef6bf930b947bd0735c550683939a032090f1d688dfd8861d6b45307b96fd5c5"}, + {file = "rapidfuzz-3.14.3-cp311-cp311-win_arm64.whl", hash = "sha256:f3eb0ff3b75d6fdccd40b55e7414bb859a1cda77c52762c9c82b85569f5088e7"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:685c93ea961d135893b5984a5a9851637d23767feabe414ec974f43babbd8226"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fa7c8f26f009f8c673fbfb443792f0cf8cf50c4e18121ff1e285b5e08a94fbdb"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:57f878330c8d361b2ce76cebb8e3e1dc827293b6abf404e67d53260d27b5d941"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6c5f545f454871e6af05753a0172849c82feaf0f521c5ca62ba09e1b382d6382"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:07aa0b5d8863e3151e05026a28e0d924accf0a7a3b605da978f0359bb804df43"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73b07566bc7e010e7b5bd490fb04bb312e820970180df6b5655e9e6224c137db"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6de00eb84c71476af7d3110cf25d8fe7c792d7f5fa86764ef0b4ca97e78ca3ed"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:d7843a1abf0091773a530636fdd2a49a41bcae22f9910b86b4f903e76ddc82dc"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-win32.whl", hash = "sha256:dea97ac3ca18cd3ba8f3d04b5c1fe4aa60e58e8d9b7793d3bd595fdb04128d7a"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-win_amd64.whl", hash = "sha256:b5100fd6bcee4d27f28f4e0a1c6b5127bc8ba7c2a9959cad9eab0bf4a7ab3329"}, + {file = "rapidfuzz-3.14.3-cp312-cp312-win_arm64.whl", hash = "sha256:4e49c9e992bc5fc873bd0fff7ef16a4405130ec42f2ce3d2b735ba5d3d4eb70f"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dbcb726064b12f356bf10fffdb6db4b6dce5390b23627c08652b3f6e49aa56ae"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1704fc70d214294e554a2421b473779bcdeef715881c5e927dc0f11e1692a0ff"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc65e72790ddfd310c2c8912b45106e3800fefe160b0c2ef4d6b6fec4e826457"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43e38c1305cffae8472572a0584d4ffc2f130865586a81038ca3965301f7c97c"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:e195a77d06c03c98b3fc06b8a28576ba824392ce40de8c708f96ce04849a052e"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1b7ef2f4b8583a744338a18f12c69693c194fb6777c0e9ada98cd4d9e8f09d10"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a2135b138bcdcb4c3742d417f215ac2d8c2b87bde15b0feede231ae95f09ec41"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33a325ed0e8e1aa20c3e75f8ab057a7b248fdea7843c2a19ade0008906c14af0"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-win32.whl", hash = "sha256:8383b6d0d92f6cd008f3c9216535be215a064b2cc890398a678b56e6d280cb63"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-win_amd64.whl", hash = 
"sha256:e6b5e3036976f0fde888687d91be86d81f9ac5f7b02e218913c38285b756be6c"}, + {file = "rapidfuzz-3.14.3-cp313-cp313-win_arm64.whl", hash = "sha256:7ba009977601d8b0828bfac9a110b195b3e4e79b350dcfa48c11269a9f1918a0"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a0a28add871425c2fe94358c6300bbeb0bc2ed828ca003420ac6825408f5a424"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:010e12e2411a4854b0434f920e72b717c43f8ec48d57e7affe5c42ecfa05dd0e"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cfc3d57abd83c734d1714ec39c88a34dd69c85474918ebc21296f1e61eb5ca8"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:89acb8cbb52904f763e5ac238083b9fc193bed8d1f03c80568b20e4cef43a519"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-manylinux_2_31_armv7l.whl", hash = "sha256:7d9af908c2f371bfb9c985bd134e295038e3031e666e4b2ade1e7cb7f5af2f1a"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:1f1925619627f8798f8c3a391d81071336942e5fe8467bc3c567f982e7ce2897"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:152555187360978119e98ce3e8263d70dd0c40c7541193fc302e9b7125cf8f58"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52619d25a09546b8db078981ca88939d72caa6b8701edd8b22e16482a38e799f"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-win32.whl", hash = "sha256:489ce98a895c98cad284f0a47960c3e264c724cb4cfd47a1430fa091c0c25204"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-win_amd64.whl", hash = "sha256:656e52b054d5b5c2524169240e50cfa080b04b1c613c5f90a2465e84888d6f15"}, + {file = "rapidfuzz-3.14.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c7e40c0a0af02ad6e57e89f62bef8604f55a04ecae90b0ceeda591bbf5923317"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = 
"sha256:442125473b247227d3f2de807a11da6c08ccf536572d1be943f8e262bae7e4ea"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1ec0c8c0c3d4f97ced46b2e191e883f8c82dbbf6d5ebc1842366d7eff13cd5a6"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2dc37bc20272f388b8c3a4eba4febc6e77e50a8f450c472def4751e7678f55e4"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dee362e7e79bae940a5e2b3f6d09c6554db6a4e301cc68343886c08be99844f1"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:4b39921df948388a863f0e267edf2c36302983459b021ab928d4b801cbe6a421"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:beda6aa9bc44d1d81242e7b291b446be352d3451f8217fcb068fc2933927d53b"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:6a014ba09657abfcfeed64b7d09407acb29af436d7fc075b23a298a7e4a6b41c"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:32eeafa3abce138bb725550c0e228fc7eaeec7059aa8093d9cbbec2b58c2371a"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-win32.whl", hash = "sha256:adb44d996fc610c7da8c5048775b21db60dd63b1548f078e95858c05c86876a3"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-win_amd64.whl", hash = "sha256:f3d15d8527e2b293e38ce6e437631af0708df29eafd7c9fc48210854c94472f9"}, + {file = "rapidfuzz-3.14.3-cp314-cp314-win_arm64.whl", hash = "sha256:576e4b9012a67e0bf54fccb69a7b6c94d4e86a9540a62f1a5144977359133583"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:cec3c0da88562727dd5a5a364bd9efeb535400ff0bfb1443156dd139a1dd7b50"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d1fa009f8b1100e4880868137e7bf0501422898f7674f2adcd85d5a67f041296"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:1b86daa7419b5e8b180690efd1fdbac43ff19230803282521c5b5a9c83977655"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7bd1816db05d6c5ffb3a4df0a2b7b56fb8c81ef584d08e37058afa217da91b1"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:33da4bbaf44e9755b0ce192597f3bde7372fe2e381ab305f41b707a95ac57aa7"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3fecce764cf5a991ee2195a844196da840aba72029b2612f95ac68a8b74946bf"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:ecd7453e02cf072258c3a6b8e930230d789d5d46cc849503729f9ce475d0e785"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ea188aa00e9bcae8c8411f006a5f2f06c4607a02f24eab0d8dc58566aa911f35"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-win32.whl", hash = "sha256:7ccbf68100c170e9a0581accbe9291850936711548c6688ce3bfb897b8c589ad"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-win_amd64.whl", hash = "sha256:9ec02e62ae765a318d6de38df609c57fc6dacc65c0ed1fd489036834fd8a620c"}, + {file = "rapidfuzz-3.14.3-cp314-cp314t-win_arm64.whl", hash = "sha256:e805e52322ae29aa945baf7168b6c898120fbc16d2b8f940b658a5e9e3999253"}, + {file = "rapidfuzz-3.14.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7cf174b52cb3ef5d49e45d0a1133b7e7d0ecf770ed01f97ae9962c5c91d97d23"}, + {file = "rapidfuzz-3.14.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:442cba39957a008dfc5bdef21a9c3f4379e30ffb4e41b8555dbaf4887eca9300"}, + {file = "rapidfuzz-3.14.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1faa0f8f76ba75fd7b142c984947c280ef6558b5067af2ae9b8729b0a0f99ede"}, + {file = "rapidfuzz-3.14.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1e6eefec45625c634926a9fd46c9e4f31118ac8f3156fff9494422cee45207e6"}, + {file = 
"rapidfuzz-3.14.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56fefb4382bb12250f164250240b9dd7772e41c5c8ae976fd598a32292449cc5"}, + {file = "rapidfuzz-3.14.3.tar.gz", hash = "sha256:2491937177868bc4b1e469087601d53f925e8d270ccc21e07404b4b5814b7b5f"}, ] [package.extras] @@ -3780,128 +3769,128 @@ typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} [[package]] name = "regex" -version = "2025.10.23" +version = "2025.11.3" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.9" groups = ["main"] markers = "python_version == \"3.11\" or python_version >= \"3.12\"" files = [ - {file = "regex-2025.10.23-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:17bbcde374bef1c5fad9b131f0e28a6a24856dd90368d8c0201e2b5a69533daa"}, - {file = "regex-2025.10.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4e10434279cc8567f99ca6e018e9025d14f2fded2a603380b6be2090f476426"}, - {file = "regex-2025.10.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c9bb421cbe7012c744a5a56cf4d6c80829c72edb1a2991677299c988d6339c8"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:275cd1c2ed8c4a78ebfa489618d7aee762e8b4732da73573c3e38236ec5f65de"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7b426ae7952f3dc1e73a86056d520bd4e5f021397484a6835902fc5648bcacce"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c5cdaf5b6d37c7da1967dbe729d819461aab6a98a072feef65bbcff0a6e60649"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bfeff0b08f296ab28b4332a7e03ca31c437ee78b541ebc874bbf540e5932f8d"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:5f97236a67307b775f30a74ef722b64b38b7ab7ba3bb4a2508518a5de545459c"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:be19e7de499940cd72475fb8e46ab2ecb1cf5906bebdd18a89f9329afb1df82f"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:883df76ee42d9ecb82b37ff8d01caea5895b3f49630a64d21111078bbf8ef64c"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2e9117d1d35fc2addae6281019ecc70dc21c30014b0004f657558b91c6a8f1a7"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0ff1307f531a5d8cf5c20ea517254551ff0a8dc722193aab66c656c5a900ea68"}, - {file = "regex-2025.10.23-cp310-cp310-win32.whl", hash = "sha256:7888475787cbfee4a7cd32998eeffe9a28129fa44ae0f691b96cb3939183ef41"}, - {file = "regex-2025.10.23-cp310-cp310-win_amd64.whl", hash = "sha256:ec41a905908496ce4906dab20fb103c814558db1d69afc12c2f384549c17936a"}, - {file = "regex-2025.10.23-cp310-cp310-win_arm64.whl", hash = "sha256:b2b7f19a764d5e966d5a62bf2c28a8b4093cc864c6734510bdb4aeb840aec5e6"}, - {file = "regex-2025.10.23-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6c531155bf9179345e85032052a1e5fe1a696a6abf9cea54b97e8baefff970fd"}, - {file = "regex-2025.10.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:912e9df4e89d383681268d38ad8f5780d7cccd94ba0e9aa09ca7ab7ab4f8e7eb"}, - {file = "regex-2025.10.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f375c61bfc3138b13e762fe0ae76e3bdca92497816936534a0177201666f44f"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e248cc9446081119128ed002a3801f8031e0c219b5d3c64d3cc627da29ac0a33"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b52bf9282fdf401e4f4e721f0f61fc4b159b1307244517789702407dd74e38ca"}, - {file = 
"regex-2025.10.23-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c084889ab2c59765a0d5ac602fd1c3c244f9b3fcc9a65fdc7ba6b74c5287490"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80e8eb79009bdb0936658c44ca06e2fbbca67792013e3818eea3f5f228971c2"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b6f259118ba87b814a8ec475380aee5f5ae97a75852a3507cf31d055b01b5b40"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:9b8c72a242683dcc72d37595c4f1278dfd7642b769e46700a8df11eab19dfd82"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a8d7b7a0a3df9952f9965342159e0c1f05384c0f056a47ce8b61034f8cecbe83"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:413bfea20a484c524858125e92b9ce6ffdd0a4b97d4ff96b5859aa119b0f1bdd"}, - {file = "regex-2025.10.23-cp311-cp311-win32.whl", hash = "sha256:f76deef1f1019a17dad98f408b8f7afc4bd007cbe835ae77b737e8c7f19ae575"}, - {file = "regex-2025.10.23-cp311-cp311-win_amd64.whl", hash = "sha256:59bba9f7125536f23fdab5deeea08da0c287a64c1d3acc1c7e99515809824de8"}, - {file = "regex-2025.10.23-cp311-cp311-win_arm64.whl", hash = "sha256:b103a752b6f1632ca420225718d6ed83f6a6ced3016dd0a4ab9a6825312de566"}, - {file = "regex-2025.10.23-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7a44d9c00f7a0a02d3b777429281376370f3d13d2c75ae74eb94e11ebcf4a7fc"}, - {file = "regex-2025.10.23-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b83601f84fde939ae3478bb32a3aef36f61b58c3208d825c7e8ce1a735f143f2"}, - {file = "regex-2025.10.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec13647907bb9d15fd192bbfe89ff06612e098a5709e7d6ecabbdd8f7908fc45"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:78d76dd2957d62501084e7012ddafc5fcd406dd982b7a9ca1ea76e8eaaf73e7e"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8668e5f067e31a47699ebb354f43aeb9c0ef136f915bd864243098524482ac43"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a32433fe3deb4b2d8eda88790d2808fed0dc097e84f5e683b4cd4f42edef6cca"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d97d73818c642c938db14c0668167f8d39520ca9d983604575ade3fda193afcc"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bca7feecc72ee33579e9f6ddf8babbe473045717a0e7dbc347099530f96e8b9a"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7e24af51e907d7457cc4a72691ec458320b9ae67dc492f63209f01eecb09de32"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d10bcde58bbdf18146f3a69ec46dd03233b94a4a5632af97aa5378da3a47d288"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:44383bc0c933388516c2692c9a7503e1f4a67e982f20b9a29d2fb70c6494f147"}, - {file = "regex-2025.10.23-cp312-cp312-win32.whl", hash = "sha256:6040a86f95438a0114bba16e51dfe27f1bc004fd29fe725f54a586f6d522b079"}, - {file = "regex-2025.10.23-cp312-cp312-win_amd64.whl", hash = "sha256:436b4c4352fe0762e3bfa34a5567079baa2ef22aa9c37cf4d128979ccfcad842"}, - {file = "regex-2025.10.23-cp312-cp312-win_arm64.whl", hash = "sha256:f4b1b1991617055b46aff6f6db24888c1f05f4db9801349d23f09ed0714a9335"}, - {file = "regex-2025.10.23-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:b7690f95404a1293923a296981fd943cca12c31a41af9c21ba3edd06398fc193"}, - {file = "regex-2025.10.23-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1a32d77aeaea58a13230100dd8797ac1a84c457f3af2fdf0d81ea689d5a9105b"}, - {file = 
"regex-2025.10.23-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b24b29402f264f70a3c81f45974323b41764ff7159655360543b7cabb73e7d2f"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:563824a08c7c03d96856d84b46fdb3bbb7cfbdf79da7ef68725cda2ce169c72a"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a0ec8bdd88d2e2659c3518087ee34b37e20bd169419ffead4240a7004e8ed03b"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b577601bfe1d33913fcd9276d7607bbac827c4798d9e14d04bf37d417a6c41cb"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7c9f2c68ac6cb3de94eea08a437a75eaa2bd33f9e97c84836ca0b610a5804368"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89f8b9ea3830c79468e26b0e21c3585f69f105157c2154a36f6b7839f8afb351"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:98fd84c4e4ea185b3bb5bf065261ab45867d8875032f358a435647285c722673"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1e11d3e5887b8b096f96b4154dfb902f29c723a9556639586cd140e77e28b313"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f13450328a6634348d47a88367e06b64c9d84980ef6a748f717b13f8ce64e87"}, - {file = "regex-2025.10.23-cp313-cp313-win32.whl", hash = "sha256:37be9296598a30c6a20236248cb8b2c07ffd54d095b75d3a2a2ee5babdc51df1"}, - {file = "regex-2025.10.23-cp313-cp313-win_amd64.whl", hash = "sha256:ea7a3c283ce0f06fe789365841e9174ba05f8db16e2fd6ae00a02df9572c04c0"}, - {file = "regex-2025.10.23-cp313-cp313-win_arm64.whl", hash = "sha256:d9a4953575f300a7bab71afa4cd4ac061c7697c89590a2902b536783eeb49a4f"}, - {file = 
"regex-2025.10.23-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7d6606524fa77b3912c9ef52a42ef63c6cfbfc1077e9dc6296cd5da0da286044"}, - {file = "regex-2025.10.23-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c037aadf4d64bdc38af7db3dbd34877a057ce6524eefcb2914d6d41c56f968cc"}, - {file = "regex-2025.10.23-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:99018c331fb2529084a0c9b4c713dfa49fafb47c7712422e49467c13a636c656"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fd8aba965604d70306eb90a35528f776e59112a7114a5162824d43b76fa27f58"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:238e67264b4013e74136c49f883734f68656adf8257bfa13b515626b31b20f8e"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b2eb48bd9848d66fd04826382f5e8491ae633de3233a3d64d58ceb4ecfa2113a"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d36591ce06d047d0c0fe2fc5f14bfbd5b4525d08a7b6a279379085e13f0e3d0e"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b5d4ece8628d6e364302006366cea3ee887db397faebacc5dacf8ef19e064cf8"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:39a7e8083959cb1c4ff74e483eecb5a65d3b3e1d821b256e54baf61782c906c6"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:842d449a8fefe546f311656cf8c0d6729b08c09a185f1cad94c756210286d6a8"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d614986dc68506be8f00474f4f6960e03e4ca9883f7df47744800e7d7c08a494"}, - {file = "regex-2025.10.23-cp313-cp313t-win32.whl", hash = "sha256:a5b7a26b51a9df473ec16a1934d117443a775ceb7b39b78670b2e21893c330c9"}, - {file = 
"regex-2025.10.23-cp313-cp313t-win_amd64.whl", hash = "sha256:ce81c5544a5453f61cb6f548ed358cfb111e3b23f3cd42d250a4077a6be2a7b6"}, - {file = "regex-2025.10.23-cp313-cp313t-win_arm64.whl", hash = "sha256:e9bf7f6699f490e4e43c44757aa179dab24d1960999c84ab5c3d5377714ed473"}, - {file = "regex-2025.10.23-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:5b5cb5b6344c4c4c24b2dc87b0bfee78202b07ef7633385df70da7fcf6f7cec6"}, - {file = "regex-2025.10.23-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a6ce7973384c37bdf0f371a843f95a6e6f4e1489e10e0cf57330198df72959c5"}, - {file = "regex-2025.10.23-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2ee3663f2c334959016b56e3bd0dd187cbc73f948e3a3af14c3caaa0c3035d10"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2003cc82a579107e70d013482acce8ba773293f2db534fb532738395c557ff34"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:182c452279365a93a9f45874f7f191ec1c51e1f1eb41bf2b16563f1a40c1da3a"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b1249e9ff581c5b658c8f0437f883b01f1edcf424a16388591e7c05e5e9e8b0c"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b841698f93db3ccc36caa1900d2a3be281d9539b822dc012f08fc80b46a3224"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:956d89e0c92d471e8f7eee73f73fdff5ed345886378c45a43175a77538a1ffe4"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5c259cb363299a0d90d63b5c0d7568ee98419861618a95ee9d91a41cb9954462"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:185d2b18c062820b3a40d8fefa223a83f10b20a674bf6e8c4a432e8dfd844627"}, - {file = 
"regex-2025.10.23-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:281d87fa790049c2b7c1b4253121edd80b392b19b5a3d28dc2a77579cb2a58ec"}, - {file = "regex-2025.10.23-cp314-cp314-win32.whl", hash = "sha256:63b81eef3656072e4ca87c58084c7a9c2b81d41a300b157be635a8a675aacfb8"}, - {file = "regex-2025.10.23-cp314-cp314-win_amd64.whl", hash = "sha256:0967c5b86f274800a34a4ed862dfab56928144d03cb18821c5153f8777947796"}, - {file = "regex-2025.10.23-cp314-cp314-win_arm64.whl", hash = "sha256:c70dfe58b0a00b36aa04cdb0f798bf3e0adc31747641f69e191109fd8572c9a9"}, - {file = "regex-2025.10.23-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:1f5799ea1787aa6de6c150377d11afad39a38afd033f0c5247aecb997978c422"}, - {file = "regex-2025.10.23-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a9639ab7540cfea45ef57d16dcbea2e22de351998d614c3ad2f9778fa3bdd788"}, - {file = "regex-2025.10.23-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:08f52122c352eb44c3421dab78b9b73a8a77a282cc8314ae576fcaa92b780d10"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebf1baebef1c4088ad5a5623decec6b52950f0e4d7a0ae4d48f0a99f8c9cb7d7"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:16b0f1c2e2d566c562d5c384c2b492646be0a19798532fdc1fdedacc66e3223f"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7ada5d9dceafaab92646aa00c10a9efd9b09942dd9b0d7c5a4b73db92cc7e61"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a36b4005770044bf08edecc798f0e41a75795b9e7c9c12fe29da8d792ef870c"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:af7b2661dcc032da1fae82069b5ebf2ac1dfcd5359ef8b35e1367bfc92181432"}, - {file = 
"regex-2025.10.23-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:1cb976810ac1416a67562c2e5ba0accf6f928932320fef302e08100ed681b38e"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:1a56a54be3897d62f54290190fbcd754bff6932934529fbf5b29933da28fcd43"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8f3e6d202fb52c2153f532043bbcf618fd177df47b0b306741eb9b60ba96edc3"}, - {file = "regex-2025.10.23-cp314-cp314t-win32.whl", hash = "sha256:1fa1186966b2621b1769fd467c7b22e317e6ba2d2cdcecc42ea3089ef04a8521"}, - {file = "regex-2025.10.23-cp314-cp314t-win_amd64.whl", hash = "sha256:08a15d40ce28362eac3e78e83d75475147869c1ff86bc93285f43b4f4431a741"}, - {file = "regex-2025.10.23-cp314-cp314t-win_arm64.whl", hash = "sha256:a93e97338e1c8ea2649e130dcfbe8cd69bba5e1e163834752ab64dcb4de6d5ed"}, - {file = "regex-2025.10.23-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8d286760ee5b77fd21cf6b33cc45e0bffd1deeda59ca65b9be996f590a9828a"}, - {file = "regex-2025.10.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e72e3b84b170fec02193d32620a0a7060a22e52c46e45957dcd14742e0d28fb"}, - {file = "regex-2025.10.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ec506e8114fa12d21616deb44800f536d6bf2e1a69253dbf611f69af92395c99"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7e481f9710e8e24228ce2c77b41db7662a3f68853395da86a292b49dadca2aa"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4663ff2fc367735ae7b90b4f0e05b25554446df4addafc76fdaacaaa0ba852b5"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0879dd3251a42d2e9b938e1e03b1e9f60de90b4d153015193f5077a376a18439"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:651c58aecbab7e97bdf8ec76298a28d2bf2b6238c099ec6bf32e6d41e2f9a9cb"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ceabc62a0e879169cd1bf066063bd6991c3e41e437628936a2ce66e0e2071c32"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bfdf4e9aa3e7b7d02fda97509b4ceeed34542361694ecc0a81db1688373ecfbd"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:92f565ff9beb9f51bc7cc8c578a7e92eb5c4576b69043a4c58cd05d73fda83c5"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:abbea548b1076eaf8635caf1071c9d86efdf0fa74abe71fca26c05a2d64cda80"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:33535dcf34f47821381e341f7b715cbd027deda4223af4d3932adcd371d3192a"}, - {file = "regex-2025.10.23-cp39-cp39-win32.whl", hash = "sha256:345c9df49a15bf6460534b104b336581bc5f35c286cac526416e7a63d389b09b"}, - {file = "regex-2025.10.23-cp39-cp39-win_amd64.whl", hash = "sha256:f668fe1fd3358c5423355a289a4a003e58005ce829d217b828f80bd605a90145"}, - {file = "regex-2025.10.23-cp39-cp39-win_arm64.whl", hash = "sha256:07a3fd25d9074923e4d7258b551ae35ab6bdfe01904b8f0d5341c7d8b20eb18d"}, - {file = "regex-2025.10.23.tar.gz", hash = "sha256:8cbaf8ceb88f96ae2356d01b9adf5e6306fa42fa6f7eab6b97794e37c959ac26"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b441a4ae2c8049106e8b39973bfbddfb25a179dda2bdb99b0eeb60c40a6a3af"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2fa2eed3f76677777345d2f81ee89f5de2f5745910e805f7af7386a920fa7313"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d8b4a27eebd684319bdf473d39f1d79eed36bf2cd34bd4465cdb4618d82b3d56"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:5cf77eac15bd264986c4a2c63353212c095b40f3affb2bc6b4ef80c4776c1a28"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b7f9ee819f94c6abfa56ec7b1dbab586f41ebbdc0a57e6524bd5e7f487a878c7"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:838441333bc90b829406d4a03cb4b8bf7656231b84358628b0406d803931ef32"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cfe6d3f0c9e3b7e8c0c694b24d25e677776f5ca26dce46fd6b0489f9c8339391"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ab815eb8a96379a27c3b6157fcb127c8f59c36f043c1678110cea492868f1d5"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:728a9d2d173a65b62bdc380b7932dd8e74ed4295279a8fe1021204ce210803e7"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:509dc827f89c15c66a0c216331260d777dd6c81e9a4e4f830e662b0bb296c313"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:849202cd789e5f3cf5dcc7822c34b502181b4824a65ff20ce82da5524e45e8e9"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b6f78f98741dcc89607c16b1e9426ee46ce4bf31ac5e6b0d40e81c89f3481ea5"}, + {file = "regex-2025.11.3-cp310-cp310-win32.whl", hash = "sha256:149eb0bba95231fb4f6d37c8f760ec9fa6fabf65bab555e128dde5f2475193ec"}, + {file = "regex-2025.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:ee3a83ce492074c35a74cc76cf8235d49e77b757193a5365ff86e3f2f93db9fd"}, + {file = "regex-2025.11.3-cp310-cp310-win_arm64.whl", hash = "sha256:38af559ad934a7b35147716655d4a2f79fcef2d695ddfe06a06ba40ae631fa7e"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031"}, + {file = 
"regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e"}, + {file = "regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf"}, + {file = "regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a"}, + {file = "regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = 
"sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0"}, + {file = "regex-2025.11.3-cp312-cp312-win32.whl", hash = 
"sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204"}, + {file = "regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9"}, + {file = "regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7"}, + {file = "regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c"}, + {file = "regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5"}, + {file = "regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2"}, + {file = "regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a"}, + {file = "regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c"}, + {file = "regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be"}, + {file = 
"regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed"}, + {file = "regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4"}, + {file = "regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad"}, + {file = "regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379"}, + {file = "regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38"}, + {file = "regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de"}, + {file = "regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81519e25707fc076978c6143b81ea3dc853f176895af05bf7ec51effe818aeec"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3bf28b1873a8af8bbb58c26cc56ea6e534d80053b41fb511a35795b6de507e6a"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:856a25c73b697f2ce2a24e7968285579e62577a048526161a2c0f53090bea9f9"}, + {file = 
"regex-2025.11.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a3d571bd95fade53c86c0517f859477ff3a93c3fde10c9e669086f038e0f207"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:732aea6de26051af97b94bc98ed86448821f839d058e5d259c72bf6d73ad0fc0"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:51c1c1847128238f54930edb8805b660305dca164645a9fd29243f5610beea34"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22dd622a402aad4558277305350699b2be14bc59f64d64ae1d928ce7d072dced"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f3b5a391c7597ffa96b41bd5cbd2ed0305f515fcbb367dfa72735679d5502364"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cc4076a5b4f36d849fd709284b4a3b112326652f3b0466f04002a6c15a0c96c1"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a295ca2bba5c1c885826ce3125fa0b9f702a1be547d821c01d65f199e10c01e2"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b4774ff32f18e0504bfc4e59a3e71e18d83bc1e171a3c8ed75013958a03b2f14"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e7d1cdfa88ef33a2ae6aa0d707f9255eb286ffbd90045f1088246833223aee"}, + {file = "regex-2025.11.3-cp39-cp39-win32.whl", hash = "sha256:74d04244852ff73b32eeede4f76f51c5bcf44bc3c207bc3e6cf1c5c45b890708"}, + {file = "regex-2025.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:7a50cd39f73faa34ec18d6720ee25ef10c4c1839514186fcda658a06c06057a2"}, + {file = "regex-2025.11.3-cp39-cp39-win_arm64.whl", hash = "sha256:43b4fb020e779ca81c1b5255015fe2b82816c76ec982354534ad9ec09ad7c9e3"}, + {file = "regex-2025.11.3.tar.gz", hash = 
"sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01"}, ] [[package]] @@ -3950,7 +3939,7 @@ description = "RestrictedPython is a defined subset of the Python language which optional = false python-versions = "<3.15,>=3.9" groups = ["main"] -markers = "python_version == \"3.11\" or python_full_version == \"3.12.0\"" +markers = "python_version >= \"3.12\" and python_version < \"3.14\" or python_version == \"3.11\"" files = [ {file = "restrictedpython-8.1-py3-none-any.whl", hash = "sha256:4769449c6cdb10f2071649ba386902befff0eff2a8fd6217989fa7b16aeae926"}, {file = "restrictedpython-8.1.tar.gz", hash = "sha256:4a69304aceacf6bee74bdf153c728221d4e3109b39acbfe00b3494927080d898"}, @@ -4981,4 +4970,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "41981e274e958a70f5034827967fc28998561ae42040776fa79113456f26c156" +content-hash = "9c94913f5a5a17981426d96d1ad0d6120ca2a0e09bc44e83c30e56b514910130" diff --git a/api/pyproject.toml b/api/pyproject.toml index b5d1f69d63..29129d2b7a 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "api" -version = "0.60.2" +version = "0.61.0" description = "Agenta API" authors = [ { name = "Mahmoud Mabrouk", email = "mahmoud@agenta.ai" }, @@ -21,10 +21,10 @@ pydantic = "^2.11.7" uvicorn = "^0.34.3" redis = "^6.4.0" sendgrid = "^6.12.4" -restrictedpython = { version = "^8.0", python = ">=3.11,<=3.12" } +restrictedpython = { version = "^8.0", python = ">=3.11,<3.14" } celery = "^5.5.3" newrelic = "^10.17.0" -openai = "^1.106.0" +openai = ">=1.106.0" sqlalchemy = "^2.0.43" asyncpg = "^0.30.0" uuid-utils = "^0.10.0" @@ -57,7 +57,7 @@ python-jsonpath = "^2.0.0" h11 = "^0.16.0" ecdsa = "^0.19.1" bson = "^0.5.10" -agenta = "^0.59.5" +agenta = "^0.60.1" tiktoken = "0.11.0" google-auth = ">=2.23,<3" diff --git a/sdk/agenta/__init__.py b/sdk/agenta/__init__.py index 829d0519bc..a85ac5093f 100644 --- a/sdk/agenta/__init__.py +++ 
b/sdk/agenta/__init__.py @@ -44,6 +44,10 @@ from .sdk import assets as assets from .sdk import tracer +# evaluations +from .sdk import testsets as testsets + + config = PreInitObject("agenta.config", Config) DEFAULT_AGENTA_SINGLETON_INSTANCE = AgentaSingleton() @@ -69,7 +73,8 @@ def init( global api, async_api, tracing, tracer # pylint: disable=global-statement _init( - host=host or api_url, + host=host, + api_url=api_url, api_key=api_key, config_fname=config_fname, redact=redact, diff --git a/sdk/agenta/client/backend/core/http_client.py b/sdk/agenta/client/backend/core/http_client.py index 8aee8acb89..9726651786 100644 --- a/sdk/agenta/client/backend/core/http_client.py +++ b/sdk/agenta/client/backend/core/http_client.py @@ -148,9 +148,9 @@ def get_request_body( json_body = maybe_filter_request_body(json, request_options, omit) # If you have an empty JSON body, you should just send None - return ( - json_body if json_body != {} else None - ), data_body if data_body != {} else None + return (json_body if json_body != {} else None), ( + data_body if data_body != {} else None + ) class HttpClient: diff --git a/sdk/agenta/sdk/__init__.py b/sdk/agenta/sdk/__init__.py index 01d3982121..6f908168d5 100644 --- a/sdk/agenta/sdk/__init__.py +++ b/sdk/agenta/sdk/__init__.py @@ -81,6 +81,7 @@ from .managers.config import ConfigManager from .managers.variant import VariantManager from .managers.deployment import DeploymentManager +from .managers import testsets as testsets config = PreInitObject("agenta.config", Config) diff --git a/sdk/agenta/sdk/agenta_init.py b/sdk/agenta/sdk/agenta_init.py index f6ffa7c73b..45c85a13ff 100644 --- a/sdk/agenta/sdk/agenta_init.py +++ b/sdk/agenta/sdk/agenta_init.py @@ -27,6 +27,7 @@ class AgentaSingleton: def __init__(self): self.host = None + self.api_url = None self.api_key = None self.scope_type = None @@ -41,6 +42,7 @@ def init( self, *, host: Optional[str] = None, + api_url: Optional[str] = None, api_key: Optional[str] = None, 
config_fname: Optional[str] = None, redact: Optional[Callable[..., Any]] = None, @@ -77,16 +79,29 @@ def init( _host = ( host or getenv("AGENTA_HOST") - or config.get("backend_host") or config.get("host") - or getenv("AGENTA_API_URL", "https://cloud.agenta.ai") + or "https://cloud.agenta.ai" ) + _api_url = ( + api_url + or getenv("AGENTA_API_URL") + or config.get("api_url") + or None # NO FALLBACK + ) + + if _api_url: + _host = _api_url.rsplit("/api", 1)[0] + + if _host and not _api_url: + _api_url = _host + "/api" + try: assert _host and isinstance( _host, str ), "Host is required. Please provide a valid host or set AGENTA_HOST environment variable." self.host = parse_url(url=_host) + self.api_url = self.host + "/api" except AssertionError as e: log.error(str(e)) raise @@ -94,19 +109,27 @@ def init( log.error(f"Failed to parse host URL '{_host}': {e}") raise - log.info("Agenta - API URL: %s/api", self.host) + self.api_key = ( + api_key + or getenv("AGENTA_API_KEY") + or config.get("api_key") + or None # NO FALLBACK + ) - self.api_key = api_key or getenv("AGENTA_API_KEY") or config.get("api_key") + log.info("Agenta - API URL: %s", self.api_url) self.scope_type = ( scope_type or getenv("AGENTA_SCOPE_TYPE") or config.get("scope_type") - or None + or None # NO FALLBACK ) self.scope_id = ( - scope_id or getenv("AGENTA_SCOPE_ID") or config.get("scope_id") or None + scope_id + or getenv("AGENTA_SCOPE_ID") + or config.get("scope_id") + or None # NO FALLBACK ) self.tracing = Tracing( @@ -120,12 +143,12 @@ def init( ) self.api = AgentaApi( - base_url=self.host + "/api", + base_url=self.api_url, api_key=self.api_key if self.api_key else "", ) self.async_api = AsyncAgentaApi( - base_url=self.host + "/api", + base_url=self.api_url, api_key=self.api_key if self.api_key else "", ) @@ -172,6 +195,7 @@ def __getattr__(self, key): def init( host: Optional[str] = None, + api_url: Optional[str] = None, api_key: Optional[str] = None, config_fname: Optional[str] = None, redact: 
Optional[Callable[..., Any]] = None, @@ -200,6 +224,7 @@ def init( singleton.init( host=host, + api_url=api_url, api_key=api_key, config_fname=config_fname, redact=redact, diff --git a/sdk/agenta/sdk/decorators/__init__.py b/sdk/agenta/sdk/decorators/__init__.py index e69de29bb2..629cec59bb 100644 --- a/sdk/agenta/sdk/decorators/__init__.py +++ b/sdk/agenta/sdk/decorators/__init__.py @@ -0,0 +1 @@ +from .running import application, evaluator diff --git a/sdk/agenta/sdk/decorators/running.py b/sdk/agenta/sdk/decorators/running.py index 9c13423454..0a44bcd072 100644 --- a/sdk/agenta/sdk/decorators/running.py +++ b/sdk/agenta/sdk/decorators/running.py @@ -28,7 +28,7 @@ from agenta.sdk.middlewares.running.resolver import ( ResolverMiddleware, resolve_interface, - resolve_parameters, + resolve_configuration, ) from agenta.sdk.middlewares.running.vault import ( VaultMiddleware, @@ -39,10 +39,13 @@ register_handler, retrieve_handler, retrieve_interface, - retrieve_parameters, + retrieve_configuration, is_custom_uri, ) +import agenta as ag + + log = get_module_logger(__name__) @@ -123,7 +126,6 @@ def __init__( uri: Optional[str] = None, url: Optional[str] = None, headers: Optional[dict] = None, - runtime: Optional[str] = None, schemas: Optional[dict] = None, # interface: Optional[ @@ -167,7 +169,6 @@ def __init__( self.uri = uri self.url = url self.headers = headers - self.runtime = runtime self.schemas = schemas # self.interface = interface @@ -200,13 +201,21 @@ def __init__( if self.handler: self.interface = retrieve_interface(self.uri) or self.interface - self.parameters = self.parameters or retrieve_parameters(self.uri) if isinstance(self.interface, WorkflowServiceInterface): self.uri = self.interface.uri or self.uri + self.configuration = self.configuration or retrieve_configuration( + self.uri + ) + if not isinstance(self.configuration, WorkflowServiceConfiguration): + self.configuration = WorkflowServiceConfiguration() + self.configuration.parameters = ( + 
self.parameters or self.configuration.parameters + ) + self.parameters = self.configuration.parameters if is_custom_uri(self.uri): self.flags = self.flags or dict() - self.flags["custom"] = True + self.flags["is_custom"] = True def __call__(self, handler: Optional[Callable[..., Any]] = None) -> Workflow: if self.handler is None and handler is not None: @@ -220,7 +229,7 @@ def __call__(self, handler: Optional[Callable[..., Any]] = None) -> Workflow: if is_custom_uri(self.uri): self.flags = self.flags or dict() - self.flags["custom"] = True + self.flags["is_custom"] = True return self.handler @@ -312,6 +321,16 @@ async def invoke( # **kwargs, ) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]: + _flags = {**(self.flags or {}), **(request.flags or {})} + _tags = {**(self.tags or {}), **(request.tags or {})} + _meta = {**(self.meta or {}), **(request.meta or {})} + + credentials = credentials or ( + f"ApiKey {ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_key}" + if ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_key + else None + ) + with tracing_context_manager(TracingContext.get()): tracing_ctx = TracingContext.get() @@ -320,9 +339,9 @@ async def invoke( tracing_ctx.aggregate = self.aggregate tracing_ctx.annotate = self.annotate - tracing_ctx.flags = self.flags - tracing_ctx.tags = self.tags - tracing_ctx.meta = self.meta + tracing_ctx.flags = _flags + tracing_ctx.tags = _tags + tracing_ctx.meta = _meta tracing_ctx.references = self.references tracing_ctx.links = self.links @@ -334,8 +353,9 @@ async def invoke( running_ctx.credentials = credentials running_ctx.interface = self.interface - running_ctx.parameters = self.parameters running_ctx.schemas = self.schemas + running_ctx.configuration = self.configuration + running_ctx.parameters = self.parameters running_ctx.aggregate = self.aggregate running_ctx.annotate = self.annotate @@ -384,24 +404,27 @@ async def inspect( running_ctx.credentials = credentials running_ctx.interface = self.interface - 
running_ctx.parameters = self.parameters running_ctx.schemas = self.schemas + running_ctx.configuration = self.configuration + running_ctx.parameters = self.parameters running_ctx.aggregate = self.aggregate running_ctx.annotate = self.annotate if self.default_request is None: interface = await resolve_interface( - interface=self.interface, **self.kwargs + interface=self.interface, + **self.kwargs, + ) + configuration = await resolve_configuration( + configuration=self.configuration, + **self.kwargs, ) - parameters = await resolve_parameters(**self.kwargs) self.default_request = WorkflowServiceRequest( # interface=interface, - configuration=WorkflowServiceConfiguration( - parameters=parameters, - ), + configuration=configuration, # references=self.references, links=self.links, @@ -452,7 +475,7 @@ async def invoke_workflow( credentials: Optional[str] = None, # **kwargs, -) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse,]: +) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]: return await workflow( data=request.data, # @@ -507,11 +530,14 @@ class application(workflow): def __init__( self, # - slug: str, + slug: Optional[str] = None, *, name: Optional[str] = None, description: Optional[str] = None, # + parameters: Optional[dict] = None, + schemas: Optional[dict] = None, + # variant_slug: Optional[str] = None, # **kwargs, @@ -522,28 +548,99 @@ def __init__( # is_human=False, # None / False / missing is the same ) - if "references" in kwargs: - if isinstance(kwargs["references"], dict): - for key in kwargs["references"]: - if key.startswith("evaluator_"): - del kwargs["references"][key] + if not "references" in kwargs or not isinstance(kwargs["references"], dict): + kwargs["references"] = dict() + + for key in kwargs["references"]: + if key.startswith("evaluator_"): + del kwargs["references"][key] + + if slug is not None: + kwargs["references"]["application"] = {"slug": slug} + if variant_slug is not None: + 
kwargs["references"]["application_variant"] = {"slug": variant_slug} + + super().__init__( + name=name, + description=description, + # + parameters=parameters, + schemas=schemas, + # + **kwargs, + ) + + +async def invoke_application( + request: WorkflowServiceRequest, + # + secrets: Optional[list] = None, + credentials: Optional[str] = None, + # + **kwargs, +) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]: + return await application( + data=request.data, + # + interface=request.interface, + configuration=request.configuration, + # + flags=request.flags, + tags=request.tags, + meta=request.meta, + # + references=request.references, + links=request.links, + # + **kwargs, + )().invoke( + request=request, + # + secrets=secrets, + credentials=credentials, + # + **kwargs, + ) - kwargs["references"]["application"] = {"slug": slug} - if variant_slug is not None: - kwargs["references"]["application_variant"] = {"slug": variant_slug} - super().__init__(name=name, description=description, **kwargs) +async def inspect_application( + request: WorkflowServiceRequest, + # + credentials: Optional[str] = None, + # + **kwargs, +) -> WorkflowServiceRequest: + return await application( + data=request.data, + # + interface=request.interface, + configuration=request.configuration, + # + flags=request.flags, + tags=request.tags, + meta=request.meta, + # + references=request.references, + links=request.links, + )().inspect( + credentials=credentials, + # + **kwargs, + ) class evaluator(workflow): def __init__( self, # - slug: str, + slug: Optional[str] = None, *, name: Optional[str] = None, description: Optional[str] = None, # + parameters: Optional[dict] = None, + schemas: Optional[dict] = None, + # variant_slug: Optional[str] = None, # **kwargs, @@ -554,14 +651,82 @@ def __init__( # is_human=False, # None / False / missing is the same ) - if "references" in kwargs: - if isinstance(kwargs["references"], dict): - for key in kwargs["references"]: - if 
key.startswith("application_"): - del kwargs["references"][key] + if not "references" in kwargs or not isinstance(kwargs["references"], dict): + kwargs["references"] = dict() + + for key in kwargs["references"]: + if key.startswith("application_"): + del kwargs["references"][key] + + if slug is not None: + kwargs["references"]["evaluator"] = {"slug": slug} + if variant_slug is not None: + kwargs["references"]["evaluator_variant"] = {"slug": variant_slug} + + super().__init__( + name=name, + description=description, + # + parameters=parameters, + schemas=schemas, + # + **kwargs, + ) + + +async def invoke_evaluator( + request: WorkflowServiceRequest, + # + secrets: Optional[list] = None, + credentials: Optional[str] = None, + # + **kwargs, +) -> Union[WorkflowServiceBatchResponse, WorkflowServiceStreamResponse]: + return await evaluator( + data=request.data, + # + interface=request.interface, + configuration=request.configuration, + # + flags=request.flags, + tags=request.tags, + meta=request.meta, + # + references=request.references, + links=request.links, + # + **kwargs, + )().invoke( + request=request, + # + secrets=secrets, + credentials=credentials, + # + **kwargs, + ) - kwargs["references"]["evaluator"] = {"slug": slug} - if variant_slug is not None: - kwargs["references"]["evaluator_variant"] = {"slug": variant_slug} - super().__init__(name=name, description=description, **kwargs) +async def inspect_evaluator( + request: WorkflowServiceRequest, + # + credentials: Optional[str] = None, + # + **kwargs, +) -> WorkflowServiceRequest: + return await evaluator( + data=request.data, + # + interface=request.interface, + configuration=request.configuration, + # + flags=request.flags, + tags=request.tags, + meta=request.meta, + # + references=request.references, + links=request.links, + )().inspect( + credentials=credentials, + # + **kwargs, + ) diff --git a/sdk/agenta/sdk/evaluations/__init__.py b/sdk/agenta/sdk/evaluations/__init__.py new file mode 100644 index 
0000000000..4e5223d757 --- /dev/null +++ b/sdk/agenta/sdk/evaluations/__init__.py @@ -0,0 +1,2 @@ +from .preview.evaluate import aevaluate +from .preview.utils import display_evaluation_results as display diff --git a/sdk/agenta/sdk/evaluations/metrics.py b/sdk/agenta/sdk/evaluations/metrics.py new file mode 100644 index 0000000000..be684c5875 --- /dev/null +++ b/sdk/agenta/sdk/evaluations/metrics.py @@ -0,0 +1,37 @@ +from typing import Optional +from uuid import UUID + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.models.evaluations import EvaluationMetrics + +# TODO: ADD TYPES + + +async def arefresh( + run_id: UUID, + scenario_id: Optional[UUID] = None, + # timestamp: Optional[str] = None, + # interval: Optional[float] = None, +) -> EvaluationMetrics: + payload = dict( + run_id=str(run_id), + scenario_id=str(scenario_id) if scenario_id else None, + ) + + response = authed_api()( + method="POST", + endpoint=f"/preview/evaluations/metrics/refresh", + params=payload, + ) + + try: + response.raise_for_status() + except: + print(response.text) + raise + + response = response.json() + + metrics = EvaluationMetrics(**response["metrics"][0]) + + return metrics diff --git a/sdk/agenta/sdk/evaluations/preview/__init__.py b/sdk/agenta/sdk/evaluations/preview/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sdk/agenta/sdk/evaluations/preview/evaluate.py b/sdk/agenta/sdk/evaluations/preview/evaluate.py new file mode 100644 index 0000000000..c5e460ffb6 --- /dev/null +++ b/sdk/agenta/sdk/evaluations/preview/evaluate.py @@ -0,0 +1,770 @@ +from typing import Dict, List, Any, Union, Optional, Tuple +from uuid import UUID +from copy import deepcopy +from datetime import datetime + +from pydantic import BaseModel + +from agenta.sdk.models.evaluations import ( + Origin, + Target, + Link, + Reference, + SimpleEvaluationData, +) +from agenta.sdk.models.workflows import ( + ApplicationRevision, + EvaluatorRevision, + 
WorkflowServiceRequestData, + ApplicationServiceRequest, + EvaluatorServiceRequest, +) +from agenta.sdk.models.testsets import TestsetRevision + +from agenta.sdk.utils.references import get_slug_from_name_and_id +from agenta.sdk.evaluations.preview.utils import fetch_trace_data + +from agenta.sdk.managers.testsets import ( + acreate as acreate_testset, + aretrieve as aretrieve_testset, +) +from agenta.sdk.managers.applications import ( + aupsert as aupsert_application, + aretrieve as aretrieve_application, +) +from agenta.sdk.managers.evaluators import ( + aupsert as aupsert_evaluator, + aretrieve as aretrieve_evaluator, +) +from agenta.sdk.evaluations.runs import ( + acreate as acreate_run, + aclose as aclose_run, + aurl as aget_url, +) +from agenta.sdk.evaluations.scenarios import ( + acreate as aadd_scenario, +) +from agenta.sdk.evaluations.results import ( + acreate as alog_result, +) +from agenta.sdk.evaluations.metrics import ( + arefresh as acompute_metrics, +) + + +from agenta.sdk.models.workflows import ( + WorkflowServiceInterface, + WorkflowServiceConfiguration, +) +from agenta.sdk.decorators.running import ( + invoke_application, + invoke_evaluator, +) + + +class EvaluateSpecs(BaseModel): + testsets: Optional[Target] = None + applications: Optional[Target] = None + evaluators: Optional[Target] = None + + repeats: Optional[int] = None + + +async def _parse_evaluate_kwargs( + *, + testsets: Optional[Target] = None, + applications: Optional[Target] = None, + evaluators: Optional[Target] = None, + # + repeats: Optional[int] = None, + # + specs: Optional[Union[EvaluateSpecs, Dict[str, Any]]] = None, +) -> SimpleEvaluationData: + _specs = deepcopy(specs) + if isinstance(_specs, dict): + _specs = EvaluateSpecs(**_specs) + if _specs and not isinstance(_specs, EvaluateSpecs): + _specs = None + + simple_evaluation_data = SimpleEvaluationData( + testset_steps=testsets or (_specs.testsets if _specs else None), + application_steps=applications or 
            (_specs.applications if _specs else None),
        evaluator_steps=evaluators or (_specs.evaluators if _specs else None),
        #
        repeats=repeats or (_specs.repeats if _specs else None),
    )

    # Fail fast when any of the three mandatory step groups is absent.
    if not simple_evaluation_data.testset_steps:
        raise ValueError("Invalid 'evaluate()' specs: missing testsets")
    if not simple_evaluation_data.application_steps:
        raise ValueError("Invalid 'evaluate()' specs: missing applications")
    if not simple_evaluation_data.evaluator_steps:
        raise ValueError("Invalid 'evaluate()' specs: missing evaluators")

    return simple_evaluation_data


async def _upsert_entities(
    simple_evaluation_data: SimpleEvaluationData,
) -> SimpleEvaluationData:
    """
    Normalize the testset/application/evaluator step specs in place.

    Each group may arrive as a list of revision UUIDs (kept verbatim) or as
    a list of raw payloads/handlers that are first created or upserted
    remotely.  In both cases the group is rewritten as a mapping of
    ``str(revision_id) -> Origin`` (always "custom" here).

    Raises:
        ValueError: when a group cannot be normalized to a non-empty dict
            (e.g. a list mixing UUIDs with non-UUIDs matches neither
            homogeneous branch and ends up empty).
    """
    if simple_evaluation_data.testset_steps:
        if isinstance(simple_evaluation_data.testset_steps, list):
            testset_steps: Dict[str, Origin] = {}

            # Homogeneous list of revision UUIDs: use them directly.
            if all(
                isinstance(testset_revision_id, UUID)
                for testset_revision_id in simple_evaluation_data.testset_steps
            ):
                for testset_revision_id in simple_evaluation_data.testset_steps:
                    if isinstance(testset_revision_id, UUID):
                        testset_steps[str(testset_revision_id)] = "custom"

            # Homogeneous list of testcase lists: create testsets remotely.
            elif all(
                isinstance(testcases_data, List)
                for testcases_data in simple_evaluation_data.testset_steps
            ):
                for testcases_data in simple_evaluation_data.testset_steps:
                    if isinstance(testcases_data, List):
                        if all(isinstance(step, Dict) for step in testcases_data):
                            testset_revision_id = await acreate_testset(
                                data=testcases_data,
                            )
                            testset_steps[str(testset_revision_id)] = "custom"

            simple_evaluation_data.testset_steps = testset_steps

        # A group that did not normalize to a non-empty dict is an error.
        if not simple_evaluation_data.testset_steps or not isinstance(
            simple_evaluation_data.testset_steps, dict
        ):
            raise ValueError(
                "Invalid 'evaluate()' specs: missing or invalid testset steps",
            )

    if simple_evaluation_data.application_steps:
        if isinstance(simple_evaluation_data.application_steps, list):
            application_steps: Dict[str, Origin] = {}

            # Homogeneous list of revision UUIDs: use them directly.
            if all(
                isinstance(application_revision_id, UUID)
                for application_revision_id in simple_evaluation_data.application_steps
            ):
                for application_revision_id in simple_evaluation_data.application_steps:
                    if isinstance(application_revision_id, UUID):
                        application_steps[str(application_revision_id)] = "custom"

            # Homogeneous list of callables: upsert each handler remotely.
            elif all(
                callable(application_handler)
                for application_handler in simple_evaluation_data.application_steps
            ):
                for application_handler in simple_evaluation_data.application_steps:
                    if callable(application_handler):
                        application_revision_id = await aupsert_application(
                            handler=application_handler,
                        )
                        application_steps[str(application_revision_id)] = "custom"

            simple_evaluation_data.application_steps = application_steps

        if not simple_evaluation_data.application_steps or not isinstance(
            simple_evaluation_data.application_steps, dict
        ):
            raise ValueError(
                "Invalid 'evaluate()' specs: missing or invalid application steps",
            )

    if simple_evaluation_data.evaluator_steps:
        if isinstance(simple_evaluation_data.evaluator_steps, list):
            evaluator_steps: Dict[str, Origin] = {}

            # Homogeneous list of revision UUIDs: use them directly.
            if all(
                isinstance(evaluator_revision_id, UUID)
                for evaluator_revision_id in simple_evaluation_data.evaluator_steps
            ):
                for evaluator_revision_id in simple_evaluation_data.evaluator_steps:
                    if isinstance(evaluator_revision_id, UUID):
                        evaluator_steps[str(evaluator_revision_id)] = "custom"

            # Homogeneous list of callables: upsert each handler remotely.
            elif all(
                callable(evaluator_handler)
                for evaluator_handler in simple_evaluation_data.evaluator_steps
            ):
                for evaluator_handler in simple_evaluation_data.evaluator_steps:
                    if callable(evaluator_handler):
                        evaluator_revision_id = await aupsert_evaluator(
                            handler=evaluator_handler,
                        )
                        evaluator_steps[str(evaluator_revision_id)] = "custom"

            simple_evaluation_data.evaluator_steps = evaluator_steps

        if not simple_evaluation_data.evaluator_steps or not isinstance(
            simple_evaluation_data.evaluator_steps, dict
        ):
            raise ValueError(
                "Invalid 'evaluate()' specs: missing or invalid evaluator steps",
            )

    return simple_evaluation_data


async def _retrieve_entities(
    simple_evaluation_data: SimpleEvaluationData,
) -> Tuple[
    Dict[UUID, TestsetRevision],
    Dict[UUID, ApplicationRevision],
    Dict[UUID, EvaluatorRevision],
]:
    """
    Fetch the full revision objects for every normalized step.

    Returns three dicts keyed by revision id; entries that cannot be
    retrieved are silently skipped.

    NOTE(review): `_upsert_entities` stores keys as ``str(UUID)`` while the
    annotations here claim ``Dict[UUID, ...]`` — presumably the retrieval
    helpers accept string ids; TODO confirm.
    """
    testset_revisions: Dict[UUID, TestsetRevision] = {}
    # for testset_revision_id, origin in simple_evaluation_data.testset_steps.items():
    #     testset_revision = await retrieve_testset(
    #         testset_revision_id=testset_revision_id,
    #     )
    # NOTE(review): unlike applications/evaluators below, testsets are looked
    # up by testset_id rather than revision id — TODO confirm intentional.
    for testset_id, origin in simple_evaluation_data.testset_steps.items():
        testset_revision = await aretrieve_testset(
            testset_id=testset_id,
        )

        if not testset_revision or not testset_revision.id:
            continue

        testset_revisions[testset_revision.id] = testset_revision

    application_revisions: Dict[UUID, ApplicationRevision] = {}
    for (
        application_revision_id,
        origin,
    ) in simple_evaluation_data.application_steps.items():
        application_revision = await aretrieve_application(
            application_revision_id=application_revision_id,
        )

        if not application_revision:
            continue

        application_revisions[application_revision_id] = application_revision

    evaluator_revisions: Dict[UUID, EvaluatorRevision] = {}
    for evaluator_revision_id, origin in simple_evaluation_data.evaluator_steps.items():
        evaluator_revision = await aretrieve_evaluator(
            evaluator_revision_id=evaluator_revision_id,
        )

        if not evaluator_revision:
            continue

        evaluator_revisions[evaluator_revision_id] = evaluator_revision

    return testset_revisions, application_revisions, evaluator_revisions


def _timestamp_suffix():
    # Appended to run names so repeated runs stay distinguishable.
    suffix = datetime.now().strftime("%y-%m-%d · %H:%M")
    return f" [{suffix}]"


# Box-drawing fragments used to render the progress "tree" on stdout.
UNICODE = {
    "here": "• ",
    "root": "┌─ ",
    "next": "├─ ",
    "last": "└─ ",
    "pipe": "│ ",
    "skip": " ",
    "this": "── ",
}


# @debug
async def aevaluate(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    #
    testsets: Optional[Target] = None,
    applications:
    Optional[Target] = None,
    evaluators: Optional[Target] = None,
    #
    repeats: Optional[int] = None,
    #
    specs: Optional[Union[EvaluateSpecs, Dict[str, Any]]] = None,
):
    """
    Run a full preview evaluation: parse specs, upsert entities, create a
    run, execute every application over every testcase, annotate with every
    evaluator, compute metrics, then close the run.

    Returns a dict with the closed ``run``, per-testcase ``scenarios`` and
    overall ``metrics``, or None when the run could not be created.
    """
    simple_evaluation_data = await _parse_evaluate_kwargs(
        testsets=testsets,
        applications=applications,
        evaluators=evaluators,
        repeats=repeats,
        specs=specs,
    )

    simple_evaluation_data = await _upsert_entities(
        simple_evaluation_data=simple_evaluation_data,
    )

    print()
    print(
        "──────────────────────────────────────"
        "──────────────────────────────────────"
    )
    print(f"Evaluation running...")
    print(
        "──────────────────────────────────────"
        "──────────────────────────────────────"
    )

    # NOTE(review): when `name` is None this yields the literal run name
    # "None [yy-mm-dd · HH:MM]" — TODO confirm intended.
    suffix = _timestamp_suffix()
    name = f"{name}{suffix}"

    run = await acreate_run(
        name=name,
        description=description,
        #
        testset_steps=simple_evaluation_data.testset_steps,
        application_steps=simple_evaluation_data.application_steps,
        evaluator_steps=simple_evaluation_data.evaluator_steps,
        #
        repeats=simple_evaluation_data.repeats,
    )

    print(
        f"{UNICODE['here']}"
        f"{UNICODE['skip']}"
        f"{UNICODE['skip']}"
        f"{UNICODE['skip']}"
        f"{UNICODE['skip']}"
        f" run_id={str(run.id)}",
    )

    if not run.id:
        print("[failure] could not create evaluation")
        return None

    (
        testset_revisions,
        application_revisions,
        evaluator_revisions,
    ) = await _retrieve_entities(
        simple_evaluation_data=simple_evaluation_data,
    )

    scenarios = list()

    metrics = dict()

    for testset_revision in testset_revisions.values():
        if not testset_revision.data or not testset_revision.data.testcases:
            continue

        testcases = testset_revision.data.testcases

        print(
            f"{UNICODE['next']}"
            f"{UNICODE['here']}"
            f"{UNICODE['skip']}"
            f"{UNICODE['skip']}"
            f"{UNICODE['skip']}"
            f" testset_id={str(testset_revision.testset_id)}",
        )

        # One scenario per testcase; tree glyphs depend on whether this is
        # the last testcase of the testset.
        for testcase_idx, testcase in enumerate(testcases):
            print(
                f"{UNICODE['pipe']}"
                f"{UNICODE['pipe']}"
                f"{UNICODE['skip']}"
                f"{UNICODE['skip']}"
                f"{UNICODE['skip']}"
                "-----------------------"
                "--------------------------------------"
            )

            print(
                f"{UNICODE['pipe']}"
                f"{UNICODE['next'if testcase_idx < len(testcases) - 1 else 'last']}"
                f"{UNICODE['here']}"
                f"{UNICODE['skip']}"
                f"{UNICODE['skip']}"
                f"testcase_id={str(testcase.id)}",
            )

            scenario = await aadd_scenario(
                run_id=run.id,
            )

            print(
                f"{UNICODE['pipe']}"
                f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
                f"{UNICODE['next']}"
                f"{UNICODE['here']}"
                f"{UNICODE['skip']}"
                f"scenario_id={str(scenario.id)}",
            )

            results = dict()

            # Log the testcase itself as the first step of the scenario.
            result = await alog_result(
                run_id=run.id,
                scenario_id=scenario.id,
                step_key="testset-" + testset_revision.slug,  # type: ignore
                testcase_id=testcase.id,
            )

            print(
                f"{UNICODE['pipe']}"
                f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
                f"{UNICODE['pipe']}"
                f"{UNICODE['next']}"
                f"{UNICODE['here']}"
                f" result_id={str(result.id)} (testcase)",
            )

            results[testset_revision.slug] = result

            _testcase = testcase.model_dump(
                mode="json",
                exclude_none=True,
            )  # type: ignore
            inputs = testcase.data
            # Internal dedup marker must not leak into application inputs.
            if isinstance(inputs, dict):
                if "testcase_dedup_id" in inputs:
                    del inputs["testcase_dedup_id"]

            for application_revision in application_revisions.values():
                if not application_revision or not application_revision.data:
                    print("Missing or invalid application revision")
                    if application_revision:
                        print(application_revision.model_dump(exclude_none=True))
                    continue

                # print(f"  Application {application_revision.model_dump(exclude_none=True)}")  # type: ignore

                references = dict(
                    testset=Reference(
                        id=testset_revision.testset_id,
                    ),
                    testset_variant=Reference(
                        id=testset_revision.testset_variant_id,
                    ),
                    testset_revision=Reference(
                        id=testset_revision.id,
                        slug=testset_revision.slug,
                        version=testset_revision.version,
                    ),
                    application=Reference(
                        id=application_revision.application_id,
                    ),
                    application_variant=Reference(
                        id=application_revision.application_variant_id,
                    ),
                    application_revision=Reference(
                        id=application_revision.id,
                        slug=application_revision.slug,
                        version=application_revision.version,
                    ),
                )
                links = None

                _revision = application_revision.model_dump(
                    mode="json",
                    exclude_none=True,
                )
                interface = WorkflowServiceInterface(
                    **(
                        application_revision.data.model_dump()
                        if application_revision.data
                        else {}
                    )
                )
                configuration = WorkflowServiceConfiguration(
                    **(
                        application_revision.data.model_dump()
                        if application_revision.data
                        else {}
                    )
                )
                parameters = application_revision.data.parameters

                _trace = None
                outputs = None

                workflow_service_request_data = WorkflowServiceRequestData(
                    revision=_revision,
                    parameters=parameters,
                    #
                    testcase=_testcase,
                    inputs=inputs,
                    #
                    trace=_trace,
                    outputs=outputs,
                )

                application_request = ApplicationServiceRequest(
                    interface=interface,
                    configuration=configuration,
                    #
                    data=workflow_service_request_data,
                    #
                    references=references,  # type: ignore
                    links=links,  # type: ignore
                )

                application_response = await invoke_application(
                    request=application_request,
                )

                if (
                    not application_response
                    or not application_response.data
                    or not application_response.trace_id
                ):
                    print("Missing or invalid application response")
                    if application_response:
                        print(application_response.model_dump(exclude_none=True))
                    continue

                trace_id = application_response.trace_id

                if not application_revision.id or not application_revision.name:
                    print("Missing application revision ID or name")
                    continue

                application_slug = get_slug_from_name_and_id(
                    name=application_revision.name,
                    id=application_revision.id,
                )

                # NOTE(review): the coroutine is created here and awaited
                # after `alog_result`; it does not start polling until the
                # later `await trace` — TODO confirm this overlap is wanted.
                trace = fetch_trace_data(trace_id, max_retries=30, delay=1.0)

                result = await alog_result(
                    run_id=run.id,
                    scenario_id=scenario.id,
                    step_key="application-" + application_slug,  # type: ignore
                    trace_id=trace_id,
                )

                print(
                    f"{UNICODE['pipe']}"
                    f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
                    f"{UNICODE['pipe']}"
                    f"{UNICODE['next']}"
                    f"{UNICODE['here']}"
                    f" result_id={str(result.id)} (invocation)",
                )

                results[application_slug] = result

                trace = await trace

                if not trace:
                    print("Failed to fetch trace data for application")
                    continue

                # Pull outputs (and possibly inputs) out of the root span of
                # the invocation trace.
                root_span = list(trace.get("spans", {}).values())[0]
                trace_attributes: dict = root_span.get("attributes", {})
                trace_attributes_ag: dict = trace_attributes.get("ag", {})
                trace_attributes_ag_data: dict = trace_attributes_ag.get("data", {})
                outputs = trace_attributes_ag_data.get("outputs")
                inputs = inputs or trace_attributes_ag_data.get("inputs")

                for i, evaluator_revision in enumerate(evaluator_revisions.values()):
                    if not evaluator_revision or not evaluator_revision.data:
                        print("Missing or invalid evaluator revision")
                        if evaluator_revision:
                            print(evaluator_revision.model_dump(exclude_none=True))
                        continue

                    references = dict(
                        testset=Reference(
                            id=testset_revision.testset_id,
                        ),
                        testset_variant=Reference(
                            id=testset_revision.testset_variant_id,
                        ),
                        testset_revision=Reference(
                            id=testset_revision.id,
                            slug=testset_revision.slug,
                            version=testset_revision.version,
                        ),
                        evaluator=Reference(
                            id=evaluator_revision.evaluator_id,
                        ),
                        evaluator_variant=Reference(
                            id=evaluator_revision.evaluator_variant_id,
                        ),
                        evaluator_revision=Reference(
                            id=evaluator_revision.id,
                            slug=evaluator_revision.slug,
                            version=evaluator_revision.version,
                        ),
                    )
                    # Link the annotation back to the application invocation.
                    links = (
                        dict(
                            invocation=Link(
                                trace_id=application_response.trace_id,
                                span_id=application_response.span_id,
                            )
                        )
                        if application_response.trace_id
                        and application_response.span_id
                        else None
                    )

                    _revision = evaluator_revision.model_dump(
                        mode="json",
                        exclude_none=True,
                    )
                    interface = WorkflowServiceInterface(
                        **(
                            evaluator_revision.data.model_dump()
                            if evaluator_revision.data
                            else {}
                        )
                    )
                    configuration = WorkflowServiceConfiguration(
                        **(
                            evaluator_revision.data.model_dump()
                            if evaluator_revision.data
                            else {}
                        )
                    )
                    parameters = evaluator_revision.data.parameters

                    workflow_service_request_data = WorkflowServiceRequestData(
                        revision=_revision,
                        parameters=parameters,
                        #
                        testcase=_testcase,
                        inputs=inputs,
                        #
                        trace=trace,
                        outputs=outputs,
                    )

                    evaluator_request = EvaluatorServiceRequest(
                        version="2025.07.14",
                        #
                        interface=interface,
                        configuration=configuration,
                        #
                        data=workflow_service_request_data,
                        #
                        references=references,  # type: ignore
                        links=links,  # type: ignore
                    )

                    evaluator_response = await invoke_evaluator(
                        request=evaluator_request,
                        #
                        annotate=True,
                    )

                    if (
                        not evaluator_response
                        or not evaluator_response.data
                        or not evaluator_response.trace_id
                    ):
                        print("Missing or invalid evaluator response")
                        if evaluator_response:
                            print(evaluator_response.model_dump(exclude_none=True))
                        continue

                    trace_id = evaluator_response.trace_id

                    trace = fetch_trace_data(trace_id, max_retries=20, delay=1.0)

                    result = await alog_result(
                        run_id=run.id,
                        scenario_id=scenario.id,
                        step_key="evaluator-" + evaluator_revision.slug,  # type: ignore
                        trace_id=trace_id,
                    )

                    print(
                        f"{UNICODE['pipe']}"
                        f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
                        f"{UNICODE['pipe']}"
                        f"{UNICODE['last' if (i == len(evaluator_revisions) - 1) else 'next']}"
                        f"{UNICODE['here']}"
                        f" result_id={str(result.id)} (annotation)",
                    )

                    results[evaluator_revision.slug] = result

                    trace = await trace

                    if not trace:
                        print("Failed to fetch trace data for evaluator")
                        continue

            # Per-scenario metrics once all applications/evaluators ran.
            metrics = await acompute_metrics(
                run_id=run.id,
                scenario_id=scenario.id,
            )

            print(
                f"{UNICODE['pipe']}"
                f"{UNICODE['pipe' if testcase_idx < len(testcases) - 1 else 'skip']}"
                f"{UNICODE['last']}"
                f"{UNICODE['here']}"
                f"{UNICODE['skip']}"
                f" metrics_id={str(metrics.id)}",
            )

            scenarios.append(
                {
                    "scenario": scenario,
                    "results": results,
                    "metrics": metrics,
                },
            )

            print(
                f"{UNICODE['pipe']}"
                f"{UNICODE['skip']}"
                f"{UNICODE['skip']}"
                f"{UNICODE['skip']}"
                f"{UNICODE['skip']}"
                "-----------------------"
                "--------------------------------------"
            )

    metrics = dict()

    # NOTE(review): reconstructed from a collapsed source; the metrics_id
    # print is placed inside the guard because `metrics` is a plain dict
    # (no `.id`) when no scenario ran — TODO confirm original indentation.
    if len(scenarios) > 0:
        metrics = await acompute_metrics(
            run_id=run.id,
        )

        print(
            f"{UNICODE['last']}"
            f"{UNICODE['here']}"
            f"{UNICODE['skip']}"
            f"{UNICODE['skip']}"
            f"{UNICODE['skip']}"
            f" metrics_id={str(metrics.id)}",
        )

    run = await aclose_run(
        run_id=run.id,
    )

    run_url = await aget_url(run_id=run.id)

    print(
        "──────────────────────────────────────"
        "──────────────────────────────────────"
    )
    print(f"Evaluation finished.")
    print(
        "--------------------------------------"
        "--------------------------------------"
    )
    print(f"Evaluation URL: {run_url or '[unavailable]'}")
    print(
        "──────────────────────────────────────"
        "──────────────────────────────────────"
    )
    print()

    return dict(
        run=run,
        scenarios=scenarios,
        metrics=metrics,
    )


"""
Utilities for formatting and displaying evaluation results.
Contains helper functions for Rich text formatting and table generation.
"""

import json
from typing import Dict, List, Any, Optional
import asyncio
from uuid import UUID
from dataclasses import dataclass, field

# NOTE(review): `unicodedata` and `UUID` appear unused in this module's
# visible code — candidates for removal once confirmed.
import unicodedata
import re
@dataclass
class EvaluationTestcaseData:
    """
    One evaluated testcase.

    Attributes:
        case_id: Unique identifier for the testcase.
        inputs: Input payload the application was called with.
        application_outputs: Outputs produced by the application under test.
        evaluator_outputs: Raw evaluator results (numeric scores and/or
            boolean assertions, possibly lists of booleans).
    """

    case_id: str = ""
    inputs: Dict[str, Any] = field(default_factory=dict)
    application_outputs: Dict[str, Any] = field(default_factory=dict)
    evaluator_outputs: Dict[str, Any] = field(default_factory=dict)

    def get_scores(self) -> Dict[str, float]:
        """Return only the numeric evaluator outputs (bools excluded)."""
        return {
            key: val
            for key, val in self.evaluator_outputs.items()
            if isinstance(val, (int, float)) and not isinstance(val, bool)
        }

    def get_assertions(self) -> Dict[str, Any]:
        """Return boolean evaluator outputs (single bools or lists of bools)."""
        return {
            key: val
            for key, val in self.evaluator_outputs.items()
            if isinstance(val, bool)
            or (isinstance(val, list) and all(isinstance(item, bool) for item in val))
        }


@dataclass
class EvaluationReport:
    """
    Aggregated view over all testcases of one evaluation run.

    Attributes:
        run_id: Unique identifier for the evaluation run.
        cases: Per-testcase data.
        summary: Free-form summary statistics.
    """

    run_id: str = ""
    cases: List[EvaluationTestcaseData] = field(default_factory=list)
    summary: Dict[str, Any] = field(default_factory=dict)

    def get_total_cases(self) -> int:
        """Number of testcases in this report."""
        return len(self.cases)

    def get_all_evaluator_keys(self) -> set[str]:
        """Union of evaluator-output keys seen across all cases."""
        keys: set = set()
        for entry in self.cases:
            keys |= entry.evaluator_outputs.keys()
        return keys

    def calculate_averages(self) -> Dict[str, float]:
        """Mean of each numeric score key across all cases."""
        buckets: Dict[str, List[float]] = {}
        for entry in self.cases:
            for key, score in entry.get_scores().items():
                buckets.setdefault(key, []).append(score)
        return {key: sum(vals) / len(vals) for key, vals in buckets.items() if vals}

    def calculate_assertion_percentage(self) -> float:
        """Percentage of passing assertions over every bool seen (0.0 if none)."""
        flags: List[bool] = []
        for entry in self.cases:
            for val in entry.get_assertions().values():
                if isinstance(val, bool):
                    flags.append(val)
                elif isinstance(val, list):
                    flags.extend(val)
        if not flags:
            return 0.0
        return (sum(flags) / len(flags)) * 100


# Rich imports for progress tracking
try:
    from rich.progress import track

    RICH_AVAILABLE = True
except ImportError:
    RICH_AVAILABLE = False

    # Plain pass-through iteration when Rich is not installed.
    def track(iterable, description="Processing..."):
        return iterable
from rich.table import Table + from rich.text import Text + from rich import box + + _HAS_RICH = True +except ImportError: + _HAS_RICH = False + + # Fallback implementations for when Rich is not available + class Text: + def __init__(self, text="", style=None): + self.text = str(text) + + def __str__(self): + return self.text + + @staticmethod + def from_markup(text): + # Remove Rich markup for plain text fallback + import re + + clean_text = re.sub(r'\[/?\w+(?:\s+\w+="[^"]*")*\]', "", text) + return Text(clean_text) + + class Table: + def __init__(self, *args, **kwargs): + self.rows = [] + self.headers = [] + + def add_column(self, header, **kwargs): + self.headers.append(header) + + def add_row(self, *args): + self.rows.append([str(arg) for arg in args]) + + def add_section(self): + # Add separator in fallback mode + pass + + class Console: + def __init__(self, width=None, **kwargs): + self.width = width + + +def smart_format_content(content: Any, max_length: int = 200) -> str: + """ + Smart content formatting with size awareness and Rich markup support. + + Args: + content: Content to format (dict, list, str, etc.) + max_length: Maximum character length before truncation + + Returns: + Formatted string with optional Rich markup + """ + if content is None: + return "" + + if isinstance(content, str): + if len(content) <= max_length: + return content + else: + return f"{content[:max_length-3]}..." 
+ + if isinstance(content, (dict, list)): + try: + json_str = json.dumps(content, indent=None, separators=(",", ":")) + if len(json_str) <= max_length: + return json_str + else: + # For large objects, show structure with key-value pairs + if isinstance(content, dict): + items = list(content.items())[:3] + item_preview = ", ".join(f'"{k}": "{v}"' for k, v in items) + more_indicator = ( + f" (+{len(content) - len(items)} more)" + if len(content) > len(items) + else "" + ) + full_preview = f"{{{item_preview}{more_indicator}}}" + # Truncate the entire string to fit the column width + if len(full_preview) <= max_length: + return full_preview + else: + return f"{full_preview[:max_length-3]}..." + else: # list + count = len(content) + item_preview = ( + str(content[0])[:50] + "..." + if content and len(str(content[0])) > 50 + else str(content[0]) + if content + else "" + ) + return ( + f"[{item_preview}] ({count} items)" + if count > 1 + else f"[{item_preview}]" + ) + except (TypeError, ValueError): + # Fallback for non-serializable objects + str_repr = str(content) + return ( + str_repr[: max_length - 3] + "..." + if len(str_repr) > max_length + else str_repr + ) + + # For other types + str_repr = str(content) + return ( + str_repr[: max_length - 3] + "..." if len(str_repr) > max_length else str_repr + ) + + +def format_number(value: float, max_precision: int = 3) -> str: + """ + Format numbers with intelligent precision and comma separators. 
def format_number(value: float, max_precision: int = 3) -> str:
    """
    Format numbers with intelligent precision and comma separators.

    Args:
        value: The numeric value to format.
        max_precision: Maximum decimal places to show.

    Returns:
        Formatted number string (trailing zeros trimmed, "0" for zero).
    """
    if abs(value) >= 1000:
        # Comma separators for large numbers.
        return f"{value:,.{max_precision}f}".rstrip("0").rstrip(".")
    elif abs(value) < 0.001 and value != 0:
        # Scientific notation keeps very small magnitudes readable.
        return f"{value:.{max_precision}e}"
    else:
        formatted = f"{value:.{max_precision}f}".rstrip("0").rstrip(".")
        return formatted if formatted else "0"


def format_evaluation_report_rich(
    report_data: List[Dict[str, Any]], console_width: Optional[int] = None
) -> str:
    """
    Format evaluation results as a Rich table (falls back to a hand-drawn
    Unicode table when Rich is unavailable).

    Args:
        report_data: One dict per testcase with case_id/inputs/
            application_outputs/evaluator_outputs keys.
        console_width: Optional rendering width.

    Returns:
        The rendered table as a string.
    """
    if not _HAS_RICH:
        return _format_with_unicode_table(report_data, console_width)

    if not report_data:
        return "No evaluation data available"

    table = Table(
        title="Evaluation Results",
        box=box.ROUNDED,
        show_header=True,
        header_style="bold magenta",
        width=console_width,
    )

    table.add_column("Testcases", style="cyan", width=10)
    table.add_column("Inputs", style="green", width=40, overflow="fold")
    table.add_column("Outputs", style="blue", width=40, overflow="fold")
    table.add_column("Scores", style="yellow", width=40)
    table.add_column("Assertions", style="red", width=10)

    # Accumulated across all cases for the Averages footer row.
    total_scores: Dict[str, List[float]] = {}
    total_assertions: List[bool] = []

    for case_data in report_data:
        case_id = case_data.get("case_id", "unknown")
        inputs = case_data.get("inputs", {})
        outputs = case_data.get("application_outputs", {})

        inputs_text = Text.from_markup(smart_format_content(inputs, 400))
        outputs_text = Text.from_markup(smart_format_content(outputs, 500))

        # Numeric scores, one per line; numeric strings are coerced too.
        scores_parts: List[str] = []

        def _maybe_add(k: str, v: Any) -> None:
            # Booleans are assertions, never scores.
            if isinstance(v, bool):
                return
            num: Optional[float] = None
            if isinstance(v, (int, float)):
                num = float(v)
            elif isinstance(v, str):
                try:
                    num = float(v)
                except Exception:
                    num = None
            if num is not None:
                scores_parts.append(f"{k}: {format_number(num)}")
                total_scores.setdefault(k, []).append(num)

        for key, value in case_data.get("evaluator_outputs", {}).items():
            if isinstance(value, list):
                # FIX: dropped the unused `enumerate` index.
                for v in value:
                    _maybe_add(key, v)
            else:
                _maybe_add(key, value)
        scores_text = Text("\n".join(scores_parts))

        # Boolean assertions rendered as colored check/cross marks.
        assertions_parts: List[str] = []
        for key, value in case_data.get("evaluator_outputs", {}).items():
            if isinstance(value, bool):
                assertions_parts.append("[green]✔[/green]" if value else "[red]✗[/red]")
                total_assertions.append(value)
            elif isinstance(value, list) and all(isinstance(v, bool) for v in value):
                # Multiple evaluators may share a key name.
                for v in value:
                    assertions_parts.append(
                        "[green]✔[/green]" if v else "[red]✗[/red]"
                    )
                    total_assertions.append(v)
        assertions_text = Text.from_markup(
            " ".join(assertions_parts) if assertions_parts else ""
        )

        table.add_row(case_id, inputs_text, outputs_text, scores_text, assertions_text)
        # Separator after each data row for readability.
        table.add_section()

    # Separator line before the averages footer.
    table.add_section()

    avg_scores_parts = [
        f"{key}: {format_number(sum(values) / len(values) if values else 0)}"
        for key, values in total_scores.items()
    ]

    assertion_pct = (
        (sum(total_assertions) / len(total_assertions) * 100) if total_assertions else 0
    )
    assertion_summary = f"{assertion_pct:.1f}%"

    table.add_row(
        "[bold italic]Averages[/bold italic]",
        "",
        "",
        Text("\n".join(avg_scores_parts)),
        Text(assertion_summary),
    )

    # Render to a string buffer.
    from io import StringIO

    string_buffer = StringIO()
    # FIX: pass the buffer at construction instead of assigning
    # `console.file` after the fact.
    console = Console(width=console_width, file=string_buffer)
    console.print(table)
    return string_buffer.getvalue()


def _format_with_unicode_table(
    report_data: List[Dict[str, Any]], console_width: Optional[int]
) -> str:
    """Fallback Unicode table formatting used when Rich is unavailable."""
    if not report_data:
        return "No evaluation data available"

    def make_border(widths, left="┏", mid="┳", right="┓", fill="━"):
        return left + mid.join(fill * w for w in widths) + right

    def make_separator(widths, left="├", mid="┼", right="┤", fill="─"):
        return left + mid.join(fill * w for w in widths) + right

    def make_row(values, widths, left="┃", mid="┃", right="┃"):
        formatted = []
        for val, width in zip(values, widths):
            # Only the first line of multi-line content fits a table cell.
            val_str = str(val)
            if "\n" in val_str:
                val_str = val_str.split("\n")[0]
            # FIX: clip overlong cells so the column borders stay aligned.
            if len(val_str) > width - 2:
                val_str = val_str[: width - 2]
            formatted.append(f" {val_str:<{width-2}} ")
        return left + mid.join(formatted) + right

    # Responsive column widths.
    if console_width and console_width < 120:
        col_widths = [12, 20, 30, 20, 10]  # Compact
    else:
        col_widths = [15, 30, 40, 25, 12]  # Full width

    lines = []

    lines.append(make_border(col_widths))
    lines.append(
        make_row(
            ["Testcase ID", "Inputs", "Outputs", "Scores", "Assertions"], col_widths
        )
    )
    lines.append(make_border(col_widths, "┡", "╇", "┩", "━"))

    total_scores: Dict[str, List[float]] = {}
    total_assertions: List[bool] = []

    for case_data in report_data:
        case_id = case_data.get("case_id", "unknown")

        inputs = case_data.get("inputs", {})
        outputs = case_data.get("application_outputs", {})

        inputs_str = smart_format_content(inputs, col_widths[1] - 4)
        outputs_str = smart_format_content(outputs, col_widths[2] - 4)

        # Numeric scores, one per line.
        scores_parts = []
        for key, value in case_data.get("evaluator_outputs", {}).items():
            if isinstance(value, (int, float)) and not isinstance(value, bool):
                scores_parts.append(f"{key}: {format_number(value)}")
                total_scores.setdefault(key, []).append(value)
        scores_str = "\n".join(scores_parts)

        # Boolean assertions as plain check/cross marks.
        assertions_parts = []
        for key, value in case_data.get("evaluator_outputs", {}).items():
            if isinstance(value, bool):
                assertions_parts.append("✔" if value else "✗")
                total_assertions.append(value)
            elif isinstance(value, list) and all(isinstance(v, bool) for v in value):
                for v in value:
                    assertions_parts.append("✔" if v else "✗")
                    total_assertions.append(v)
        assertions_str = " ".join(assertions_parts) if assertions_parts else ""

        lines.append(
            make_row(
                [case_id, inputs_str, outputs_str, scores_str, assertions_str],
                col_widths,
            )
        )
        lines.append(make_separator(col_widths))

    # Averages footer.
    avg_scores_parts = []
    for key, values in total_scores.items():
        avg = sum(values) / len(values) if values else 0
        avg_scores_parts.append(f"{key}: {format_number(avg)}")
    avg_scores_str = smart_format_content(
        ", ".join(avg_scores_parts), col_widths[3] - 4
    )

    assertion_pct = (
        (sum(total_assertions) / len(total_assertions) * 100) if total_assertions else 0
    )
    assertion_summary = f"{assertion_pct:.1f}%"

    lines.append(make_border(col_widths, "┠", "╂", "┨", "━"))
    lines.append(
        make_row(["Averages", "", "", avg_scores_str, assertion_summary], col_widths)
    )
    # FIX: close with heavy glyphs to match the heavy verticals/top border
    # (previously light "└┴┘─", which rendered a broken frame).
    lines.append(make_border(col_widths, "┗", "┻", "┛", "━"))

    return "\n".join(lines)


# Main function that chooses the best available formatting
def format_evaluation_report(
    report_data: List[Dict[str, Any]], console_width: Optional[int] = None
) -> str:
    """Format evaluation results with the best available method."""
    return format_evaluation_report_rich(report_data, console_width)
                    # Shorten the UUID for a readable case id in the table.
                    case_data["case_id"] = f"{testcase_short}..."

                elif result.trace_id:
                    if show_detailed_logs:
                        print(
                            f"    step_key={str(step_key).ljust(32)}, trace_id={result.trace_id}"
                        )

                    # Fetch and process trace data using services module
                    try:
                        trace_data = await fetch_trace_data(result.trace_id)
                        if trace_data and "spans" in trace_data:
                            for span_key in trace_data["spans"].keys():
                                step_data = extract_trace_step_data(trace_data, span_key)
                                if step_data:
                                    inputs = step_data["inputs"]
                                    outputs = step_data["outputs"]
                                    trace_type = step_data["trace_type"]
                                    trace_evaluator_name = step_data.get("evaluator_name")

                                    # Store inputs for report
                                    if inputs:
                                        case_data["inputs"] = clean_inputs_for_display(
                                            **(inputs if isinstance(inputs, dict) else {})
                                        )
                                        if show_detailed_logs:
                                            print(
                                                f"      inputs={inputs}"
                                            )

                                    # Determine if this is application or evaluator
                                    if outputs:
                                        # Heuristic to classify outputs:
                                        # 1. If outputs is a single string value, it's likely the application output
                                        # 2. If outputs is a dict with keys like 'score', 'myscore', 'success', it's evaluator output
                                        # 3. If we already have application_outputs, everything else is evaluator output
                                        is_application_output = False
                                        if not case_data.get("application_outputs"):
                                            # Check if this looks like a simple application output (single string)
                                            if isinstance(outputs, str):
                                                is_application_output = True
                                            elif (
                                                isinstance(outputs, dict)
                                                and len(outputs) == 0
                                            ):
                                                # Empty dict, skip
                                                is_application_output = False
                                            elif isinstance(outputs, dict):
                                                # If it's a dict with typical evaluator keys, it's an evaluator
                                                evaluator_keys = {
                                                    "score",
                                                    "myscore",
                                                    "success",
                                                    "failure",
                                                    "passed",
                                                    "failed",
                                                }
                                                if any(
                                                    key in evaluator_keys
                                                    for key in outputs.keys()
                                                ):
                                                    is_application_output = False
                                                else:
                                                    # Otherwise, it might be application output
                                                    is_application_output = True

                                        if is_application_output:
                                            case_data["application_outputs"] = outputs
                                        else:
                                            # This is an evaluator output
                                            # Use the evaluator name from trace data, or fall back to step_key hash
                                            evaluator_name = trace_evaluator_name or (
                                                step_key[:8] if step_key else None
                                            )
                                            process_evaluator_outputs(
                                                case_data,
                                                outputs,
                                                evaluator_name=evaluator_name,
                                            )

                                        if show_detailed_logs:
                                            print(
                                                f"      outputs={outputs}"
                                            )
                        else:
                            if show_detailed_logs:
                                print(
                                    f"      ⚠️ no_trace_data"
                                )
                    # NOTE(review): the broad except hides unexpected bugs
                    # when show_detailed_logs is False — consider logging.
                    except Exception as e:
                        if show_detailed_logs:
                            print(
                                f"      ❌ trace_fetch_error: {e}"
                            )
                else:
                    if show_detailed_logs:
                        print(
                            f"    step_key={str(step_key).ljust(32)}, ❌ error={result.error}"
                        )

            # Only cases that resolved a testcase id make it into the report.
            if case_data["case_id"]:
                report_data.append(case_data)

        # if show_detailed_logs:
        #     print(
        #         f"📈 metrics={json.dumps(eval_data['metrics'].data, indent=4)}"
        #     )  # type:ignore

    # Display the enhanced formatted report table
    print()
    print("📋 Evaluation Report:")
    print(format_evaluation_report(report_data, console_width))

    # Add summary statistics
    if report_data:
        print()
        print(f"✅ Successfully processed {len(report_data)} testcases")

        # Count total evaluators
all_evaluator_keys = set() + for case in report_data: + all_evaluator_keys.update(case.get("evaluator_outputs", {}).keys()) + + if all_evaluator_keys: + print( + f"🔍 Evaluated with {len(all_evaluator_keys)} metrics: {', '.join(sorted(all_evaluator_keys))}" + ) + else: + print("⚠️ No evaluation data found") + + +from typing import Callable, Dict, Optional, Any + +from agenta.sdk.utils.client import authed_api +import asyncio +import json +from typing import Dict, Any, Optional + + +async def fetch_trace_data( + trace_id: str, max_retries: int = 3, delay: float = 1.0 +) -> Optional[Dict[str, Any]]: + """ + Fetch trace data from the API with retry logic. + + Args: + trace_id: The trace ID to fetch + max_retries: Maximum number of retry attempts + delay: Delay between retries in seconds + + Returns: + Trace data dictionary or None if not found + """ + for attempt in range(max_retries): + try: + response = authed_api()( + method="GET", endpoint=f"/preview/tracing/traces/{trace_id}" + ) + response.raise_for_status() + trace_data = response.json() + + # print(trace_data) + + # Get the traces dictionary + traces = trace_data.get("traces", {}) + if traces: + # Get the first (and usually only) trace + for trace_key, trace_content in traces.items(): + if ( + trace_content + and "spans" in trace_content + and trace_content["spans"] + ): + return trace_content + + # If no data yet, retry on next iteration + if attempt < max_retries - 1: + await asyncio.sleep(delay) + + except Exception as e: + if attempt < max_retries - 1: + await asyncio.sleep(delay) + continue + else: + print(f"Error fetching trace data: {e}") + return None + + print("Failed to fetch trace data after retries") + return None + + +def extract_trace_step_data( + trace_data: Dict[str, Any], step_key: str +) -> Optional[Dict[str, Any]]: + """ + Extract step data from trace information. 
+ + Args: + trace_data: The complete trace data + step_key: The step key to extract data for + + Returns: + Step data dictionary or None if not found + """ + if not trace_data: + return None + + spans = trace_data.get("spans", {}) + if not spans or step_key not in spans: + return None + + span_info = spans[step_key] + # Extract the actual evaluation data using the correct data structure + ag_data = span_info.get("attributes", {}).get("ag", {}).get("data", {}) + + if not ag_data: + return None + + # Try to extract evaluator/application name from span + # The span_name field contains the workflow/evaluator name + evaluator_name = span_info.get("span_name") or span_info.get("name") + + return { + "inputs": ag_data.get("inputs", {}), + "outputs": ag_data.get("outputs", {}), + "trace_type": span_info.get("trace_type"), + "evaluator_name": evaluator_name, + "span_info": span_info, + } + + +def process_evaluator_outputs( + case_data: Dict[str, Any], + outputs: Dict[str, Any], + evaluator_name: Optional[str] = None, +) -> None: + """ + Process evaluator outputs and handle multiple evaluators with same key names. 
+ + Args: + case_data: The case data to update + outputs: The evaluator outputs to process + evaluator_name: Optional evaluator identifier for labeling + """ + # Handle multiple evaluators with same key names (like 'success', 'score') + for key, value in outputs.items(): + # Label numeric scores by evaluator to distinguish between multiple evaluators + display_key = key + + # If we have an evaluator name and this is a numeric value, prefix it + if ( + evaluator_name + and isinstance(value, (int, float)) + and not isinstance(value, bool) + ): + display_key = f"{evaluator_name}.{key}" + + # Store the value - if the key already exists, convert to list to preserve all values + if display_key in case_data["evaluator_outputs"]: + # Create lists for duplicate keys to preserve all values + existing = case_data["evaluator_outputs"][display_key] + if not isinstance(existing, list): + case_data["evaluator_outputs"][display_key] = [existing] + case_data["evaluator_outputs"][display_key].append(value) + else: + case_data["evaluator_outputs"][display_key] = value + + +def clean_inputs_for_display(**kwargs) -> Dict[str, Any]: + """ + Clean inputs by removing internal IDs and trace data for cleaner display. 
+ + Args: + inputs: Raw inputs dictionary + + Returns: + Cleaned inputs dictionary with only user-facing testcase fields + """ + inputs = kwargs.get("inputs") + if inputs: + # List of keys to exclude from display + # - Internal IDs (ending with _id) + # - Testcase internal fields (starting with testcase_) + # - Trace data (the 'trace' key which contains the full trace structure) + excluded_keys = { + "revision", + "parameters", + "testcase", + # "inputs", + "trace", + "outputs", + } + + clean_inputs = { + k: v + for k, v in inputs.items() + if not k.endswith("_id") + and not k.startswith("testcase_") + and k not in excluded_keys + } + return clean_inputs or inputs + return inputs diff --git a/sdk/agenta/sdk/evaluations/results.py b/sdk/agenta/sdk/evaluations/results.py new file mode 100644 index 0000000000..56ab1b99cb --- /dev/null +++ b/sdk/agenta/sdk/evaluations/results.py @@ -0,0 +1,66 @@ +from typing import Optional, Dict, Any +from uuid import UUID + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.models.evaluations import EvaluationResult + +# TODO: ADD TYPES + + +async def acreate( + *, + run_id: UUID, + scenario_id: UUID, + step_key: str, + # repeat_idx: str, + # timestamp: datetime, + # interval: float, + # + testcase_id: Optional[UUID] = None, + trace_id: Optional[str] = None, + error: Optional[dict] = None, + # + flags: Optional[Dict[str, Any]] = None, + tags: Optional[Dict[str, Any]] = None, + meta: Optional[Dict[str, Any]] = None, +) -> EvaluationResult: + payload = dict( + results=[ + dict( + flags=flags, + tags=tags, + meta=meta, + # + testcase_id=str(testcase_id) if testcase_id else None, + trace_id=trace_id, + error=error, + # + # interval=interval, + # timestamp=timestamp, + # repeat_idx=repeat_idx, + step_key=step_key, + run_id=str(run_id), + scenario_id=str(scenario_id), + # + status="success", + ) + ] + ) + + response = authed_api()( + method="POST", + endpoint=f"/preview/evaluations/results/", + json=payload, + ) + + try: + 
response.raise_for_status() + except: + print(response.text) + raise + + response = response.json() + + result = EvaluationResult(**response["results"][0]) + + return result diff --git a/sdk/agenta/sdk/evaluations/runs.py b/sdk/agenta/sdk/evaluations/runs.py new file mode 100644 index 0000000000..c4a12a40f1 --- /dev/null +++ b/sdk/agenta/sdk/evaluations/runs.py @@ -0,0 +1,153 @@ +from typing import Optional, Dict, Any +from uuid import UUID + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.models.evaluations import EvaluationRun, Target + +import agenta as ag + +# TODO: ADD TYPES + + +async def afetch( + *, + run_id: UUID, +) -> Optional[EvaluationRun]: + response = authed_api()( + method="GET", + endpoint=f"/preview/evaluations/runs/{run_id}", + ) + + try: + response.raise_for_status() + except: + print(response.text) + raise + + response = response.json() + + if (not "count" in response) or (response["count"] == 0) or (not "run" in response): + return None + + run = EvaluationRun(**response["run"]) + + return run + + +async def acreate( + *, + name: Optional[str] = None, + description: Optional[str] = None, + # + flags: Optional[Dict[str, Any]] = None, + tags: Optional[Dict[str, Any]] = None, + meta: Optional[Dict[str, Any]] = None, + # + query_steps: Optional[Target] = None, + testset_steps: Optional[Target] = None, + application_steps: Optional[Target] = None, + evaluator_steps: Optional[Target] = None, + # + repeats: Optional[int] = None, +) -> Optional[EvaluationRun]: + payload = dict( + evaluation=dict( + name=name, + description=description, + # + flags=flags, + tags=tags, + meta=meta, + # + data=dict( + status="running", + query_steps=query_steps, + testset_steps=testset_steps, + application_steps=application_steps, + evaluator_steps=evaluator_steps, + repeats=repeats, + ), + # + jit={"testsets": True, "evaluators": False}, + ) + ) + + response = authed_api()( + method="POST", + endpoint=f"/preview/simple/evaluations/", + json=payload, + ) 
+ + try: + response.raise_for_status() + except: + print(response.text) + raise + + response = response.json() + + if (not "evaluation" in response) or (not "id" in response["evaluation"]): + return None + + run_id = UUID(response["evaluation"]["id"]) + + return await afetch(run_id=run_id) + + +async def aclose( + *, + run_id: UUID, + # + status: Optional[str] = "success", +) -> Optional[EvaluationRun]: + response = authed_api()( + method="POST", + endpoint=f"/preview/evaluations/runs/{run_id}/close/{status}", + ) + + try: + response.raise_for_status() + except: + print(response.text) + raise + + response = response.json() + + if (not "run" in response) or (not "id" in response["run"]): + return None + + run_id = UUID(response["run"]["id"]) + + return await afetch(run_id=run_id) + + +async def aurl( + *, + run_id: UUID, +) -> str: + response = authed_api()( + method="GET", + endpoint=f"/projects", + params={"scope": "project"}, + ) + + try: + response.raise_for_status() + except: + print(response.text) + raise + + if len(response.json()) != 1: + return None + + project_info = response.json()[0] + + workspace_id = project_info.get("workspace_id") + project_id = project_info.get("project_id") + + return ( + f"{ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.host}" + f"/w/{workspace_id}" + f"/p/{project_id}" + f"/evaluations/results/{run_id}" + ) diff --git a/sdk/agenta/sdk/evaluations/scenarios.py b/sdk/agenta/sdk/evaluations/scenarios.py new file mode 100644 index 0000000000..98c9c47e1f --- /dev/null +++ b/sdk/agenta/sdk/evaluations/scenarios.py @@ -0,0 +1,48 @@ +from typing import Optional, Dict, Any +from uuid import UUID + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.models.evaluations import EvaluationScenario + +# TODO: ADD TYPES + + +async def acreate( + *, + run_id: UUID, + # + flags: Optional[Dict[str, Any]] = None, + tags: Optional[Dict[str, Any]] = None, + meta: Optional[Dict[str, Any]] = None, +) -> EvaluationScenario: + payload = dict( + 
scenarios=[ + dict( + flags=flags, + tags=tags, + meta=meta, + # + run_id=str(run_id), + # + status="success", + ) + ] + ) + + response = authed_api()( + method="POST", + endpoint=f"/preview/evaluations/scenarios/", + json=payload, + ) + + try: + response.raise_for_status() + except: + print(response.text) + raise + + response = response.json() + + scenario = EvaluationScenario(**response["scenarios"][0]) + + return scenario diff --git a/sdk/agenta/sdk/managers/applications.py b/sdk/agenta/sdk/managers/applications.py new file mode 100644 index 0000000000..a5600bc3d8 --- /dev/null +++ b/sdk/agenta/sdk/managers/applications.py @@ -0,0 +1,304 @@ +from typing import Dict, Any, Callable, Optional +from uuid import uuid4, UUID + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.decorators.running import auto_workflow, is_workflow +from agenta.sdk.models.workflows import ( + ApplicationRevision, + # + ApplicationRevisionResponse, + # + LegacyApplicationFlags, + LegacyApplicationData, + LegacyApplicationCreate, + LegacyApplicationEdit, + # + LegacyApplicationResponse, + # + Reference, +) + +from agenta.sdk.utils.references import get_slug_from_name_and_id + + +async def _retrieve_application( + application_id: Optional[UUID] = None, + application_slug: Optional[str] = None, + application_revision_id: Optional[UUID] = None, + application_revision_slug: Optional[str] = None, +) -> Optional[ApplicationRevision]: + payload = { + "application_ref": ( + { + "id": str(application_id) if application_id else None, + "slug": str(application_slug), + } + if application_id or application_slug + else None + ), + "application_revision_ref": ( + { + "id": ( + str(application_revision_id) if application_revision_id else None + ), + "slug": application_revision_slug, + } + if application_revision_id or application_revision_slug + else None + ), + } + + # print(" --- payload:", payload) + + response = authed_api()( + method="POST", + 
endpoint=f"/preview/legacy/applications/revisions/retrieve", + json=payload, + ) + response.raise_for_status() + + application_revision_response = ApplicationRevisionResponse(**response.json()) + + application_revision = application_revision_response.application_revision + + # print(" --- application_revision:", application_revision) + + return application_revision + + +async def aretrieve( + application_revision_id: Optional[UUID] = None, +) -> Optional[ApplicationRevision]: + # print("\n--------- RETRIEVE APPLICATION") + + response = await _retrieve_application( + application_revision_id=application_revision_id, + ) + + return response + + +async def aupsert( + *, + application_id: Optional[UUID] = None, + application_slug: Optional[str] = None, + application_revision_id: Optional[UUID] = None, + application_revision_slug: Optional[str] = None, + # + handler: Callable, + script: Optional[str] = None, + parameters: Optional[Dict[str, Any]] = None, + # + name: Optional[str] = None, + description: Optional[str] = None, +) -> Optional[UUID]: + # print("\n--------- UPSERT APPLICATION") + try: + if not is_workflow(handler): + application_workflow = auto_workflow( + handler, + # + script=script, + parameters=parameters, + # + name=name, + description=description, + ) + else: + application_workflow = handler + + req = await application_workflow.inspect() + + legacy_application_flags = LegacyApplicationFlags(**req.flags) + + legacy_application_data = LegacyApplicationData( + **( + req.interface.model_dump(mode="json", exclude_none=True) + if req and req.interface + else {} + ), + **( + req.configuration.model_dump(mode="json", exclude_none=True) + if req and req.configuration + else {} + ), + ) + + # print( + # " ---:", legacy_application_data.model_dump(mode="json", exclude_none=True) + # ) + + retrieve_response = None + + if req.references is not None: + _application_revision_ref = req.references.get("application_revision", {}) + if isinstance(_application_revision_ref, 
Reference): + _application_revision_ref = _application_revision_ref.model_dump( + mode="json", + exclude_none=True, + ) + if not isinstance(_application_revision_ref, dict): + _application_revision_ref = {} + _application_revision_id = _application_revision_ref.get("id") + _application_revision_slug = _application_revision_ref.get("slug") + + application_revision_id = ( + application_revision_id or _application_revision_id + ) + application_revision_slug = ( + application_revision_slug or _application_revision_slug + ) + + _application_ref = req.references.get("application", {}) + if isinstance(_application_ref, Reference): + _application_ref = _application_ref.model_dump( + mode="json", + exclude_none=True, + ) + if not isinstance(_application_ref, dict): + _application_ref = {} + _application_id = _application_ref.get("id") + _application_slug = _application_ref.get("slug") + + application_id = application_id or _application_id + application_slug = application_slug or _application_slug + + revision = req.data.revision if req and req.data else None + if revision: + name = name or revision.get("name") + description = description or revision.get("description") + + name = ( + name or req.data.revision.get("name") + if req and req.data and req.data.revision + else None + ) + + description = ( + description or req.data.revision.get("description") + if req and req.data and req.data.revision + else None + ) + + application_slug = ( + application_slug + or get_slug_from_name_and_id( + name=name, + id=application_id or uuid4(), + ) + if name + else uuid4().hex[-12:] + ) + + # print( + # application_id, + # application_slug, + # application_revision_id, + # application_revision_slug, + # ) + + if application_revision_id or application_revision_slug: + retrieve_response = await _retrieve_application( + application_revision_id=application_revision_id, + application_revision_slug=application_revision_slug, + ) + elif application_id or application_slug: + retrieve_response = 
await _retrieve_application( + application_id=application_id, + application_slug=application_slug, + ) + + except Exception as e: + print("[ERROR]: Failed to prepare application:", e) + return None + + # print("Retrieve response:", retrieve_response) + + if retrieve_response and retrieve_response.id and retrieve_response.application_id: + application_id = retrieve_response.application_id + # print(" --- Updating application...", application_id) + application_edit_request = LegacyApplicationEdit( + id=application_id, + # + name=name, + description=description, + # + flags=legacy_application_flags, + # + data=legacy_application_data, + ) + + # print(" --- application_edit_request:", application_edit_request) + + response = authed_api()( + method="PUT", + endpoint=f"/preview/legacy/applications/{application_id}", + json={ + "application": application_edit_request.model_dump( + mode="json", + exclude_none=True, + ) + }, + ) + + # print(" --- response:", response.status_code, response.text) + + try: + response.raise_for_status() + except Exception as e: + print("[ERROR]: Failed to update application:", e) + return None + + else: + # print(" --- Creating application...") + application_create_request = LegacyApplicationCreate( + slug=application_slug or uuid4().hex[-12:], + # + name=name, + description=description, + # + flags=legacy_application_flags, + # + data=legacy_application_data, + ) + + # print(" --- application_create_request:", application_create_request) + + response = authed_api()( + method="POST", + endpoint="/preview/legacy/applications/", + json={ + "application": application_create_request.model_dump( + mode="json", + exclude_none=True, + ) + }, + ) + + # print(" --- response:", response.status_code, response.text) + + try: + response.raise_for_status() + except Exception as e: + print("[ERROR]: Failed to create application:", e) + return None + + application_response = LegacyApplicationResponse(**response.json()) + + application = 
application_response.application + + if not application or not application.id: + return None + + # print(" --- application:", application) + + application_revision = await _retrieve_application( + application_id=application.id, + ) + + if not application_revision or not application_revision.id: + return None + + # print(application_revision, "----------") + + return application_revision.id diff --git a/sdk/agenta/sdk/managers/evaluators.py b/sdk/agenta/sdk/managers/evaluators.py new file mode 100644 index 0000000000..c948e23da4 --- /dev/null +++ b/sdk/agenta/sdk/managers/evaluators.py @@ -0,0 +1,303 @@ +from typing import Dict, Any, Callable, Optional +from uuid import uuid4, UUID +from traceback import print_exc + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.decorators.running import auto_workflow, is_workflow +from agenta.sdk.models.workflows import ( + EvaluatorRevision, + # + EvaluatorRevisionResponse, + # + SimpleEvaluatorFlags, + SimpleEvaluatorData, + SimpleEvaluatorCreate, + SimpleEvaluatorEdit, + # + SimpleEvaluatorResponse, + # + Reference, +) + +from agenta.sdk.utils.references import get_slug_from_name_and_id + + +async def _retrieve_evaluator( + evaluator_id: Optional[UUID] = None, + evaluator_slug: Optional[str] = None, + evaluator_revision_id: Optional[UUID] = None, + evaluator_revision_slug: Optional[str] = None, +) -> Optional[EvaluatorRevision]: + payload = { + "evaluator_ref": ( + { + "id": str(evaluator_id) if evaluator_id else None, + "slug": str(evaluator_slug), + } + if evaluator_id or evaluator_slug + else None + ), + "evaluator_revision_ref": ( + { + "id": str(evaluator_revision_id) if evaluator_revision_id else None, + "slug": evaluator_revision_slug, + } + if evaluator_revision_id or evaluator_revision_slug + else None + ), + } + + # print(" --- payload:", payload) + + response = authed_api()( + method="POST", + endpoint=f"/preview/evaluators/revisions/retrieve", + json=payload, + ) + + response.raise_for_status() + + 
evaluator_revision_response = EvaluatorRevisionResponse(**response.json()) + + evaluator_revision = evaluator_revision_response.evaluator_revision + + # print(" --- evaluator_revision:", evaluator_revision) + + return evaluator_revision + + +async def aretrieve( + evaluator_revision_id: Optional[UUID] = None, +) -> Optional[EvaluatorRevision]: + # print("\n--------- RETRIEVE EVALUATOR") + response = await _retrieve_evaluator( + evaluator_revision_id=evaluator_revision_id, + ) + + return response + + +async def aupsert( + *, + evaluator_id: Optional[UUID] = None, + evaluator_slug: Optional[str] = None, + evaluator_revision_id: Optional[UUID] = None, + evaluator_revision_slug: Optional[str] = None, + # + handler: Callable, + script: Optional[str] = None, + parameters: Optional[Dict[str, Any]] = None, + # + name: Optional[str] = None, + description: Optional[str] = None, +) -> Optional[UUID]: + # print("\n--------- UPSERT EVALUATOR") + try: + if not is_workflow(handler): + evaluator_workflow = auto_workflow( + handler, + # + script=script, + parameters=parameters, + # + name=name, + description=description, + ) + else: + evaluator_workflow = handler + + req = await evaluator_workflow.inspect() + + legacy_application_flags = SimpleEvaluatorFlags(**req.flags) + + simple_evaluator_data = SimpleEvaluatorData( + **( + req.interface.model_dump(mode="json", exclude_none=True) + if req and req.interface + else {} + ), + **( + req.configuration.model_dump(mode="json", exclude_none=True) + if req and req.configuration + else {} + ), + ) + # print(" ---:", simple_evaluator_data.model_dump(mode="json", exclude_none=True)) + + retrieve_response = None + + if req.references is not None: + _evaluator_revision_ref = req.references.get("evaluator_revision", {}) + if isinstance(_evaluator_revision_ref, Reference): + _evaluator_revision_ref = _evaluator_revision_ref.model_dump( + mode="json", + exclude_none=True, + ) + if not isinstance(_evaluator_revision_ref, dict): + 
_evaluator_revision_ref = {} + + _evaluator_revision_id = _evaluator_revision_ref.get("id") + _evaluator_revision_slug = _evaluator_revision_ref.get("slug") + + evaluator_revision_id = evaluator_revision_id or _evaluator_revision_id + evaluator_revision_slug = ( + evaluator_revision_slug or _evaluator_revision_slug + ) + + _evaluator_ref = req.references.get("evaluator", {}) + if isinstance(_evaluator_ref, Reference): + _evaluator_ref = _evaluator_ref.model_dump( + mode="json", + exclude_none=True, + ) + if not isinstance(_evaluator_ref, dict): + _evaluator_ref = {} + + _evaluator_id = _evaluator_ref.get("id") + _evaluator_slug = _evaluator_ref.get("slug") + + evaluator_id = evaluator_id or _evaluator_id + evaluator_slug = evaluator_slug or _evaluator_slug + + revision = req.data.revision if req and req.data else None + if revision: + name = name or revision.get("name") + description = description or revision.get("description") + + name = ( + name or req.data.revision.get("name") + if req and req.data and req.data.revision + else None + ) + + description = ( + description or req.data.revision.get("description") + if req and req.data and req.data.revision + else None + ) + + evaluator_slug = ( + evaluator_slug + or get_slug_from_name_and_id( + name=name, + id=evaluator_id or uuid4(), + ) + if name + else uuid4().hex[-12:] + ) + + # print( + # evaluator_id, + # evaluator_slug, + # evaluator_revision_id, + # evaluator_revision_slug, + # ) + + if evaluator_revision_id or evaluator_revision_slug: + retrieve_response = await _retrieve_evaluator( + evaluator_revision_id=evaluator_revision_id, + evaluator_revision_slug=evaluator_revision_slug, + ) + elif evaluator_id or evaluator_slug: + retrieve_response = await _retrieve_evaluator( + evaluator_id=evaluator_id, + evaluator_slug=evaluator_slug, + ) + + except Exception as e: + print("[ERROR]: Failed to prepare evaluator:") + print_exc() + return None + + # print("Retrieve response:", retrieve_response) + + if 
retrieve_response and retrieve_response.id and retrieve_response.evaluator_id: + evaluator_id = retrieve_response.evaluator_id + # print(" --- Updating evaluator...", evaluator_id) + evaluator_edit_request = SimpleEvaluatorEdit( + id=evaluator_id, + # + name=name, + description=description, + # + flags=legacy_application_flags, + # + data=simple_evaluator_data, + ) + + # print(" --- evaluator_edit_request:", evaluator_edit_request) + + response = authed_api()( + method="PUT", + endpoint=f"/preview/simple/evaluators/{evaluator_id}", + json={ + "evaluator": evaluator_edit_request.model_dump( + mode="json", + exclude_none=True, + ) + }, + ) + + # print(" --- response:", response.status_code, response.text) + + try: + response.raise_for_status() + except Exception as e: + print("[ERROR]: Failed to update evaluator:", e) + print_exc() + return None + + else: + # print(" --- Creating evaluator...") + evaluator_create_request = SimpleEvaluatorCreate( + slug=evaluator_slug or uuid4().hex[-12:], + # + name=name, + description=description, + # + flags=legacy_application_flags, + # + data=simple_evaluator_data, + ) + + # print(" --- evaluator_create_request:", evaluator_create_request) + + response = authed_api()( + method="POST", + endpoint="/preview/simple/evaluators/", + json={ + "evaluator": evaluator_create_request.model_dump( + mode="json", + exclude_none=True, + ) + }, + ) + + # print(" --- response:", response.status_code, response.text) + + try: + response.raise_for_status() + except Exception as e: + print("[ERROR]: Failed to create evaluator:", e) + print_exc() + return None + + evaluator_response = SimpleEvaluatorResponse(**response.json()) + + evaluator = evaluator_response.evaluator + + if not evaluator or not evaluator.id: + return None + + # print(" --- evaluator:", evaluator) + + evaluator_revision = await _retrieve_evaluator( + evaluator_id=evaluator.id, + ) + + if not evaluator_revision or not evaluator_revision.id: + return None + + # 
print(evaluator_revision, "----------") + + return evaluator_revision.id diff --git a/sdk/agenta/sdk/managers/secrets.py b/sdk/agenta/sdk/managers/secrets.py index eb0ed3ba23..1e3344e9cf 100644 --- a/sdk/agenta/sdk/managers/secrets.py +++ b/sdk/agenta/sdk/managers/secrets.py @@ -243,6 +243,8 @@ def get_provider_settings_from_workflow(model: str) -> Optional[Dict]: Dict: A dictionary containing all parameters needed for litellm.completion """ + request_provider_model = model + # STEP 1: get vault secrets from route context and transform it secrets = RunningContext.get().secrets if not secrets: diff --git a/sdk/agenta/sdk/managers/testsets.py b/sdk/agenta/sdk/managers/testsets.py new file mode 100644 index 0000000000..c10cb0f49e --- /dev/null +++ b/sdk/agenta/sdk/managers/testsets.py @@ -0,0 +1,441 @@ +from typing import List, Dict, Any, Optional +from uuid import UUID + +from agenta.sdk.utils.client import authed_api +from agenta.sdk.utils.references import get_slug_from_name_and_id +from agenta.sdk.models.testsets import ( + LegacyTestset, + # + Testcase, + TestsetRevisionData, + TestsetRevision, + # + TestsetRevisionResponse, +) + + +async def _create_legacy_testset( + *, + csvdata: List[Dict[str, Any]], + name: str, + testset_id: Optional[UUID] = None, +) -> Optional[TestsetRevision]: + response = authed_api()( + method="POST", + endpoint="/testsets/", + json={ + "testset_id": str(testset_id) if testset_id else None, + "name": name, + "csvdata": csvdata, + }, + ) + + if response.status_code != 200: + print("Failed to create testset:", response.status_code, response.text) + return None + + legacy_testset = LegacyTestset(**response.json()) + + # print(" --- legacy_testset:", legacy_testset) + + if not legacy_testset.id or not legacy_testset.name: + return None + + testset_revision = TestsetRevision( + id=UUID(legacy_testset.id), + slug=get_slug_from_name_and_id( + name=legacy_testset.name, + id=UUID(legacy_testset.id), + ), + name=legacy_testset.name, + 
data=TestsetRevisionData( + testcases=[ + Testcase( + data=testcase_data, + testset_id=UUID(legacy_testset.id), + ) + for testcase_data in csvdata + ] + ), + ) + + # print(" --- testset_revision:", testset_revision) + + return testset_revision + + +async def _fetch_legacy_testset( + testset_id: Optional[UUID] = None, + # + name: Optional[str] = None, +) -> Optional[TestsetRevision]: + legacy_testset = None + + if testset_id: + response = authed_api()( + method="GET", + endpoint=f"/testsets/{testset_id}", + ) + + if response.status_code != 200: + if response.status_code != 404: + print("Failed to fetch testset:", response.status_code, response.text) + return None + + legacy_testset = LegacyTestset(**response.json()) + elif name: + response = authed_api()( + method="GET", + endpoint="/testsets/", + params={"name": name}, + ) + + if response.status_code != 200: + print("Failed to list testsets:", response.status_code, response.text) + return None + + _testsets = response.json() + + for testset in _testsets: + _id = testset.pop("_id", None) + testset["id"] = _id + + legacy_testsets = [LegacyTestset(**testset) for testset in _testsets] + + if len(legacy_testsets) != 1: + print("Expected exactly one testset with name:", name) + return None + + legacy_testset = legacy_testsets[0] + + # print(" --- legacy_testset:", legacy_testset) + + if not legacy_testset.id or not legacy_testset.name: + return None + + testset_revision = TestsetRevision( + testset_id=UUID(legacy_testset.id), + slug=get_slug_from_name_and_id( + name=legacy_testset.name, + id=UUID(legacy_testset.id), + ), + name=legacy_testset.name, + data=( + TestsetRevisionData( + testcases=[ + Testcase( + data=testcase_data, + testset_id=UUID(legacy_testset.id), + ) + for testcase_data in legacy_testset.csvdata + ] + ) + if legacy_testset.csvdata + else None + ), + ) + + # print(" --- testset_revision:", testset_revision) + + return testset_revision + + +async def _edit_legacy_testset( + *, + testset_id: UUID, + 
csvdata: List[Dict[str, Any]], + name: Optional[str] = None, +) -> Optional[TestsetRevision]: + response = authed_api()( + method="PUT", + endpoint=f"/testsets/{testset_id}", + json={ + "name": name, + "csvdata": csvdata, + }, + ) + + if response.status_code != 200: + print("Failed to edit testset:", response.status_code, response.text) + return None + + response = authed_api()( + method="GET", + endpoint=f"/testsets/{testset_id}", + ) + + legacy_testset = LegacyTestset(**response.json()) + + # print(" --- legacy_testset:", legacy_testset) + + if not legacy_testset.id or not legacy_testset.name: + return None + + testset_revision = TestsetRevision( + id=UUID(legacy_testset.id), + slug=get_slug_from_name_and_id( + name=legacy_testset.name, + id=UUID(legacy_testset.id), + ), + name=legacy_testset.name, + data=( + TestsetRevisionData( + testcases=[ + Testcase( + data=testcase_data, + testset_id=UUID(legacy_testset.id), + ) + for testcase_data in legacy_testset.csvdata + ] + ) + if legacy_testset.csvdata + else None + ), + ) + + # print(" --- testset_revision:", testset_revision) + + return testset_revision + + +async def _list_legacy_testsets( + # +) -> List[TestsetRevision]: + response = authed_api()( + method="GET", + endpoint="/testsets/", + ) + + if response.status_code != 200: + print("Failed to list testsets:", response.status_code, response.text) + return [] + + legacy_testsets = [LegacyTestset(**testset) for testset in response.json()] + + # print(" --- legacy_testsets:", legacy_testsets) + + testset_revisions = [ + TestsetRevision( + id=UUID(legacy_testset.id), + slug=get_slug_from_name_and_id( + name=legacy_testset.name, + id=UUID(legacy_testset.id), + ), + name=legacy_testset.name, + data=( + TestsetRevisionData( + testcases=[ + Testcase( + data=testcase_data, + testset_id=UUID(legacy_testset.id), + ) + for testcase_data in legacy_testset.csvdata + ] + ) + if legacy_testset.csvdata + else None + ), + ) + for legacy_testset in legacy_testsets + if 
legacy_testset.id and legacy_testset.name + ] + + # print(" --- testset_revisions:", testset_revisions) + + return testset_revisions + + +async def _retrieve_testset( + testset_id: Optional[UUID] = None, + testset_revision_id: Optional[UUID] = None, +) -> Optional[TestsetRevision]: + payload = { + "testset_ref": ( + { + "id": str(testset_id) if testset_id else None, + } + if testset_id + else None + ), + "testset_revision_ref": ( + { + "id": str(testset_revision_id) if testset_revision_id else None, + } + if testset_revision_id + else None + ), + } + + # print(" --- payload:", payload) + + response = authed_api()( + method="POST", + endpoint="/preview/testsets/revisions/retrieve", + json=payload, + ) + response.raise_for_status() + + testset_revision_response = TestsetRevisionResponse(**response.json()) + + testset_revision = testset_revision_response.testset_revision + + # print(" --- testset_revision:", testset_revision) + + return testset_revision + + +async def _sync_legacy_testset( + *, + testset_id: Optional[UUID] = None, + # + csvdata: List[Dict[str, Any]], + # + name: Optional[str] = None, +) -> Optional[TestsetRevision]: + try: + # print("\n--------- UPSERT TESTSET") + + # print(" ---:", testset_revision_data.model_dump(mode="json", exclude_none=True)) + + testset_revision = await _fetch_legacy_testset( + testset_id=testset_id, + name=name, + ) + + except Exception as e: + print("[ERROR]: Failed to prepare testset:", e) + return None + + # print("Fetch response:", testset_revision) + + if testset_revision and testset_revision.testset_id: + # print(" --- Editing testset...", testset_id) + + testset_revision = await _edit_legacy_testset( + testset_id=testset_revision.testset_id, + name=name, + csvdata=csvdata, + ) + + # print("Edit response:", testset_revision) + + else: + # print(" --- Creating testset...", name, data) + + testset_revision = await _create_legacy_testset( + testset_id=testset_id, + name=name, + csvdata=csvdata, + ) + + if not 
testset_revision or not testset_revision.id: + return None + + # print(" --- testset_revision:", testset_revision) + + return testset_revision + + +async def aupsert( + *, + testset_id: Optional[UUID] = None, + # + name: Optional[str] = None, + # + data: List[Dict[str, Any]] | TestsetRevisionData, +) -> Optional[TestsetRevision]: + csvdata = list() + if isinstance(data, TestsetRevisionData) and data.testcases: + csvdata = [testcase.data for testcase in data.testcases] + elif isinstance(data, list): + csvdata = data + else: + csvdata = list() + + return await _sync_legacy_testset( + testset_id=testset_id, + name=name, + csvdata=csvdata, # type: ignore + ) + + +async def acreate( + *, + testset_id: Optional[UUID | str] = None, + # + name: Optional[str] = None, + # + data: List[Dict[str, Any]] | TestsetRevisionData, +) -> Optional[TestsetRevision]: + csvdata = list() + if isinstance(data, TestsetRevisionData) and data.testcases: + csvdata = [testcase.data for testcase in data.testcases] + elif isinstance(data, list): + csvdata = data + else: + csvdata = list() + + return await _create_legacy_testset( + testset_id=( + testset_id + if isinstance(testset_id, UUID) + else UUID(testset_id) + if testset_id + else None + ), + name=name, + csvdata=csvdata, # type: ignore + ) + + +async def aedit( + *, + testset_id: UUID | str, + # + name: Optional[str] = None, + # + data: List[Dict[str, Any]] | TestsetRevisionData, +) -> Optional[TestsetRevision]: + csvdata = list() + if isinstance(data, TestsetRevisionData) and data.testcases: + csvdata = [testcase.data for testcase in data.testcases] + elif isinstance(data, list): + csvdata = data + else: + csvdata = list() + + return await _edit_legacy_testset( + testset_id=testset_id if isinstance(testset_id, UUID) else UUID(testset_id), + name=name, + csvdata=csvdata, # type: ignore + ) + + +async def afetch( + *, + testset_id: UUID | str, +) -> Optional[TestsetRevision]: + return await _fetch_legacy_testset( + testset_id=testset_id if 
isinstance(testset_id, UUID) else UUID(testset_id) + ) + + +async def alist( + # +) -> List[TestsetRevision]: + return await _list_legacy_testsets() + + +async def aretrieve( + testset_id: Optional[UUID] = None, + # + testset_revision_id: Optional[UUID] = None, +) -> Optional[TestsetRevision]: + # print("\n--------- RETRIEVE TESTSET") + + response = await _retrieve_testset( + testset_id=testset_id, + testset_revision_id=testset_revision_id, + ) + + return response diff --git a/sdk/agenta/sdk/middlewares/running/resolver.py b/sdk/agenta/sdk/middlewares/running/resolver.py index 000c62764f..8081313c75 100644 --- a/sdk/agenta/sdk/middlewares/running/resolver.py +++ b/sdk/agenta/sdk/middlewares/running/resolver.py @@ -7,12 +7,13 @@ WorkflowServiceResponseData, WorkflowServiceRequest, WorkflowServiceInterface, + WorkflowServiceConfiguration, ) from agenta.sdk.contexts.running import RunningContext from agenta.sdk.workflows.utils import ( retrieve_handler, retrieve_interface, - retrieve_parameters, + retrieve_configuration, ) from agenta.sdk.workflows.errors import InvalidInterfaceURIV0Error @@ -42,18 +43,18 @@ async def resolve_interface( if interface is not None: return interface - if request and request and request.interface: + if request and request.interface: return request.interface ctx = RunningContext.get() return ctx.interface -async def resolve_parameters( +async def resolve_configuration( *, request: Optional[WorkflowServiceRequest] = None, - parameters: Optional[dict] = None, -) -> Optional[dict]: + configuration: Optional[WorkflowServiceConfiguration] = None, +) -> Optional[WorkflowServiceConfiguration]: """Resolve workflow parameters from multiple sources. 
Checks for parameters in this priority order: @@ -68,12 +69,14 @@ async def resolve_parameters( Returns: The resolved parameters dict or None if not found """ - if parameters is not None: - return parameters - if request and request.data and request.data.parameters: - return request.data.parameters + if configuration is not None: + return configuration + + if request and request.configuration: + return request.configuration + ctx = RunningContext.get() - return ctx.parameters + return ctx.configuration async def resolve_handler( @@ -138,17 +141,21 @@ async def __call__( InvalidInterfaceURIV0Error: If the handler cannot be resolved from the interface URI """ interface = await resolve_interface(request=request) - parameters = await resolve_parameters(request=request) + configuration = await resolve_configuration(request=request) handler = await resolve_handler(uri=(interface.uri if interface else None)) ctx = RunningContext.get() ctx.interface = interface - ctx.parameters = parameters + ctx.configuration = configuration ctx.handler = handler if not request.data: request.data = WorkflowServiceRequestData() - request.data.parameters = request.data.parameters or parameters + request.data.parameters = ( + request.data.parameters or configuration.parameters + if configuration + else None + ) return await call_next(request) diff --git a/sdk/agenta/sdk/models/blobs.py b/sdk/agenta/sdk/models/blobs.py new file mode 100644 index 0000000000..188dbe2601 --- /dev/null +++ b/sdk/agenta/sdk/models/blobs.py @@ -0,0 +1,33 @@ +from typing import Optional +from uuid import UUID + + +from agenta.sdk.models.shared import ( + TraceID, + SpanID, + Link, + Identifier, + Slug, + Version, + Reference, + Lifecycle, + Header, + Flags, + Tags, + Meta, + Metadata, + Data, + Commit, + AliasConfig, + sync_alias, +) + + +class Blob(Identifier, Lifecycle): + flags: Optional[Flags] = None # type: ignore + tags: Optional[Tags] = None # type: ignore + meta: Optional[Meta] = None # type: ignore + + 
data: Optional[Data] = None # type: ignore + + set_id: Optional[UUID] = None diff --git a/sdk/agenta/sdk/models/evaluations.py b/sdk/agenta/sdk/models/evaluations.py new file mode 100644 index 0000000000..38c22cf6cd --- /dev/null +++ b/sdk/agenta/sdk/models/evaluations.py @@ -0,0 +1,119 @@ +from typing import Dict, List, Optional, Union, Literal, Callable, Any +from enum import Enum +from uuid import UUID +from datetime import datetime + +from pydantic import BaseModel + +from agenta.sdk.models.shared import ( + TraceID, + SpanID, + Link, + Identifier, + Slug, + Version, + Reference, + Lifecycle, + Header, + Flags, + Tags, + Meta, + Metadata, + Data, + Commit, + AliasConfig, + sync_alias, +) + + +# ------------------------------------------------------------------------------ + + +Origin = Literal["custom", "human", "auto"] +# Target = Union[List[UUID], Dict[UUID, Origin], List[Callable]] +Target = Union[ + List[List[Dict[str, Any]]], # testcases_data + List[Callable], # workflow_handlers + List[UUID], # entity_ids + Dict[UUID, Origin], # entity_ids with origins +] + + +# oss.src.core.evaluations.types + + +class EvaluationStatus(str, Enum): + PENDING = "pending" + QUEUED = "queued" + RUNNING = "running" + SUCCESS = "success" + FAILURE = "failure" + ERRORS = "errors" + CANCELLED = "cancelled" + + +class EvaluationRunFlags(BaseModel): + is_closed: Optional[bool] = None # Indicates if the run is immutable + is_live: Optional[bool] = None # Indicates if the run is updated periodically + is_active: Optional[bool] = None # Indicates if the run is currently active + + +class SimpleEvaluationFlags(EvaluationRunFlags): + pass + + +SimpleEvaluationStatus = EvaluationStatus + + +class SimpleEvaluationData(BaseModel): + status: Optional[SimpleEvaluationStatus] = None + + query_steps: Optional[Target] = None + testset_steps: Optional[Target] = None + application_steps: Optional[Target] = None + evaluator_steps: Optional[Target] = None + + repeats: Optional[int] = None + + 
+class EvaluationRun(BaseModel): + id: UUID + + +class EvaluationScenario(BaseModel): + id: UUID + + run_id: UUID + + +class EvaluationResult(BaseModel): + id: UUID + + run_id: UUID + scenario_id: UUID + step_key: str + + testcase_id: Optional[UUID] = None + trace_id: Optional[UUID] = None + error: Optional[dict] = None + + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + + +class EvaluationMetrics(Identifier, Lifecycle): + flags: Optional[Dict[str, Any]] = None + tags: Optional[Dict[str, Any]] = None + meta: Optional[Dict[str, Any]] = None + + status: Optional[EvaluationStatus] = None + + timestamp: Optional[datetime] = None + interval: Optional[int] = None + + data: Optional[Data] = None + + scenario_id: Optional[UUID] = None + + run_id: UUID diff --git a/sdk/agenta/sdk/models/git.py b/sdk/agenta/sdk/models/git.py new file mode 100644 index 0000000000..57c2028038 --- /dev/null +++ b/sdk/agenta/sdk/models/git.py @@ -0,0 +1,126 @@ +from typing import List, Optional +from uuid import UUID + +from pydantic import BaseModel, Field + +from agenta.sdk.models.shared import ( + TraceID, + SpanID, + Link, + Identifier, + Slug, + Version, + Reference, + Lifecycle, + Header, + Flags, + Tags, + Meta, + Metadata, + Data, + Commit, + AliasConfig, + sync_alias, +) + + +from typing import Optional, List +from uuid import UUID + +from pydantic import BaseModel + + +# artifacts -------------------------------------------------------------------- + + +class Artifact(Identifier, Slug, Lifecycle, Header, Metadata): + pass + + +class ArtifactCreate(Slug, Header, Metadata): + pass + + +class ArtifactEdit(Identifier, Header, Metadata): + pass + + +class ArtifactQuery(Metadata): + pass + + +# variants --------------------------------------------------------------------- + + +class Variant(Identifier, Slug, Lifecycle, Header, Metadata): + artifact_id: Optional[UUID] = None + + +class VariantCreate(Slug, Header, 
Metadata): + artifact_id: Optional[UUID] = None + + +class VariantEdit(Identifier, Header, Metadata): + pass + + +class VariantQuery(Metadata): + pass + + +# revisions -------------------------------------------------------------------- + + +class Revision(Identifier, Slug, Version, Lifecycle, Header, Metadata, Commit): + data: Optional[Data] = None + + artifact_id: Optional[UUID] = None + variant_id: Optional[UUID] = None + + +class RevisionCreate(Slug, Header, Metadata): + artifact_id: Optional[UUID] = None + variant_id: Optional[UUID] = None + + +class RevisionEdit(Identifier, Header, Metadata): + pass + + +class RevisionQuery(Metadata): + authors: Optional[List[UUID]] = None + + +class RevisionCommit(Slug, Header, Metadata): + data: Optional[Data] = None + + message: Optional[str] = None + + artifact_id: Optional[UUID] = None + variant_id: Optional[UUID] = None + + +class RevisionsLog(BaseModel): + artifact_id: Optional[UUID] = None + variant_id: Optional[UUID] = None + revision_id: Optional[UUID] = None + + depth: Optional[int] = None + + +# forks ------------------------------------------------------------------------ + + +class RevisionFork(Slug, Header, Metadata): + data: Optional[Data] = None + + message: Optional[str] = None + + +class VariantFork(Slug, Header, Metadata): + pass + + +class ArtifactFork(RevisionsLog): + variant: Optional[VariantFork] = None + revision: Optional[RevisionFork] = None diff --git a/sdk/agenta/sdk/models/testsets.py b/sdk/agenta/sdk/models/testsets.py new file mode 100644 index 0000000000..25b73f0224 --- /dev/null +++ b/sdk/agenta/sdk/models/testsets.py @@ -0,0 +1,163 @@ +from typing import List, Optional, Dict, Any +from uuid import UUID + +from pydantic import BaseModel, Field + +from agenta.sdk.models.shared import ( + TraceID, + SpanID, + Link, + Identifier, + Slug, + Version, + Reference, + Lifecycle, + Header, + Flags, + Tags, + Meta, + Metadata, + Data, + Commit, + AliasConfig, + sync_alias, +) + +from 
agenta.sdk.models.git import ( + Artifact, + ArtifactCreate, + ArtifactEdit, + ArtifactQuery, + ArtifactFork, + Variant, + VariantCreate, + VariantEdit, + VariantQuery, + VariantFork, + Revision, + RevisionCreate, + RevisionEdit, + RevisionQuery, + RevisionCommit, + RevisionsLog, + RevisionFork, +) + +from agenta.sdk.models.blobs import ( + Blob, +) + + +class TestsetIdAlias(AliasConfig): + testset_id: Optional[UUID] = None + set_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="testset_id", + ) + + +class TestsetVariantIdAlias(AliasConfig): + testset_variant_id: Optional[UUID] = None + variant_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="testset_variant_id", + ) + + +class Testcase(Blob, TestsetIdAlias): + def model_post_init(self, __context) -> None: + sync_alias("testset_id", "set_id", self) + + +class TestsetFlags(BaseModel): + has_testcases: Optional[bool] = None + has_traces: Optional[bool] = None + + +class TestsetRevisionData(BaseModel): + testcase_ids: Optional[List[UUID]] = None + testcases: Optional[List[Testcase]] = None + + +class SimpleTestset( + Identifier, + Slug, + Lifecycle, + Header, +): + flags: Optional[TestsetFlags] = None + tags: Optional[Tags] = None # type: ignore + meta: Optional[Meta] = None # type: ignore + + data: Optional[TestsetRevisionData] = None + + +class Testset(Artifact): + flags: Optional[TestsetFlags] = None # type: ignore + + +class TestsetRevision( + Revision, + TestsetIdAlias, + TestsetVariantIdAlias, +): + flags: Optional[TestsetFlags] = None # type: ignore + + data: Optional[TestsetRevisionData] = None # type: ignore + + def model_post_init(self, __context) -> None: + sync_alias("testset_id", "artifact_id", self) + sync_alias("testset_variant_id", "variant_id", self) + + +class SimpleTestsetCreate(Slug, Header): + tags: Optional[Tags] = None # type: ignore + meta: Optional[Meta] = None # type: ignore + data: Optional[TestsetRevisionData] = None + + +class SimpleTestsetEdit( + 
Identifier, + Header, +): + # flags: Optional[TestsetFlags] = None + tags: Optional[Tags] = None # type: ignore + meta: Optional[Meta] = None # type: ignore + + data: Optional[TestsetRevisionData] = None + + +class TestsetResponse(BaseModel): + count: int = 0 + testset: Optional[Testset] = None + + +class TestsetRevisionResponse(BaseModel): + count: int = 0 + testset_revision: Optional[TestsetRevision] = None + + +class SimpleTestsetResponse(BaseModel): + count: int = 0 + testset: Optional[SimpleTestset] = None + + +class TestsetsResponse(BaseModel): + count: int = 0 + testsets: List[Testset] = [] + + +class SimpleTestsetsResponse(BaseModel): + count: int = 0 + testsets: List[SimpleTestset] = [] + + +# LEGACY TESTSETS -------------------------------------------------------------- + + +class LegacyTestset(BaseModel): + id: str + name: Optional[str] = None + csvdata: Optional[List[Dict[str, Any]]] = None diff --git a/sdk/agenta/sdk/models/workflows.py b/sdk/agenta/sdk/models/workflows.py index 507b63430b..43f4350d7b 100644 --- a/sdk/agenta/sdk/models/workflows.py +++ b/sdk/agenta/sdk/models/workflows.py @@ -1,6 +1,6 @@ # /agenta/sdk/models/running.py -from typing import Any, Dict, Optional, Union +from typing import Any, Dict, Optional, Union, List from uuid import UUID from urllib.parse import urlparse @@ -17,12 +17,15 @@ ConfigDict, model_validator, ValidationError, + Field, ) from agenta.sdk.models.shared import ( TraceID, SpanID, Link, + Identifier, + Slug, Reference, Lifecycle, Header, @@ -31,8 +34,52 @@ Schema, Status, Commit, + AliasConfig, + sync_alias, ) +from agenta.sdk.models.git import ( + Artifact, + ArtifactCreate, + ArtifactEdit, + ArtifactQuery, + ArtifactFork, + Variant, + VariantCreate, + VariantEdit, + VariantQuery, + VariantFork, + Revision, + RevisionCreate, + RevisionEdit, + RevisionQuery, + RevisionCommit, + RevisionsLog, + RevisionFork, +) + + +# oss.src.core.workflows.dtos +from typing import Optional, Dict, Any +from uuid import UUID, uuid4 
+from urllib.parse import urlparse + +from pydantic import ( + BaseModel, + Field, + model_validator, + ValidationError, +) + +from jsonschema import ( + Draft202012Validator, + Draft201909Validator, + Draft7Validator, + Draft4Validator, + Draft6Validator, +) +from jsonschema.exceptions import SchemaError + class JsonSchemas(BaseModel): parameters: Optional[Schema] = None @@ -41,9 +88,9 @@ class JsonSchemas(BaseModel): class WorkflowFlags(BaseModel): - is_custom: Optional[bool] = None - is_evaluator: Optional[bool] = None - is_human: Optional[bool] = None + is_custom: bool = False + is_evaluator: bool = False + is_human: bool = False class WorkflowServiceInterface(BaseModel): @@ -134,21 +181,6 @@ class WorkflowRevisionData( pass -class WorkflowRevision( - Reference, - Lifecycle, - Header, - Metadata, - Commit, -): - flags: Optional[WorkflowFlags] = None # type: ignore - - data: Optional[WorkflowRevisionData] = None - - artifact_id: Optional[UUID] = None - variant_id: Optional[UUID] = None - - class WorkflowServiceStatus(Status): type: Optional[str] = None stacktrace: Optional[Union[list[str], str]] = None @@ -235,3 +267,487 @@ async def iterator(self): WorkflowServiceBatchResponse, WorkflowServiceStreamResponse, ] + + +# aliases ---------------------------------------------------------------------- + + +class WorkflowIdAlias(AliasConfig): + workflow_id: Optional[UUID] = None + artifact_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="workflow_id", + ) + + +class WorkflowVariantIdAlias(AliasConfig): + workflow_variant_id: Optional[UUID] = None + variant_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="workflow_variant_id", + ) + + +class WorkflowRevisionIdAlias(AliasConfig): + workflow_revision_id: Optional[UUID] = None + revision_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="workflow_revision_id", + ) + + +# workflows -------------------------------------------------------------------- + + +class 
Workflow(Artifact): + flags: Optional[WorkflowFlags] = None + + +class WorkflowCreate(ArtifactCreate): + flags: Optional[WorkflowFlags] = None + + +class WorkflowEdit(ArtifactEdit): + flags: Optional[WorkflowFlags] = None + + +# workflow variants ------------------------------------------------------------ + + +class WorkflowVariant( + Variant, + WorkflowIdAlias, +): + flags: Optional[WorkflowFlags] = None + + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + + +class WorkflowVariantCreate( + VariantCreate, + WorkflowIdAlias, +): + flags: Optional[WorkflowFlags] = None + + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + + +class WorkflowVariantEdit(VariantEdit): + flags: Optional[WorkflowFlags] = None + + +class WorkflowVariantQuery(VariantQuery): + flags: Optional[WorkflowFlags] = None + + +# workflow revisions ----------------------------------------------------------- + +from agenta.sdk.models.workflows import WorkflowRevisionData + + +class WorkflowRevision( + Revision, + WorkflowIdAlias, + WorkflowVariantIdAlias, +): + flags: Optional[WorkflowFlags] = None + + data: Optional[WorkflowRevisionData] = None + + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + sync_alias("workflow_variant_id", "variant_id", self) + + +class WorkflowRevisionCreate( + RevisionCreate, + WorkflowIdAlias, + WorkflowVariantIdAlias, +): + flags: Optional[WorkflowFlags] = None + + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + sync_alias("workflow_variant_id", "variant_id", self) + + +class WorkflowRevisionEdit(RevisionEdit): + flags: Optional[WorkflowFlags] = None + + +class WorkflowRevisionQuery(RevisionQuery): + flags: Optional[WorkflowFlags] = None + + +class WorkflowRevisionCommit( + RevisionCommit, + WorkflowIdAlias, + WorkflowVariantIdAlias, +): + flags: Optional[WorkflowFlags] = None + + 
data: Optional[WorkflowRevisionData] = None + + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + sync_alias("workflow_variant_id", "variant_id", self) + + +class WorkflowRevisionsLog( + RevisionsLog, + WorkflowIdAlias, + WorkflowVariantIdAlias, + WorkflowRevisionIdAlias, +): + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + sync_alias("workflow_variant_id", "variant_id", self) + sync_alias("workflow_revision_id", "revision_id", self) + + +# forks ------------------------------------------------------------------------ + + +class WorkflowRevisionFork(RevisionFork): + flags: Optional[WorkflowFlags] = None + + data: Optional[WorkflowRevisionData] = None + + +class WorkflowRevisionForkAlias(AliasConfig): + workflow_revision: Optional[WorkflowRevisionFork] = None + + revision: Optional[RevisionFork] = Field( + default=None, + exclude=True, + alias="workflow_revision", + ) + + +class WorkflowVariantFork(VariantFork): + flags: Optional[WorkflowFlags] = None + + +class WorkflowVariantForkAlias(AliasConfig): + workflow_variant: Optional[WorkflowVariantFork] = None + + variant: Optional[VariantFork] = Field( + default=None, + exclude=True, + alias="workflow_variant", + ) + + +class WorkflowFork( + ArtifactFork, + WorkflowIdAlias, + WorkflowVariantIdAlias, + WorkflowVariantForkAlias, + WorkflowRevisionIdAlias, + WorkflowRevisionForkAlias, +): + def model_post_init(self, __context) -> None: + sync_alias("workflow_id", "artifact_id", self) + sync_alias("workflow_variant_id", "variant_id", self) + sync_alias("workflow_variant", "variant", self) + sync_alias("workflow_revision_id", "revision_id", self) + sync_alias("workflow_revision", "revision", self) + + +# ------------------------------------------------------------------------------ + + +class EvaluatorRevision(BaseModel): + id: Optional[UUID] = None + slug: Optional[str] = None + version: Optional[str] = None + + data: 
Optional[WorkflowRevisionData] = None + + evaluator_id: Optional[UUID] = None + evaluator_variant_id: Optional[UUID] = None + + +class ApplicationServiceRequest(WorkflowServiceRequest): + pass + + +class ApplicationServiceBatchResponse(WorkflowServiceBatchResponse): + pass + + +class EvaluatorServiceRequest(WorkflowServiceRequest): + pass + + +class EvaluatorServiceBatchResponse(WorkflowServiceBatchResponse): + pass + + +# oss.src.core.evaluators.dtos + + +class EvaluatorIdAlias(AliasConfig): + evaluator_id: Optional[UUID] = None + workflow_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="evaluator_id", + ) + + +class EvaluatorVariantIdAlias(AliasConfig): + evaluator_variant_id: Optional[UUID] = None + workflow_variant_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="evaluator_variant_id", + ) + + +class EvaluatorRevisionData(WorkflowRevisionData): + pass + + +class EvaluatorFlags(WorkflowFlags): + def __init__(self, **data): + data["is_evaluator"] = True + + super().__init__(**data) + + +class SimpleEvaluatorFlags(EvaluatorFlags): + pass + + +class SimpleEvaluatorData(EvaluatorRevisionData): + pass + + +class Evaluator(Workflow): + flags: Optional[EvaluatorFlags] = None + + +class SimpleEvaluatorRevision( + WorkflowRevision, + EvaluatorIdAlias, + EvaluatorVariantIdAlias, +): + flags: Optional[EvaluatorFlags] = None + + data: Optional[EvaluatorRevisionData] = None + + +class SimpleEvaluator(Identifier, Slug, Lifecycle, Header, Metadata): + flags: Optional[SimpleEvaluatorFlags] = None + + data: Optional[SimpleEvaluatorData] = None + + +class SimpleEvaluatorCreate(Slug, Header, Metadata): + flags: Optional[SimpleEvaluatorFlags] = None + + data: Optional[SimpleEvaluatorData] = None + + +class SimpleEvaluatorEdit(Identifier, Header, Metadata): + flags: Optional[SimpleEvaluatorFlags] = None + + data: Optional[SimpleEvaluatorData] = None + + +class SimpleEvaluatorResponse(BaseModel): + count: int = 0 + evaluator: 
Optional[SimpleEvaluator] = None + + +class EvaluatorRevisionResponse(BaseModel): + count: int = 0 + evaluator_revision: Optional[EvaluatorRevision] = None + + +# oss.src.core.applications.dtos + +# aliases ---------------------------------------------------------------------- + + +class ApplicationIdAlias(AliasConfig): + application_id: Optional[UUID] = None + workflow_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="application_id", + ) + + +class ApplicationVariantIdAlias(AliasConfig): + application_variant_id: Optional[UUID] = None + workflow_variant_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="application_variant_id", + ) + + +class ApplicationRevisionIdAlias(AliasConfig): + application_revision_id: Optional[UUID] = None + workflow_revision_id: Optional[UUID] = Field( + default=None, + exclude=True, + alias="application_revision_id", + ) + + +# globals ---------------------------------------------------------------------- + + +class ApplicationFlags(WorkflowFlags): + def __init__(self, **data): + data["is_evaluator"] = False + + super().__init__(**data) + + +# applications ------------------------------------------------------------------- + + +class Application(Workflow): + flags: Optional[ApplicationFlags] = None + + +class ApplicationCreate(WorkflowCreate): + flags: Optional[ApplicationFlags] = None + + +class ApplicationEdit(WorkflowEdit): + flags: Optional[ApplicationFlags] = None + + +# application variants ----------------------------------------------------------- + + +class ApplicationVariant( + WorkflowVariant, + ApplicationIdAlias, +): + flags: Optional[ApplicationFlags] = None + + def model_post_init(self, __context) -> None: + sync_alias("application_id", "workflow_id", self) + + +class ApplicationVariantCreate( + WorkflowVariantCreate, + ApplicationIdAlias, +): + flags: Optional[ApplicationFlags] = None + + def model_post_init(self, __context) -> None: + sync_alias("application_id", "workflow_id", self) 
+ + +class ApplicationVariantEdit(WorkflowVariantEdit): + flags: Optional[ApplicationFlags] = None + + +# application revisions ----------------------------------------------------- + + +class ApplicationRevisionData(WorkflowRevisionData): + pass + + +class ApplicationRevision( + WorkflowRevision, + ApplicationIdAlias, + ApplicationVariantIdAlias, +): + flags: Optional[ApplicationFlags] = None + + data: Optional[ApplicationRevisionData] = None + + def model_post_init(self, __context) -> None: + sync_alias("application_id", "workflow_id", self) + sync_alias("application_variant_id", "workflow_variant_id", self) + + +class ApplicationRevisionCreate( + WorkflowRevisionCreate, + ApplicationIdAlias, + ApplicationVariantIdAlias, +): + flags: Optional[ApplicationFlags] = None + + def model_post_init(self, __context) -> None: + sync_alias("application_id", "workflow_id", self) + sync_alias("application_variant_id", "workflow_variant_id", self) + + +class ApplicationRevisionEdit(WorkflowRevisionEdit): + flags: Optional[ApplicationFlags] = None + + +class ApplicationRevisionCommit( + WorkflowRevisionCommit, + ApplicationIdAlias, + ApplicationVariantIdAlias, +): + flags: Optional[ApplicationFlags] = None + + data: Optional[ApplicationRevisionData] = None + + def model_post_init(self, __context) -> None: + sync_alias("application_id", "workflow_id", self) + sync_alias("application_variant_id", "workflow_variant_id", self) + + +class ApplicationRevisionResponse(BaseModel): + count: int = 0 + application_revision: Optional[ApplicationRevision] = None + + +class ApplicationRevisionsResponse(BaseModel): + count: int = 0 + application_revisions: List[ApplicationRevision] = [] + + +# simple applications ------------------------------------------------------------ + + +class LegacyApplicationFlags(WorkflowFlags): + pass + + +class LegacyApplicationData(WorkflowRevisionData): + pass + + +class LegacyApplication(Identifier, Slug, Lifecycle, Header, Metadata): + flags: 
Optional[LegacyApplicationFlags] = None + + data: Optional[LegacyApplicationData] = None + + +class LegacyApplicationCreate(Slug, Header, Metadata): + flags: Optional[LegacyApplicationFlags] = None + + data: Optional[LegacyApplicationData] = None + + +class LegacyApplicationEdit(Identifier, Header, Metadata): + flags: Optional[LegacyApplicationFlags] = None + + data: Optional[LegacyApplicationData] = None + + +class LegacyApplicationResponse(BaseModel): + count: int = 0 + application: Optional[LegacyApplication] = None + + +# end of oss.src.core.applications.dtos diff --git a/sdk/agenta/sdk/tracing/exporters.py b/sdk/agenta/sdk/tracing/exporters.py index e0d3fc4511..c156d7d906 100644 --- a/sdk/agenta/sdk/tracing/exporters.py +++ b/sdk/agenta/sdk/tracing/exporters.py @@ -158,13 +158,11 @@ def __export(): # ) if _ASYNC_EXPORT is True: + # log.debug("[SPAN] [ASYNC.X]") thread = Thread(target=__export, daemon=True) thread.start() else: - # log.debug( - # "[SPAN] [__XPORT]", - # data=serialized_data, - # ) + # log.debug("[SPAN] [ SYNC.X]") return __export() except Exception as e: diff --git a/sdk/agenta/sdk/types.py b/sdk/agenta/sdk/types.py index cbdf684dc3..da45b2c9b8 100644 --- a/sdk/agenta/sdk/types.py +++ b/sdk/agenta/sdk/types.py @@ -777,7 +777,7 @@ def format(self, **kwargs) -> "PromptTemplate": ) ) - new_llm_config = self.llm_config.copy(deep=True) + new_llm_config = self.llm_config.model_copy(deep=True) if new_llm_config.response_format is not None: rf_dict = new_llm_config.response_format.model_dump(by_alias=True) substituted = self._substitute_variables(rf_dict, kwargs) diff --git a/sdk/agenta/sdk/utils/client.py b/sdk/agenta/sdk/utils/client.py new file mode 100644 index 0000000000..dba0cb6309 --- /dev/null +++ b/sdk/agenta/sdk/utils/client.py @@ -0,0 +1,38 @@ +import requests + +BASE_TIMEOUT = 10 + +from agenta.sdk.utils.logging import get_module_logger + +import agenta as ag + +log = get_module_logger(__name__) + + +def authed_api(): + """ + Preconfigured 
requests for authenticated endpoints (supports all methods). + """ + + api_url = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_url + api_key = ag.DEFAULT_AGENTA_SINGLETON_INSTANCE.api_key + + if not api_url or not api_key: + log.error("Please call ag.init() first.") + log.error("And don't forget to set AGENTA_API_URL and AGENTA_API_KEY.") + raise ValueError("API URL and API Key must be set.") + + def _request(method: str, endpoint: str, **kwargs): + url = f"{api_url}{endpoint}" + headers = kwargs.pop("headers", {}) + headers.setdefault("Authorization", f"ApiKey {api_key}") + + return requests.request( + method=method, + url=url, + headers=headers, + timeout=BASE_TIMEOUT, + **kwargs, + ) + + return _request diff --git a/sdk/agenta/sdk/utils/references.py b/sdk/agenta/sdk/utils/references.py new file mode 100644 index 0000000000..865be8d1ab --- /dev/null +++ b/sdk/agenta/sdk/utils/references.py @@ -0,0 +1,23 @@ +from uuid import UUID +import re +import unicodedata + + +def get_slug_from_name_and_id( + name: str, + id: UUID, # pylint: disable=redefined-builtin +) -> str: + # Normalize Unicode (e.g., é → e) + name = unicodedata.normalize("NFKD", name) + # Remove non-ASCII characters + name = name.encode("ascii", "ignore").decode("ascii") + # Lowercase and remove non-word characters except hyphens and spaces + name = re.sub(r"[^\w\s-]", "", name.lower()) + # Replace any sequence of hyphens or whitespace with a single hyphen + name = re.sub(r"[-\s]+", "-", name) + # Trim leading/trailing hyphens + name = name.strip("-") + # Last 12 characters of the ID + slug = f"{name}-{id.hex[-12:]}" + + return slug.lower() diff --git a/sdk/agenta/sdk/workflows/builtin.py b/sdk/agenta/sdk/workflows/builtin.py index 78b9e432bc..96fe546f66 100644 --- a/sdk/agenta/sdk/workflows/builtin.py +++ b/sdk/agenta/sdk/workflows/builtin.py @@ -7,7 +7,7 @@ def echo( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -28,7 +28,7 @@ def echo( 
def auto_exact_match( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -57,7 +57,7 @@ def auto_exact_match( def auto_regex_test( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -91,7 +91,7 @@ def auto_regex_test( def field_match_test( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -123,7 +123,7 @@ def field_match_test( def auto_webhook_test( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -155,7 +155,7 @@ def auto_webhook_test( def auto_custom_code_run( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -189,7 +189,7 @@ def auto_custom_code_run( def auto_ai_critique( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -225,7 +225,7 @@ def auto_ai_critique( def auto_starts_with( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -257,7 +257,7 @@ def auto_starts_with( def auto_ends_with( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -289,7 +289,7 @@ def auto_ends_with( def auto_contains( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -321,7 +321,7 @@ def auto_contains( def auto_contains_any( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -353,7 +353,7 @@ def auto_contains_any( def auto_contains_all( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -385,7 +385,7 @@ def auto_contains_all( def auto_contains_json( *, - slug: str, + slug: Optional[str] = None, # 
name: Optional[str] = None, description: Optional[str] = None, @@ -406,7 +406,7 @@ def auto_contains_json( def auto_json_diff( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -443,7 +443,7 @@ def auto_json_diff( def auto_levenshtein_distance( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -476,7 +476,7 @@ def auto_levenshtein_distance( def auto_similarity_match( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -509,7 +509,7 @@ def auto_similarity_match( def auto_semantic_similarity( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -542,7 +542,7 @@ def auto_semantic_similarity( def completion( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, @@ -572,7 +572,7 @@ def completion( def chat( *, - slug: str, + slug: Optional[str] = None, # name: Optional[str] = None, description: Optional[str] = None, diff --git a/sdk/agenta/sdk/workflows/utils.py b/sdk/agenta/sdk/workflows/utils.py index 357f81592a..d86f499da4 100644 --- a/sdk/agenta/sdk/workflows/utils.py +++ b/sdk/agenta/sdk/workflows/utils.py @@ -97,7 +97,7 @@ }, ) -PARAMETERS_REGISTRY: dict = dict( +CONFIGURATION_REGISTRY: dict = dict( agenta={ "built-in": dict( echo=dict(v0=echo_v0_configuration), @@ -149,7 +149,7 @@ # - register_handler(fn, uri) - Registers a new handler with the given URI # - retrieve_handler(uri) - Retrieves a handler by its URI # - retrieve_interface(uri) - Retrieves the interface configuration for a handler -# - retrieve_parameters(uri) - Retrieves default parameters for a handler +# - retrieve_configuration(uri) - Retrieves default parameters for a handler # # The registry supports automatic URI generation for user-defined workflows: # If no URI is provided, 
def is_custom_uri(uri: Optional[str] = None) -> bool:
    """Return True when *uri* addresses a user-defined custom workflow.

    A missing or empty URI is treated as custom: with no registry address
    supplied, the workflow cannot be one of the built-in handlers.
    """
    if not uri:
        return True

    provider, kind, _unused_key, _unused_version = parse_uri(uri)
    return (provider, kind) == ("user", "custom")
optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "botocore-1.40.63-py3-none-any.whl", hash = "sha256:83657b3ee487268fccc9ba022cba572ba657b9ece8cddd1fa241e2c6a49c8c14"}, - {file = "botocore-1.40.63.tar.gz", hash = "sha256:0324552c3c800e258cbcb8c22b495a2e2e0260a7408d08016196e46fa0d1b587"}, + {file = "botocore-1.40.68-py3-none-any.whl", hash = "sha256:9d514f9c9054e1af055f2cbe9e0d6771d407a600206d45a01b54d5f09538fecb"}, + {file = "botocore-1.40.68.tar.gz", hash = "sha256:28f41b463d9f012a711ee8b61d4e26cd14ee3b450b816d5dee849aa79155e856"}, ] [package.dependencies] @@ -784,14 +784,14 @@ tqdm = ["tqdm"] [[package]] name = "google-auth" -version = "2.42.1" +version = "2.43.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "google_auth-2.42.1-py2.py3-none-any.whl", hash = "sha256:eb73d71c91fc95dbd221a2eb87477c278a355e7367a35c0d84e6b0e5f9b4ad11"}, - {file = "google_auth-2.42.1.tar.gz", hash = "sha256:30178b7a21aa50bffbdc1ffcb34ff770a2f65c712170ecd5446c4bef4dc2b94e"}, + {file = "google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16"}, + {file = "google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483"}, ] [package.dependencies] @@ -811,14 +811,14 @@ urllib3 = ["packaging", "urllib3"] [[package]] name = "googleapis-common-protos" -version = "1.71.0" +version = "1.72.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "googleapis_common_protos-1.71.0-py3-none-any.whl", hash = "sha256:59034a1d849dc4d18971997a72ac56246570afdd17f9369a0ff68218d50ab78c"}, - {file = "googleapis_common_protos-1.71.0.tar.gz", hash = "sha256:1aec01e574e29da63c80ba9f7bbf1ccfaacf1da877f23609fe236ca7c72a2e2e"}, + {file = "googleapis_common_protos-1.72.0-py3-none-any.whl", hash = 
"sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038"}, + {file = "googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5"}, ] [package.dependencies] @@ -1527,28 +1527,28 @@ files = [ [[package]] name = "openai" -version = "1.109.1" +version = "2.7.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "openai-1.109.1-py3-none-any.whl", hash = "sha256:6bcaf57086cf59159b8e27447e4e7dd019db5d29a438072fbd49c290c7e65315"}, - {file = "openai-1.109.1.tar.gz", hash = "sha256:d173ed8dbca665892a6db099b4a2dfac624f94d20a93f46eb0b56aae940ed869"}, + {file = "openai-2.7.1-py3-none-any.whl", hash = "sha256:2f2530354d94c59c614645a4662b9dab0a5b881c5cd767a8587398feac0c9021"}, + {file = "openai-2.7.1.tar.gz", hash = "sha256:df4d4a3622b2df3475ead8eb0fbb3c27fd1c070fa2e55d778ca4f40e0186c726"}, ] [package.dependencies] anyio = ">=3.5.0,<5" distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" -jiter = ">=0.4.0,<1" +jiter = ">=0.10.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" typing-extensions = ">=4.11,<5" [package.extras] -aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"] +aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] realtime = ["websockets (>=13,<16)"] voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] @@ -1960,19 +1960,19 @@ pyasn1 = ">=0.6.1,<0.7.0" [[package]] name = "pydantic" -version = "2.12.3" +version = "2.12.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf"}, - {file = "pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74"}, + {file = 
"pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, + {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.41.4" +pydantic-core = "2.41.5" typing-extensions = ">=4.14.1" typing-inspection = ">=0.4.2" @@ -1982,129 +1982,133 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.41.4" +version = "2.41.5" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2442d9a4d38f3411f22eb9dd0912b7cbf4b7d5b6c92c4173b75d3e1ccd84e36e"}, - {file = "pydantic_core-2.41.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:30a9876226dda131a741afeab2702e2d127209bde3c65a2b8133f428bc5d006b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d55bbac04711e2980645af68b97d445cdbcce70e5216de444a6c4b6943ebcccd"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1d778fb7849a42d0ee5927ab0f7453bf9f85eef8887a546ec87db5ddb178945"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b65077a4693a98b90ec5ad8f203ad65802a1b9b6d4a7e48066925a7e1606706"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62637c769dee16eddb7686bf421be48dfc2fae93832c25e25bc7242e698361ba"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfe3aa529c8f501babf6e502936b9e8d4698502b2cfab41e17a028d91b1ac7b"}, - {file = "pydantic_core-2.41.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:ca2322da745bf2eeb581fc9ea3bbb31147702163ccbcbf12a3bb630e4bf05e1d"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e8cd3577c796be7231dcf80badcf2e0835a46665eaafd8ace124d886bab4d700"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:1cae8851e174c83633f0833e90636832857297900133705ee158cf79d40f03e6"}, - {file = "pydantic_core-2.41.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a26d950449aae348afe1ac8be5525a00ae4235309b729ad4d3399623125b43c9"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win32.whl", hash = "sha256:0cf2a1f599efe57fa0051312774280ee0f650e11152325e41dfd3018ef2c1b57"}, - {file = "pydantic_core-2.41.4-cp310-cp310-win_amd64.whl", hash = "sha256:a8c2e340d7e454dc3340d3d2e8f23558ebe78c98aa8f68851b04dcb7bc37abdc"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:28ff11666443a1a8cf2a044d6a545ebffa8382b5f7973f22c36109205e65dc80"}, - {file = "pydantic_core-2.41.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61760c3925d4633290292bad462e0f737b840508b4f722247d8729684f6539ae"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae547b7315d055b0de2ec3965643b0ab82ad0106a7ffd29615ee9f266a02827"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef9ee5471edd58d1fcce1c80ffc8783a650e3e3a193fe90d52e43bb4d87bff1f"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15dd504af121caaf2c95cb90c0ebf71603c53de98305621b94da0f967e572def"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a926768ea49a8af4d36abd6a8968b8790f7f76dd7cbd5a4c180db2b4ac9a3a2"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6916b9b7d134bff5440098a4deb80e4cb623e68974a87883299de9124126c2a8"}, - {file = "pydantic_core-2.41.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5cf90535979089df02e6f17ffd076f07237efa55b7343d98760bde8743c4b265"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7533c76fa647fade2d7ec75ac5cc079ab3f34879626dae5689b27790a6cf5a5c"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:37e516bca9264cbf29612539801ca3cd5d1be465f940417b002905e6ed79d38a"}, - {file = "pydantic_core-2.41.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0c19cb355224037c83642429b8ce261ae108e1c5fbf5c028bac63c77b0f8646e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win32.whl", hash = "sha256:09c2a60e55b357284b5f31f5ab275ba9f7f70b7525e18a132ec1f9160b4f1f03"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_amd64.whl", hash = "sha256:711156b6afb5cb1cb7c14a2cc2c4a8b4c717b69046f13c6b332d8a0a8f41ca3e"}, - {file = "pydantic_core-2.41.4-cp311-cp311-win_arm64.whl", hash = "sha256:6cb9cf7e761f4f8a8589a45e49ed3c0d92d1d696a45a6feaee8c904b26efc2db"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887"}, - {file = "pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f"}, - {file = 
"pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47"}, - {file = "pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8"}, - {file = "pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff"}, - {file = "pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746"}, - {file = "pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84"}, - {file = "pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2"}, - {file = "pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4"}, - {file = "pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2"}, - {file 
= "pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2"}, - {file = "pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1"}, - {file = "pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12"}, - {file = "pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad"}, - {file = 
"pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a"}, - {file = "pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894"}, - {file = "pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d"}, - {file = "pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:646e76293345954acea6966149683047b7b2ace793011922208c8e9da12b0062"}, - {file = "pydantic_core-2.41.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc8e85a63085a137d286e2791037f5fdfff0aabb8b899483ca9c496dd5797338"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:692c622c8f859a17c156492783902d8370ac7e121a611bd6fe92cc71acf9ee8d"}, - 
{file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1e2906efb1031a532600679b424ef1d95d9f9fb507f813951f23320903adbd7"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04e2f7f8916ad3ddd417a7abdd295276a0bf216993d9318a5d61cc058209166"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df649916b81822543d1c8e0e1d079235f68acdc7d270c911e8425045a8cfc57e"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66c529f862fdba70558061bb936fe00ddbaaa0c647fd26e4a4356ef1d6561891"}, - {file = "pydantic_core-2.41.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc3b4c5a1fd3a311563ed866c2c9b62da06cb6398bee186484ce95c820db71cb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6e0fc40d84448f941df9b3334c4b78fe42f36e3bf631ad54c3047a0cdddc2514"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:44e7625332683b6c1c8b980461475cde9595eff94447500e80716db89b0da005"}, - {file = "pydantic_core-2.41.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:170ee6835f6c71081d031ef1c3b4dc4a12b9efa6a9540f93f95b82f3c7571ae8"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win32.whl", hash = "sha256:3adf61415efa6ce977041ba9745183c0e1f637ca849773afa93833e04b163feb"}, - {file = "pydantic_core-2.41.4-cp39-cp39-win_amd64.whl", hash = "sha256:a238dd3feee263eeaeb7dc44aea4ba1364682c4f9f9467e6af5596ba322c2332"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:a1b2cfec3879afb742a7b0bcfa53e4f22ba96571c9e54d6a3afe1052d17d843b"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:d175600d975b7c244af6eb9c9041f10059f20b8bbffec9e33fdd5ee3f67cdc42"}, - {file = 
"pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f184d657fa4947ae5ec9c47bd7e917730fa1cbb78195037e32dcbab50aca5ee"}, - {file = "pydantic_core-2.41.4-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed810568aeffed3edc78910af32af911c835cc39ebbfacd1f0ab5dd53028e5c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c"}, - {file = "pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e5ab4fc177dd41536b3c32b2ea11380dd3d4619a385860621478ac2d25ceb00"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d88d0054d3fa11ce936184896bed3c1c5441d6fa483b498fac6a5d0dd6f64a9"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2a054a8725f05b4b6503357e0ac1c4e8234ad3b0c2ac130d6ffc66f0e170e2"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0d9db5a161c99375a0c68c058e227bee1d89303300802601d76a3d01f74e258"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6273ea2c8ffdac7b7fda2653c49682db815aebf4a89243a6feccf5e36c18c347"}, - {file = 
"pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:4c973add636efc61de22530b2ef83a65f39b6d6f656df97f678720e20de26caa"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b69d1973354758007f46cf2d44a4f3d0933f10b6dc9bf15cf1356e037f6f731a"}, - {file = "pydantic_core-2.41.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3619320641fd212aaf5997b6ca505e97540b7e16418f4a241f44cdf108ffb50d"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:491535d45cd7ad7e4a2af4a5169b0d07bebf1adfd164b0368da8aa41e19907a5"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:54d86c0cada6aba4ec4c047d0e348cbad7063b87ae0f005d9f8c9ad04d4a92a2"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca1124aced216b2500dc2609eade086d718e8249cb9696660ab447d50a758bd"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c9024169becccf0cb470ada03ee578d7348c119a0d42af3dcf9eda96e3a247c"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:26895a4268ae5a2849269f4991cdc97236e4b9c010e51137becf25182daac405"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:ca4df25762cf71308c446e33c9b1fdca2923a3f13de616e2a949f38bf21ff5a8"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5a28fcedd762349519276c36634e71853b4541079cab4acaaac60c4421827308"}, - {file = "pydantic_core-2.41.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c173ddcd86afd2535e2b695217e82191580663a1d1928239f877f5a1649ef39f"}, - {file = "pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"}, + {file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"}, + {file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"}, + {file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"}, + {file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = 
"sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"}, + {file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"}, + {file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = 
"sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"}, + {file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"}, + {file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = 
"sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"}, + {file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"}, + {file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"}, + {file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"}, + {file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"}, + {file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"}, + {file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"}, + {file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"}, + {file = 
"pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"}, + {file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"}, + {file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"}, + {file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"}, + {file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"}, + {file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"}, + {file = 
"pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"}, + {file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"}, + {file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"}, + {file = 
"pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"}, + {file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"}, + {file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"}, + {file = 
"pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"}, + {file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"}, + {file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"}, + {file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"}, ] [package.dependencies] @@ -2342,127 +2346,127 @@ typing-extensions = {version = ">=4.4.0", markers = 
"python_version < \"3.13\""} [[package]] name = "regex" -version = "2025.10.23" +version = "2025.11.3" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "regex-2025.10.23-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:17bbcde374bef1c5fad9b131f0e28a6a24856dd90368d8c0201e2b5a69533daa"}, - {file = "regex-2025.10.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4e10434279cc8567f99ca6e018e9025d14f2fded2a603380b6be2090f476426"}, - {file = "regex-2025.10.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c9bb421cbe7012c744a5a56cf4d6c80829c72edb1a2991677299c988d6339c8"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:275cd1c2ed8c4a78ebfa489618d7aee762e8b4732da73573c3e38236ec5f65de"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7b426ae7952f3dc1e73a86056d520bd4e5f021397484a6835902fc5648bcacce"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c5cdaf5b6d37c7da1967dbe729d819461aab6a98a072feef65bbcff0a6e60649"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bfeff0b08f296ab28b4332a7e03ca31c437ee78b541ebc874bbf540e5932f8d"}, - {file = "regex-2025.10.23-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f97236a67307b775f30a74ef722b64b38b7ab7ba3bb4a2508518a5de545459c"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:be19e7de499940cd72475fb8e46ab2ecb1cf5906bebdd18a89f9329afb1df82f"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:883df76ee42d9ecb82b37ff8d01caea5895b3f49630a64d21111078bbf8ef64c"}, - {file = 
"regex-2025.10.23-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2e9117d1d35fc2addae6281019ecc70dc21c30014b0004f657558b91c6a8f1a7"}, - {file = "regex-2025.10.23-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0ff1307f531a5d8cf5c20ea517254551ff0a8dc722193aab66c656c5a900ea68"}, - {file = "regex-2025.10.23-cp310-cp310-win32.whl", hash = "sha256:7888475787cbfee4a7cd32998eeffe9a28129fa44ae0f691b96cb3939183ef41"}, - {file = "regex-2025.10.23-cp310-cp310-win_amd64.whl", hash = "sha256:ec41a905908496ce4906dab20fb103c814558db1d69afc12c2f384549c17936a"}, - {file = "regex-2025.10.23-cp310-cp310-win_arm64.whl", hash = "sha256:b2b7f19a764d5e966d5a62bf2c28a8b4093cc864c6734510bdb4aeb840aec5e6"}, - {file = "regex-2025.10.23-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6c531155bf9179345e85032052a1e5fe1a696a6abf9cea54b97e8baefff970fd"}, - {file = "regex-2025.10.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:912e9df4e89d383681268d38ad8f5780d7cccd94ba0e9aa09ca7ab7ab4f8e7eb"}, - {file = "regex-2025.10.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f375c61bfc3138b13e762fe0ae76e3bdca92497816936534a0177201666f44f"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e248cc9446081119128ed002a3801f8031e0c219b5d3c64d3cc627da29ac0a33"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b52bf9282fdf401e4f4e721f0f61fc4b159b1307244517789702407dd74e38ca"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5c084889ab2c59765a0d5ac602fd1c3c244f9b3fcc9a65fdc7ba6b74c5287490"}, - {file = "regex-2025.10.23-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d80e8eb79009bdb0936658c44ca06e2fbbca67792013e3818eea3f5f228971c2"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_aarch64.whl", 
hash = "sha256:b6f259118ba87b814a8ec475380aee5f5ae97a75852a3507cf31d055b01b5b40"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:9b8c72a242683dcc72d37595c4f1278dfd7642b769e46700a8df11eab19dfd82"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a8d7b7a0a3df9952f9965342159e0c1f05384c0f056a47ce8b61034f8cecbe83"}, - {file = "regex-2025.10.23-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:413bfea20a484c524858125e92b9ce6ffdd0a4b97d4ff96b5859aa119b0f1bdd"}, - {file = "regex-2025.10.23-cp311-cp311-win32.whl", hash = "sha256:f76deef1f1019a17dad98f408b8f7afc4bd007cbe835ae77b737e8c7f19ae575"}, - {file = "regex-2025.10.23-cp311-cp311-win_amd64.whl", hash = "sha256:59bba9f7125536f23fdab5deeea08da0c287a64c1d3acc1c7e99515809824de8"}, - {file = "regex-2025.10.23-cp311-cp311-win_arm64.whl", hash = "sha256:b103a752b6f1632ca420225718d6ed83f6a6ced3016dd0a4ab9a6825312de566"}, - {file = "regex-2025.10.23-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7a44d9c00f7a0a02d3b777429281376370f3d13d2c75ae74eb94e11ebcf4a7fc"}, - {file = "regex-2025.10.23-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b83601f84fde939ae3478bb32a3aef36f61b58c3208d825c7e8ce1a735f143f2"}, - {file = "regex-2025.10.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec13647907bb9d15fd192bbfe89ff06612e098a5709e7d6ecabbdd8f7908fc45"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78d76dd2957d62501084e7012ddafc5fcd406dd982b7a9ca1ea76e8eaaf73e7e"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8668e5f067e31a47699ebb354f43aeb9c0ef136f915bd864243098524482ac43"}, - {file = "regex-2025.10.23-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a32433fe3deb4b2d8eda88790d2808fed0dc097e84f5e683b4cd4f42edef6cca"}, - {file = 
"regex-2025.10.23-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d97d73818c642c938db14c0668167f8d39520ca9d983604575ade3fda193afcc"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bca7feecc72ee33579e9f6ddf8babbe473045717a0e7dbc347099530f96e8b9a"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7e24af51e907d7457cc4a72691ec458320b9ae67dc492f63209f01eecb09de32"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d10bcde58bbdf18146f3a69ec46dd03233b94a4a5632af97aa5378da3a47d288"}, - {file = "regex-2025.10.23-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:44383bc0c933388516c2692c9a7503e1f4a67e982f20b9a29d2fb70c6494f147"}, - {file = "regex-2025.10.23-cp312-cp312-win32.whl", hash = "sha256:6040a86f95438a0114bba16e51dfe27f1bc004fd29fe725f54a586f6d522b079"}, - {file = "regex-2025.10.23-cp312-cp312-win_amd64.whl", hash = "sha256:436b4c4352fe0762e3bfa34a5567079baa2ef22aa9c37cf4d128979ccfcad842"}, - {file = "regex-2025.10.23-cp312-cp312-win_arm64.whl", hash = "sha256:f4b1b1991617055b46aff6f6db24888c1f05f4db9801349d23f09ed0714a9335"}, - {file = "regex-2025.10.23-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:b7690f95404a1293923a296981fd943cca12c31a41af9c21ba3edd06398fc193"}, - {file = "regex-2025.10.23-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1a32d77aeaea58a13230100dd8797ac1a84c457f3af2fdf0d81ea689d5a9105b"}, - {file = "regex-2025.10.23-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b24b29402f264f70a3c81f45974323b41764ff7159655360543b7cabb73e7d2f"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:563824a08c7c03d96856d84b46fdb3bbb7cfbdf79da7ef68725cda2ce169c72a"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:a0ec8bdd88d2e2659c3518087ee34b37e20bd169419ffead4240a7004e8ed03b"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b577601bfe1d33913fcd9276d7607bbac827c4798d9e14d04bf37d417a6c41cb"}, - {file = "regex-2025.10.23-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7c9f2c68ac6cb3de94eea08a437a75eaa2bd33f9e97c84836ca0b610a5804368"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89f8b9ea3830c79468e26b0e21c3585f69f105157c2154a36f6b7839f8afb351"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:98fd84c4e4ea185b3bb5bf065261ab45867d8875032f358a435647285c722673"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1e11d3e5887b8b096f96b4154dfb902f29c723a9556639586cd140e77e28b313"}, - {file = "regex-2025.10.23-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f13450328a6634348d47a88367e06b64c9d84980ef6a748f717b13f8ce64e87"}, - {file = "regex-2025.10.23-cp313-cp313-win32.whl", hash = "sha256:37be9296598a30c6a20236248cb8b2c07ffd54d095b75d3a2a2ee5babdc51df1"}, - {file = "regex-2025.10.23-cp313-cp313-win_amd64.whl", hash = "sha256:ea7a3c283ce0f06fe789365841e9174ba05f8db16e2fd6ae00a02df9572c04c0"}, - {file = "regex-2025.10.23-cp313-cp313-win_arm64.whl", hash = "sha256:d9a4953575f300a7bab71afa4cd4ac061c7697c89590a2902b536783eeb49a4f"}, - {file = "regex-2025.10.23-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7d6606524fa77b3912c9ef52a42ef63c6cfbfc1077e9dc6296cd5da0da286044"}, - {file = "regex-2025.10.23-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c037aadf4d64bdc38af7db3dbd34877a057ce6524eefcb2914d6d41c56f968cc"}, - {file = "regex-2025.10.23-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:99018c331fb2529084a0c9b4c713dfa49fafb47c7712422e49467c13a636c656"}, - {file = 
"regex-2025.10.23-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fd8aba965604d70306eb90a35528f776e59112a7114a5162824d43b76fa27f58"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:238e67264b4013e74136c49f883734f68656adf8257bfa13b515626b31b20f8e"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b2eb48bd9848d66fd04826382f5e8491ae633de3233a3d64d58ceb4ecfa2113a"}, - {file = "regex-2025.10.23-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d36591ce06d047d0c0fe2fc5f14bfbd5b4525d08a7b6a279379085e13f0e3d0e"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b5d4ece8628d6e364302006366cea3ee887db397faebacc5dacf8ef19e064cf8"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:39a7e8083959cb1c4ff74e483eecb5a65d3b3e1d821b256e54baf61782c906c6"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:842d449a8fefe546f311656cf8c0d6729b08c09a185f1cad94c756210286d6a8"}, - {file = "regex-2025.10.23-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d614986dc68506be8f00474f4f6960e03e4ca9883f7df47744800e7d7c08a494"}, - {file = "regex-2025.10.23-cp313-cp313t-win32.whl", hash = "sha256:a5b7a26b51a9df473ec16a1934d117443a775ceb7b39b78670b2e21893c330c9"}, - {file = "regex-2025.10.23-cp313-cp313t-win_amd64.whl", hash = "sha256:ce81c5544a5453f61cb6f548ed358cfb111e3b23f3cd42d250a4077a6be2a7b6"}, - {file = "regex-2025.10.23-cp313-cp313t-win_arm64.whl", hash = "sha256:e9bf7f6699f490e4e43c44757aa179dab24d1960999c84ab5c3d5377714ed473"}, - {file = "regex-2025.10.23-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:5b5cb5b6344c4c4c24b2dc87b0bfee78202b07ef7633385df70da7fcf6f7cec6"}, - {file = 
"regex-2025.10.23-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a6ce7973384c37bdf0f371a843f95a6e6f4e1489e10e0cf57330198df72959c5"}, - {file = "regex-2025.10.23-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2ee3663f2c334959016b56e3bd0dd187cbc73f948e3a3af14c3caaa0c3035d10"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2003cc82a579107e70d013482acce8ba773293f2db534fb532738395c557ff34"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:182c452279365a93a9f45874f7f191ec1c51e1f1eb41bf2b16563f1a40c1da3a"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b1249e9ff581c5b658c8f0437f883b01f1edcf424a16388591e7c05e5e9e8b0c"}, - {file = "regex-2025.10.23-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b841698f93db3ccc36caa1900d2a3be281d9539b822dc012f08fc80b46a3224"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:956d89e0c92d471e8f7eee73f73fdff5ed345886378c45a43175a77538a1ffe4"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5c259cb363299a0d90d63b5c0d7568ee98419861618a95ee9d91a41cb9954462"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:185d2b18c062820b3a40d8fefa223a83f10b20a674bf6e8c4a432e8dfd844627"}, - {file = "regex-2025.10.23-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:281d87fa790049c2b7c1b4253121edd80b392b19b5a3d28dc2a77579cb2a58ec"}, - {file = "regex-2025.10.23-cp314-cp314-win32.whl", hash = "sha256:63b81eef3656072e4ca87c58084c7a9c2b81d41a300b157be635a8a675aacfb8"}, - {file = "regex-2025.10.23-cp314-cp314-win_amd64.whl", hash = "sha256:0967c5b86f274800a34a4ed862dfab56928144d03cb18821c5153f8777947796"}, - {file = "regex-2025.10.23-cp314-cp314-win_arm64.whl", 
hash = "sha256:c70dfe58b0a00b36aa04cdb0f798bf3e0adc31747641f69e191109fd8572c9a9"}, - {file = "regex-2025.10.23-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:1f5799ea1787aa6de6c150377d11afad39a38afd033f0c5247aecb997978c422"}, - {file = "regex-2025.10.23-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a9639ab7540cfea45ef57d16dcbea2e22de351998d614c3ad2f9778fa3bdd788"}, - {file = "regex-2025.10.23-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:08f52122c352eb44c3421dab78b9b73a8a77a282cc8314ae576fcaa92b780d10"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebf1baebef1c4088ad5a5623decec6b52950f0e4d7a0ae4d48f0a99f8c9cb7d7"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:16b0f1c2e2d566c562d5c384c2b492646be0a19798532fdc1fdedacc66e3223f"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7ada5d9dceafaab92646aa00c10a9efd9b09942dd9b0d7c5a4b73db92cc7e61"}, - {file = "regex-2025.10.23-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a36b4005770044bf08edecc798f0e41a75795b9e7c9c12fe29da8d792ef870c"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:af7b2661dcc032da1fae82069b5ebf2ac1dfcd5359ef8b35e1367bfc92181432"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:1cb976810ac1416a67562c2e5ba0accf6f928932320fef302e08100ed681b38e"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:1a56a54be3897d62f54290190fbcd754bff6932934529fbf5b29933da28fcd43"}, - {file = "regex-2025.10.23-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8f3e6d202fb52c2153f532043bbcf618fd177df47b0b306741eb9b60ba96edc3"}, - {file = "regex-2025.10.23-cp314-cp314t-win32.whl", hash = 
"sha256:1fa1186966b2621b1769fd467c7b22e317e6ba2d2cdcecc42ea3089ef04a8521"}, - {file = "regex-2025.10.23-cp314-cp314t-win_amd64.whl", hash = "sha256:08a15d40ce28362eac3e78e83d75475147869c1ff86bc93285f43b4f4431a741"}, - {file = "regex-2025.10.23-cp314-cp314t-win_arm64.whl", hash = "sha256:a93e97338e1c8ea2649e130dcfbe8cd69bba5e1e163834752ab64dcb4de6d5ed"}, - {file = "regex-2025.10.23-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8d286760ee5b77fd21cf6b33cc45e0bffd1deeda59ca65b9be996f590a9828a"}, - {file = "regex-2025.10.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e72e3b84b170fec02193d32620a0a7060a22e52c46e45957dcd14742e0d28fb"}, - {file = "regex-2025.10.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ec506e8114fa12d21616deb44800f536d6bf2e1a69253dbf611f69af92395c99"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7e481f9710e8e24228ce2c77b41db7662a3f68853395da86a292b49dadca2aa"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4663ff2fc367735ae7b90b4f0e05b25554446df4addafc76fdaacaaa0ba852b5"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0879dd3251a42d2e9b938e1e03b1e9f60de90b4d153015193f5077a376a18439"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:651c58aecbab7e97bdf8ec76298a28d2bf2b6238c099ec6bf32e6d41e2f9a9cb"}, - {file = "regex-2025.10.23-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ceabc62a0e879169cd1bf066063bd6991c3e41e437628936a2ce66e0e2071c32"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bfdf4e9aa3e7b7d02fda97509b4ceeed34542361694ecc0a81db1688373ecfbd"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:92f565ff9beb9f51bc7cc8c578a7e92eb5c4576b69043a4c58cd05d73fda83c5"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:abbea548b1076eaf8635caf1071c9d86efdf0fa74abe71fca26c05a2d64cda80"}, - {file = "regex-2025.10.23-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:33535dcf34f47821381e341f7b715cbd027deda4223af4d3932adcd371d3192a"}, - {file = "regex-2025.10.23-cp39-cp39-win32.whl", hash = "sha256:345c9df49a15bf6460534b104b336581bc5f35c286cac526416e7a63d389b09b"}, - {file = "regex-2025.10.23-cp39-cp39-win_amd64.whl", hash = "sha256:f668fe1fd3358c5423355a289a4a003e58005ce829d217b828f80bd605a90145"}, - {file = "regex-2025.10.23-cp39-cp39-win_arm64.whl", hash = "sha256:07a3fd25d9074923e4d7258b551ae35ab6bdfe01904b8f0d5341c7d8b20eb18d"}, - {file = "regex-2025.10.23.tar.gz", hash = "sha256:8cbaf8ceb88f96ae2356d01b9adf5e6306fa42fa6f7eab6b97794e37c959ac26"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2b441a4ae2c8049106e8b39973bfbddfb25a179dda2bdb99b0eeb60c40a6a3af"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2fa2eed3f76677777345d2f81ee89f5de2f5745910e805f7af7386a920fa7313"}, + {file = "regex-2025.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d8b4a27eebd684319bdf473d39f1d79eed36bf2cd34bd4465cdb4618d82b3d56"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cf77eac15bd264986c4a2c63353212c095b40f3affb2bc6b4ef80c4776c1a28"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b7f9ee819f94c6abfa56ec7b1dbab586f41ebbdc0a57e6524bd5e7f487a878c7"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:838441333bc90b829406d4a03cb4b8bf7656231b84358628b0406d803931ef32"}, + {file = 
"regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cfe6d3f0c9e3b7e8c0c694b24d25e677776f5ca26dce46fd6b0489f9c8339391"}, + {file = "regex-2025.11.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2ab815eb8a96379a27c3b6157fcb127c8f59c36f043c1678110cea492868f1d5"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:728a9d2d173a65b62bdc380b7932dd8e74ed4295279a8fe1021204ce210803e7"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:509dc827f89c15c66a0c216331260d777dd6c81e9a4e4f830e662b0bb296c313"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:849202cd789e5f3cf5dcc7822c34b502181b4824a65ff20ce82da5524e45e8e9"}, + {file = "regex-2025.11.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b6f78f98741dcc89607c16b1e9426ee46ce4bf31ac5e6b0d40e81c89f3481ea5"}, + {file = "regex-2025.11.3-cp310-cp310-win32.whl", hash = "sha256:149eb0bba95231fb4f6d37c8f760ec9fa6fabf65bab555e128dde5f2475193ec"}, + {file = "regex-2025.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:ee3a83ce492074c35a74cc76cf8235d49e77b757193a5365ff86e3f2f93db9fd"}, + {file = "regex-2025.11.3-cp310-cp310-win_arm64.whl", hash = "sha256:38af559ad934a7b35147716655d4a2f79fcef2d695ddfe06a06ba40ae631fa7e"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eadade04221641516fa25139273505a1c19f9bf97589a05bc4cfcd8b4a618031"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:feff9e54ec0dd3833d659257f5c3f5322a12eee58ffa360984b716f8b92983f4"}, + {file = "regex-2025.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3b30bc921d50365775c09a7ed446359e5c0179e9e2512beec4a60cbcef6ddd50"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:f99be08cfead2020c7ca6e396c13543baea32343b7a9a5780c462e323bd8872f"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6dd329a1b61c0ee95ba95385fb0c07ea0d3fe1a21e1349fa2bec272636217118"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4c5238d32f3c5269d9e87be0cf096437b7622b6920f5eac4fd202468aaeb34d2"}, + {file = "regex-2025.11.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10483eefbfb0adb18ee9474498c9a32fcf4e594fbca0543bb94c48bac6183e2e"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:78c2d02bb6e1da0720eedc0bad578049cad3f71050ef8cd065ecc87691bed2b0"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b49cd2aad93a1790ce9cffb18964f6d3a4b0b3dbdbd5de094b65296fce6e58"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:885b26aa3ee56433b630502dc3d36ba78d186a00cc535d3806e6bfd9ed3c70ab"}, + {file = "regex-2025.11.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ddd76a9f58e6a00f8772e72cff8ebcff78e022be95edf018766707c730593e1e"}, + {file = "regex-2025.11.3-cp311-cp311-win32.whl", hash = "sha256:3e816cc9aac1cd3cc9a4ec4d860f06d40f994b5c7b4d03b93345f44e08cc68bf"}, + {file = "regex-2025.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:087511f5c8b7dfbe3a03f5d5ad0c2a33861b1fc387f21f6f60825a44865a385a"}, + {file = "regex-2025.11.3-cp311-cp311-win_arm64.whl", hash = "sha256:1ff0d190c7f68ae7769cd0313fe45820ba07ffebfddfaa89cc1eb70827ba0ddc"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41"}, + {file = "regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36"}, + {file = 
"regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48"}, + {file = "regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74"}, + {file = "regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0"}, + {file = "regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204"}, + {file = "regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9"}, + {file = "regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76"}, + {file = "regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe"}, + {file = "regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b"}, + {file = "regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7"}, + {file = "regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c"}, + {file = "regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = 
"sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5"}, + {file = "regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39"}, + {file = "regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b"}, + {file = "regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd"}, + {file = "regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2"}, + {file = "regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a"}, + {file = "regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c"}, + {file = "regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4"}, + {file = "regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be"}, + {file = "regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02"}, + {file = "regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed"}, + {file = "regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4"}, + {file = "regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad"}, + {file = "regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49"}, + {file = "regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9"}, + {file = "regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267"}, + {file = "regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379"}, + {file = "regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38"}, + {file = "regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de"}, + {file = "regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:81519e25707fc076978c6143b81ea3dc853f176895af05bf7ec51effe818aeec"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3bf28b1873a8af8bbb58c26cc56ea6e534d80053b41fb511a35795b6de507e6a"}, + {file = "regex-2025.11.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:856a25c73b697f2ce2a24e7968285579e62577a048526161a2c0f53090bea9f9"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a3d571bd95fade53c86c0517f859477ff3a93c3fde10c9e669086f038e0f207"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:732aea6de26051af97b94bc98ed86448821f839d058e5d259c72bf6d73ad0fc0"}, + {file = 
"regex-2025.11.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:51c1c1847128238f54930edb8805b660305dca164645a9fd29243f5610beea34"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22dd622a402aad4558277305350699b2be14bc59f64d64ae1d928ce7d072dced"}, + {file = "regex-2025.11.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f3b5a391c7597ffa96b41bd5cbd2ed0305f515fcbb367dfa72735679d5502364"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cc4076a5b4f36d849fd709284b4a3b112326652f3b0466f04002a6c15a0c96c1"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a295ca2bba5c1c885826ce3125fa0b9f702a1be547d821c01d65f199e10c01e2"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b4774ff32f18e0504bfc4e59a3e71e18d83bc1e171a3c8ed75013958a03b2f14"}, + {file = "regex-2025.11.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e7d1cdfa88ef33a2ae6aa0d707f9255eb286ffbd90045f1088246833223aee"}, + {file = "regex-2025.11.3-cp39-cp39-win32.whl", hash = "sha256:74d04244852ff73b32eeede4f76f51c5bcf44bc3c207bc3e6cf1c5c45b890708"}, + {file = "regex-2025.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:7a50cd39f73faa34ec18d6720ee25ef10c4c1839514186fcda658a06c06057a2"}, + {file = "regex-2025.11.3-cp39-cp39-win_arm64.whl", hash = "sha256:43b4fb020e779ca81c1b5255015fe2b82816c76ec982354534ad9ec09ad7c9e3"}, + {file = "regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01"}, ] [[package]] @@ -2494,7 +2498,7 @@ description = "RestrictedPython is a defined subset of the Python language which optional = false python-versions = "<3.15,>=3.9" groups = ["main"] -markers = "python_version < \"3.12\"" +markers = "python_version < \"3.14\"" files = [ {file = "restrictedpython-8.1-py3-none-any.whl", hash = 
"sha256:4769449c6cdb10f2071649ba386902befff0eff2a8fd6217989fa7b16aeae926"}, {file = "restrictedpython-8.1.tar.gz", hash = "sha256:4a69304aceacf6bee74bdf153c728221d4e3109b39acbfe00b3494927080d898"}, @@ -3188,4 +3192,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "e6413824b6ec2fa2e89002d58d6c3432772dc3279619b8f54e4818abaa3b44a7" +content-hash = "f2b3913ac70cfb499bdfaab2f11ad61438db4b24d6c8d757d4838f62ed9d6bb0" diff --git a/sdk/pyproject.toml b/sdk/pyproject.toml index dd3bf54cf0..40e602ec35 100644 --- a/sdk/pyproject.toml +++ b/sdk/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "agenta" -version = "0.60.2" +version = "0.61.0" description = "The SDK for agenta is an open-source LLMOps platform." readme = "README.md" authors = [ @@ -41,13 +41,12 @@ opentelemetry-instrumentation = ">=0.56b0" opentelemetry-exporter-otlp-proto-http ="^1.27.0" structlog = "^25.2.0" huggingface-hub = "<0.31.0" -restrictedpython = { version = "^8.0", python = ">=3.11,<3.12" } - +restrictedpython = { version = "^8.0", python = ">=3.11,<3.14" } # audit fixes h11 = "^0.16.0" decorator = "^5.2.1" -openai = "^1.106.0" +openai = ">=1.106.0" tiktoken = "0.11.0" google-auth = ">=2.23,<3" diff --git a/web/ee/package.json b/web/ee/package.json index 01946d1ff3..5bc3ba3451 100644 --- a/web/ee/package.json +++ b/web/ee/package.json @@ -1,6 +1,6 @@ { "name": "@agenta/ee", - "version": "0.60.2", + "version": "0.61.0", "private": true, "engines": { "node": ">=18" diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx index 771c942e46..8d750d6527 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/assets/VariantTag.tsx @@ -4,13 +4,19 @@ import {ArrowSquareOut} from "@phosphor-icons/react" import {useQueryClient} from "@tanstack/react-query" import {Skeleton, Tag} 
from "antd" import clsx from "clsx" -import {useSetAtom} from "jotai" +import {useAtomValue, useSetAtom} from "jotai" import {useRouter} from "next/router" import useURL from "@/oss/hooks/useURL" import {buildRevisionsQueryParam} from "@/oss/lib/helpers/url" +import {runIndexFamily} from "@/oss/lib/hooks/useEvaluationRunData/assets/atoms" import type {EnrichedEvaluationRun} from "@/oss/lib/hooks/usePreviewEvaluations/types" -import {recentAppIdAtom, routerAppIdAtom} from "@/oss/state/app" +import { + appDetailQueryAtomFamily, + currentAppContextAtom, + recentAppIdAtom, + routerAppIdAtom, +} from "@/oss/state/app" import { combineAppNameWithLabel, @@ -50,7 +56,7 @@ const VariantTag = ({ const setRecentAppId = useSetAtom(recentAppIdAtom) const routeAppId = normalizeId(router.query.app_id as string | undefined) const {baseAppURL} = useURL() - + const app = useAtomValue(appDetailQueryAtomFamily(enrichedRun?.appId || null)) const variantsFromRun = useMemo(() => { if (enrichedRun?.variants && Array.isArray(enrichedRun.variants)) { return enrichedRun.variants as any[] @@ -107,10 +113,6 @@ const VariantTag = ({ return variantFromRun }, [variant, variantFromRun]) - if (isLoading) { - return - } - const baseLabel = normalizeLabel(variantName) ?? normalizeLabel(resolvedVariant?.variantName) ?? @@ -143,6 +145,7 @@ const VariantTag = ({ fallbackAppName: (resolvedVariant as any)?.appName ?? (resolvedVariant as any)?.application?.name ?? + (resolvedVariant as any)?.baseName ?? enrichedRun?.appName ?? (enrichedRun as any)?.app_name ?? 
(enrichedRun as any)?.app?.name, @@ -208,6 +211,7 @@ const VariantTag = ({ const blockedByRuntime = isRouteAppContext && display.hasRuntime === false const canNavigate = + app?.data?.app_type !== "custom (sdk)" && !isDeleted && Boolean(targetAppId) && hasValidRevision && @@ -259,4 +263,12 @@ const VariantTag = ({ ) } -export default VariantTag +const VariantTagRouter = ({isLoading, ...props}: VariantTagProps) => { + if (isLoading) { + return + } else { + return + } +} + +export default VariantTagRouter diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx index 23613814a7..0c85b4c5dd 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunCompareMenu/index.tsx @@ -22,9 +22,11 @@ const failedFilters = ["errors", "error", "failed", "failure"] const EvalRunCompareMenu = ({ popoverProps, buttonProps, + disabled = false, }: { popoverProps?: PopoverProps buttonProps?: ButtonProps + disabled?: boolean }) => { const [isMenuOpen, setIsMenuOpen] = useState(false) const [searchTerm, setSearchTerm] = useState("") @@ -101,7 +103,7 @@ const EvalRunCompareMenu = ({ (run) => run?.data?.steps.every( (step) => step?.type !== "annotation" || step?.origin === "auto", - ) && !Boolean(run?.flags?.is_live), + ) && !run?.flags?.is_live, ) return autoEvals @@ -260,7 +262,7 @@ const EvalRunCompareMenu = ({ } {...popoverProps} > - diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx index 0df5ba5646..d274272ee9 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx +++ 
b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/index.tsx @@ -103,6 +103,230 @@ export interface DrawerEvaluatorMetric { fallbackKeys?: string[] } +const normalizeMetricPrimaryKey = (slug: string | undefined, rawKey: string): string => { + const normalizedSlug = slug && slug.trim().length > 0 ? slug.trim() : undefined + const trimmed = rawKey.trim() + if (!trimmed) return normalizedSlug ?? "" + if (normalizedSlug) { + const prefix = `${normalizedSlug}.` + if (trimmed.startsWith(prefix)) return trimmed + } + if (trimmed.includes(".")) return trimmed + return normalizedSlug ? `${normalizedSlug}.${trimmed}` : trimmed +} + +const INVOCATION_METRIC_PREFIX = "attributes.ag.metrics" +const INVOCATION_METRIC_SIGNATURE = `.attributes.ag.metrics` + +const isInvocationMetricKey = (key?: string): boolean => { + if (!key) return false + return key.startsWith(INVOCATION_METRIC_PREFIX) || key.includes(INVOCATION_METRIC_SIGNATURE) +} + +const filterInvocationMetricDefinitions = ( + metrics: DrawerEvaluatorMetric[], +): DrawerEvaluatorMetric[] => metrics.filter((metric) => !isInvocationMetricKey(metric.metricKey)) + +const stripOutputsPrefixes = (key: string): string => { + let result = key + const OUTPUT_PREFIX = "attributes.ag.data.outputs." + const METRIC_PREFIX = "attributes.ag.metrics." 
+ while (result.startsWith(OUTPUT_PREFIX)) { + result = result.slice(OUTPUT_PREFIX.length) + } + while (result.startsWith(METRIC_PREFIX)) { + result = result.slice(METRIC_PREFIX.length) + } + return result +} + +const normalizeMetricKeyForSlug = (key: string | undefined, slug?: string): string => { + if (!key) return "" + let result = key.trim() + if (!result) return "" + if (slug && result.startsWith(`${slug}.`)) { + result = result.slice(slug.length + 1) + } + result = stripOutputsPrefixes(result) + return result +} + +const dedupeEvaluatorMetricDefinitions = ( + metrics: DrawerEvaluatorMetric[], + slug?: string, +): DrawerEvaluatorMetric[] => { + const map = new Map() + metrics.forEach((metric) => { + const normalizedKey = normalizeMetricKeyForSlug(metric.metricKey ?? metric.id, slug) + const existing = map.get(normalizedKey) + if (!existing) { + map.set(normalizedKey, metric) + return + } + const fallbackKeys = new Set([ + ...(existing.fallbackKeys || []), + ...(metric.fallbackKeys || []), + ]) + const preferred = + isInvocationMetricKey(existing.metricKey) && !isInvocationMetricKey(metric.metricKey) + ? metric + : existing + map.set(normalizedKey, { + ...preferred, + fallbackKeys: fallbackKeys.size ? Array.from(fallbackKeys) : preferred.fallbackKeys, + }) + }) + return Array.from(map.values()) +} + +const collectMetricFallbackKeys = ( + slug: string | undefined, + rawKey: string, + primaryKey: string, + meta: any, +): string[] => { + const set = new Set() + const normalizedSlug = slug && slug.trim().length > 0 ? slug.trim() : undefined + const push = (value?: string) => { + if (!value) return + const trimmed = String(value).trim() + if (!trimmed) return + if (trimmed.includes(".") || !normalizedSlug) { + set.add(trimmed) + } else { + set.add(`${normalizedSlug}.${trimmed}`) + } + } + + push(rawKey) + + const aliases = Array.isArray(meta?.aliases) + ? meta?.aliases + : meta?.aliases + ? [meta.aliases] + : meta?.alias + ? 
[meta.alias] + : [] + aliases.forEach(push) + + const extraKeys = [ + meta?.metricKey, + meta?.metric_key, + meta?.key, + meta?.path, + meta?.fullKey, + meta?.full_key, + meta?.canonicalKey, + meta?.canonical_key, + meta?.statsKey, + meta?.stats_key, + meta?.metric, + ] + extraKeys.forEach(push) + + const fallbackKeys = Array.from(set).filter((value) => value !== rawKey && value !== primaryKey) + return fallbackKeys +} + +// const buildDrawerMetricDefinition = ( +// slug: string | undefined, +// rawKey: string, +// meta: any, +// ): DrawerEvaluatorMetric => { +// const normalizedSlug = slug && slug.trim().length > 0 ? slug.trim() : undefined +// const normalizedDisplayBase = +// normalizedSlug && rawKey.startsWith(`${normalizedSlug}.`) +// ? rawKey.slice(normalizedSlug.length + 1) +// : rawKey +// const normalizedDisplay = stripOutputsPrefixes(normalizedDisplayBase) +// const primaryKey = normalizeMetricPrimaryKey(slug, rawKey) +// const fallbackKeys = collectMetricFallbackKeys(slug, rawKey, primaryKey, meta) +// const id = canonicalizeMetricKey(primaryKey) || primaryKey + +// return { +// id, +// displayName: normalizedDisplay || primaryKey, +// metricKey: primaryKey, +// fallbackKeys: fallbackKeys.length ? 
fallbackKeys : undefined, +// } +// } + +const collectCandidateSteps = (data?: UseEvaluationRunScenarioStepsFetcherResult): any[] => { + if (!data) return [] + const buckets: any[] = [] + if (Array.isArray(data.annotationSteps)) buckets.push(...(data.annotationSteps as any[])) + if (Array.isArray(data.steps)) buckets.push(...(data.steps as any[])) + if (Array.isArray(data.invocationSteps)) buckets.push(...(data.invocationSteps as any[])) + return buckets +} + +// const collectSlugCandidates = ( +// data: UseEvaluationRunScenarioStepsFetcherResult | undefined, +// evaluatorSlug: string, +// ): string[] => { +// const set = new Set() +// const push = (value?: string | null) => { +// if (!value) return +// const normalized = String(value).trim() +// if (!normalized) return +// set.add(normalized) +// } + +// push(evaluatorSlug) + +// const steps = collectCandidateSteps(data) +// steps.forEach((step) => { +// if (!step) return +// const ref: any = step?.references?.evaluator +// push(step?.stepKey as any) +// push(step?.stepkey as any) +// push(step?.step_key as any) +// push(ref?.slug) +// push(ref?.key) +// push(ref?.id) +// }) + +// return Array.from(set) +// } + +// const findAnnotationStepKey = ( +// data: UseEvaluationRunScenarioStepsFetcherResult | undefined, +// slugCandidates: string[], +// ): string | undefined => { +// if (!data) return undefined + +// const steps = collectCandidateSteps(data) +// if (!steps.length) return undefined + +// const loweredCandidates = slugCandidates +// .map((slug) => String(slug).toLowerCase()) +// .filter((slug) => slug.length > 0) + +// const matched = steps.find((step) => { +// if (!step) return false +// const possible: string[] = [ +// (step as any)?.stepKey, +// (step as any)?.stepkey, +// (step as any)?.step_key, +// (step as any)?.references?.evaluator?.slug, +// (step as any)?.references?.evaluator?.key, +// (step as any)?.references?.evaluator?.id, +// ] + +// return possible +// .filter(Boolean) +// .map((value) => 
String(value).toLowerCase()) +// .some((candidate) => loweredCandidates.includes(candidate)) +// }) + +// return ( +// (matched as any)?.stepKey ?? +// (matched as any)?.stepkey ?? +// (matched as any)?.step_key ?? +// undefined +// ) +// } + const EvaluatorFailureDisplay = ({ status, error, @@ -945,6 +1169,12 @@ const FocusDrawerContent = () => { : false const enricedRun = evaluationRunData?.enrichedRun + + const annotationSteps = useMemo(() => { + const steps = (enricedRun?.data?.steps || []) as any[] + if (!Array.isArray(steps)) return [] + return steps.filter((step) => step?.type === "annotation") + }, [enricedRun?.data?.steps]) const runIndex = evaluationRunData?.runIndex const invocationStep = useMemo( () => scenarioStepsData?.invocationSteps?.[0], @@ -1218,9 +1448,13 @@ const FocusDrawerContent = () => { const rawEvaluators = enricedRun?.evaluators const list = asEvaluatorArray(rawEvaluators) return list.map((entry: any, idx: number) => { - const identifiers = collectEvaluatorIdentifiers(entry) + const identifierSet = new Set() + collectEvaluatorIdentifiers(entry).forEach((identifier) => + identifierSet.add(identifier), + ) + let matchedFallback: any - for (const identifier of identifiers) { + for (const identifier of identifierSet) { const candidate = evaluatorLookupByIdentifier.get(identifier) if (candidate) { matchedFallback = candidate @@ -1228,8 +1462,16 @@ const FocusDrawerContent = () => { } } + if (matchedFallback) { + collectEvaluatorIdentifiers(matchedFallback).forEach((identifier) => + identifierSet.add(identifier), + ) + } + const slug = extractEvaluatorSlug(entry) ?? extractEvaluatorSlug(matchedFallback) const resolvedSlug = slug ?? `evaluator-${idx}` + if (resolvedSlug) identifierSet.add(resolvedSlug) + const displayName = extractEvaluatorName(entry) ?? extractEvaluatorName(matchedFallback) ?? @@ -1241,13 +1483,39 @@ const FocusDrawerContent = () => { resolveEvaluatorMetricsMap(matchedFallback) ?? 
{} + const aliasSet = new Set(identifierSet) + annotationSteps.forEach((step) => { + const stepEvaluatorSlug = pickString(step?.references?.evaluator?.slug) + const stepEvaluatorId = pickString(step?.references?.evaluator?.id) + const matches = + (stepEvaluatorSlug && identifierSet.has(stepEvaluatorSlug)) || + (stepEvaluatorId && identifierSet.has(stepEvaluatorId)) + if (!matches) return + const stepKey = pickString(step?.key) + if (stepKey) { + aliasSet.add(stepKey) + if (step.origin === "human") { + const parts = stepKey.split(".") + if (parts.length > 1) aliasSet.add(parts[1]) + } + } + if (stepEvaluatorSlug) aliasSet.add(stepEvaluatorSlug) + }) + return { name: displayName, metrics, slug: resolvedSlug, + aliases: Array.from(aliasSet), } }) - }, [enricedRun?.evaluators, evaluatorLookupByIdentifier]) + }, [annotationSteps, enricedRun?.evaluators, evaluatorLookupByIdentifier]) + + const focusRunMetricsStatsAtom = useMemo(() => { + if (!runId) return emptyStatsAtom + return runMetricsStatsCacheFamily(runId) + }, [runId]) + const focusRunMetricsStatsMap = useAtomValue(focusRunMetricsStatsAtom) const scenarioMetricDefinitions = useMemo(() => { const columns = @@ -1262,6 +1530,9 @@ const FocusDrawerContent = () => { const rawKey = String(column.path || column.name || "").trim() if (!rawKey) return undefined const definition = buildDrawerMetricDefinition(undefined, rawKey, column) + if (isInvocationMetricKey(definition.metricKey)) { + return undefined + } const fallback = new Set(definition.fallbackKeys || []) fallback.add(rawKey) const canonical = canonicalizeMetricKey(rawKey) @@ -1292,6 +1563,77 @@ const FocusDrawerContent = () => { }) }, [evalType]) + const evaluatorAliasToSlug = useMemo(() => { + const map = new Map() + evaluatorMetrics.forEach((entry) => { + const slug = entry?.slug + if (!slug) return + const aliases = new Set(entry?.aliases || []) + aliases.add(slug) + aliases.forEach((alias) => { + const normalized = pickString(alias) + if (!normalized) return 
+ if (!map.has(normalized)) { + map.set(normalized, slug) + } + }) + }) + return map + }, [evaluatorMetrics]) + + const aggregatedMetricKeys = useMemo(() => { + const sources = new Set() + if (focusRunMetricsStatsMap && typeof focusRunMetricsStatsMap === "object") { + Object.keys(focusRunMetricsStatsMap).forEach((key) => { + const normalized = pickString(key) + if (normalized) sources.add(normalized) + }) + } + return sources + }, [focusRunMetricsStatsMap]) + + const inferredEvaluatorMetricDefinitions = useMemo(() => { + const map = new Map() + if (!aggregatedMetricKeys.size || !evaluatorAliasToSlug.size) return map + + const recordDefinition = (slug: string, definition: DrawerEvaluatorMetric) => { + const existing = map.get(slug) + if (!existing) { + map.set(slug, [definition]) + return + } + if (!existing.some((entry) => entry.id === definition.id)) { + existing.push(definition) + } + } + + aggregatedMetricKeys.forEach((rawKey) => { + const canonical = canonicalizeMetricKey(rawKey) + if (!canonical || !canonical.includes(".")) return + if (canonical.startsWith("attributes.ag.metrics")) return + + const segments = canonical.split(".").filter(Boolean) + for (let idx = 0; idx < segments.length; idx += 1) { + const prefix = segments.slice(0, idx + 1).join(".") + const slug = evaluatorAliasToSlug.get(prefix) + if (!slug) continue + const metricSegments = segments.slice(idx + 1) + const metricName = metricSegments.join(".") + const baseKey = metricName && metricName.length ? `${slug}.${metricName}` : slug + const aliasCandidates = [canonical, rawKey].filter( + (candidate): candidate is string => Boolean(candidate && candidate !== baseKey), + ) + const definition = buildDrawerMetricDefinition(slug, baseKey, { + aliases: aliasCandidates.length ? 
aliasCandidates : undefined, + }) + recordDefinition(slug, definition) + break + } + }) + + return map + }, [aggregatedMetricKeys, evaluatorAliasToSlug]) + const openAndScrollTo = useCallback((key: string) => { // Ensure the related section is expanded when navigating via hash setActiveKeys((prev) => { @@ -1426,28 +1768,6 @@ const FocusDrawerContent = () => { ), children: (
- {/* {shouldShowTraceSummary ? ( -
- {traceJson ? ( -
- {}} - headerName="Trace payload" - initialValue={traceJson} - editorType="borderless" - state="readOnly" - disabled - readOnly - editorClassName="!text-xs" - className="!w-full" - defaultMinimized - editorProps={{codeOnly: true, language: "json"} as any} - /> -
- ) : null} -
- ) : null} */} {hasEntryData ? (
{ Object.entries(metricsMeta).forEach( ([rawKey, meta]) => { + const keyString = String(rawKey) + if (isInvocationMetricKey(keyString)) return const definition = buildDrawerMetricDefinition( slug, - String(rawKey), + keyString, meta, ) const mapKey = `${slug}::${definition.id}` @@ -1777,7 +2099,22 @@ const FocusDrawerContent = () => { ) }) - const metricDefs = Array.from(metricKeyOrder.values()) + const inferredDefs = + inferredEvaluatorMetricDefinitions.get(slug) || [] + inferredDefs.forEach((definition) => { + const mapKey = `${slug}::${definition.id}` + if (!metricKeyOrder.has(mapKey)) { + metricKeyOrder.set(mapKey, definition) + } + }) + + const metricDefs = dedupeEvaluatorMetricDefinitions( + filterInvocationMetricDefinitions( + Array.from(metricKeyOrder.values()), + ), + slug, + ) + if (!metricDefs.length) return null const displayName = slugName[slug] || slug return ( @@ -1852,10 +2189,12 @@ const FocusDrawerContent = () => { const isPrevOpen = !!(prevSlug && activeKeys.includes(prevSlug)) const metricMap = new Map() - const metricHelper = (meta: any, rawKey: string) => { + Object.entries(metrics || {}).forEach(([rawKey, meta]) => { + const keyString = String(rawKey) + if (isInvocationMetricKey(keyString)) return const definition = buildDrawerMetricDefinition( evaluator.slug, - String(rawKey), + keyString, meta, ) const mapKey = `${evaluator.slug}::${definition.id}` @@ -1875,18 +2214,30 @@ const FocusDrawerContent = () => { : undefined, }) } - } + }) - Object.entries(metrics || {}).forEach(([rawKey, meta]) => { - if (meta.properties) { - Object.entries(meta.properties).forEach(([propKey, propMeta]) => { - metricHelper(propMeta, `${rawKey}.${propKey}`) - }) - } else { - metricHelper(meta, rawKey) + // Object.entries(metrics || {}).forEach(([rawKey, meta]) => { + // if (meta.properties) { + // Object.entries(meta.properties).forEach(([propKey, propMeta]) => { + // metricHelper(propMeta, `${rawKey}.${propKey}`) + // }) + // } else { + // metricHelper(meta, 
rawKey) + // } + // }) + const inferredDefs = + inferredEvaluatorMetricDefinitions.get(evaluator.slug) || [] + inferredDefs.forEach((definition) => { + const mapKey = `${evaluator.slug}::${definition.id}` + if (!metricMap.has(mapKey)) { + metricMap.set(mapKey, definition) } }) - const metricDefs = Array.from(metricMap.values()) + const metricDefs = dedupeEvaluatorMetricDefinitions( + filterInvocationMetricDefinitions(Array.from(metricMap.values())), + evaluator.slug, + ) + if (!metricDefs.length) return null if (!evaluator) return null return { @@ -1943,6 +2294,8 @@ const FocusDrawerContent = () => { baseRunId, invocationStepKey, invocationStep?.stepkey, + evaluatorMetrics, + inferredEvaluatorMetricDefinitions, ]) if ((!scenarioStepsData && !hasResolvedSteps) || !enricedRun || !runId) { diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts index a84d3d1d55..e043510fe5 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerContent/lib/helpers.ts @@ -196,16 +196,30 @@ export const collectMetricFallbackKeys = ( return fallbackKeys } +export const stripOutputsPrefixes = (key: string): string => { + let result = key + const OUTPUT_PREFIX = "attributes.ag.data.outputs." + const METRIC_PREFIX = "attributes.ag.metrics." 
+ while (result.startsWith(OUTPUT_PREFIX)) { + result = result.slice(OUTPUT_PREFIX.length) + } + while (result.startsWith(METRIC_PREFIX)) { + result = result.slice(METRIC_PREFIX.length) + } + return result +} + export const buildDrawerMetricDefinition = ( slug: string | undefined, rawKey: string, meta: any, ): DrawerEvaluatorMetric => { const normalizedSlug = slug && slug.trim().length > 0 ? slug.trim() : undefined - const normalizedDisplay = + const normalizedDisplayBase = normalizedSlug && rawKey.startsWith(`${normalizedSlug}.`) ? rawKey.slice(normalizedSlug.length + 1) : rawKey + const normalizedDisplay = stripOutputsPrefixes(normalizedDisplayBase) const primaryKey = normalizeMetricPrimaryKey(slug, rawKey) const fallbackKeys = collectMetricFallbackKeys(slug, rawKey, primaryKey, meta) const id = canonicalizeMetricKey(primaryKey) || primaryKey diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx index 37df1f571c..e440c43f9e 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunFocusDrawer/assets/FocusDrawerSidePanel/index.tsx @@ -114,7 +114,7 @@ const FocusDrawerSidePanel = () => { icon: , children: dedupedEvaluators?.map((e) => ({ - title: e.name, + title: e.name ?? 
e.slug, key: e.slug, icon: , })) || [], diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx index 008d3069ed..198d2542f6 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvalRunPromptConfigViewer/assets/PromptConfigCard.tsx @@ -550,12 +550,14 @@ const PromptConfigCard = ({ >
- + {evaluation?.name ? ( + + ) : null} {variantForDisplay ? ( | undefined): boolean => { + if (!metric || typeof metric !== "object") return false + if (typeof metric.mean === "number" && Number.isFinite(metric.mean)) return true + if (typeof metric.count === "number" && metric.count > 0) return true + + const distribution: any[] | undefined = Array.isArray((metric as any).distribution) + ? (metric as any).distribution + : undefined + if (distribution && distribution.some((bin) => Number(bin?.count ?? 0) > 0)) return true + + const hist = Array.isArray((metric as any).hist) ? (metric as any).hist : undefined + if (hist && hist.some((bin) => Number(bin?.count ?? bin?.frequency ?? 0) > 0)) return true + + const freq = Array.isArray((metric as any).frequency) + ? (metric as any).frequency + : Array.isArray((metric as any).rank) + ? (metric as any).rank + : undefined + if (freq && freq.some((entry) => Number(entry?.count ?? entry?.frequency ?? 0) > 0)) return true + + const unique = (metric as any).unique + if (Array.isArray(unique) && unique.length > 0) { + return typeof metric.mean === "number" + } + + return false +} + const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" | "online"}) => { const baseRunId = useRunId() const {projectURL} = useURL() @@ -89,6 +121,79 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" const runs = useAtomValue(runsStateFamily(allRunIds)) const metricsByRun = useAtomValue(runsMetricsFamily(allRunIds)) + const evaluatorsBySlug = useMemo(() => { + const map = new Map() + const register = (entry: any, slug: string) => { + if (!entry || typeof entry !== "object") return + if (!slug || map.has(slug)) return + map.set(slug, entry) + } + + runs.forEach((state) => { + const annotationSteps = state?.enrichedRun?.data?.steps?.filter( + (step: any) => step?.type === "annotation", + ) + annotationSteps?.forEach((step: any) => { + const evaluatorId = step?.references?.evaluator?.id + if 
(!evaluatorId) return + const evaluator = (state?.enrichedRun?.evaluators || []).find( + (e: any) => e.id === evaluatorId, + ) + if (evaluator) { + const originalKey = typeof step?.key === "string" ? step.key : undefined + const parts = originalKey ? originalKey.split(".") : [] + const humanKey = parts.length > 1 ? parts[1] : originalKey + const resolvedKey = step.origin === "human" ? humanKey : originalKey + if (originalKey) { + register(evaluator, originalKey) + } + if (resolvedKey && resolvedKey !== originalKey) { + register(evaluator, resolvedKey) + } + } + }) + }) + + return Object.fromEntries(map.entries()) + }, [runs]) + + const schemaMetricDefinitionsBySlug = useMemo(() => { + const map: Record = {} + Object.entries(evaluatorsBySlug).forEach(([slug, evaluator]) => { + const definitions = collectMetricSchemasFromEvaluator(evaluator) + .map(({name, schema}) => { + const trimmed = (name || "").trim() + if (!trimmed) return null + return {name: trimmed, type: deriveSchemaMetricType(schema)} + }) + .filter(Boolean) as {name: string; type?: string | string[]}[] + + const existing = map[slug] ?? 
[] + const merged = new Map() + existing.forEach((definition) => merged.set(definition.name, definition)) + definitions.forEach((definition) => merged.set(definition.name, definition)) + map[slug] = Array.from(merged.values()) + }) + return map + }, [evaluatorsBySlug]) + + const evaluatorMetricKeysBySlug = useMemo(() => { + const map: Record> = {} + Object.entries(schemaMetricDefinitionsBySlug).forEach(([slug, definitions]) => { + const set = new Set() + definitions.forEach(({name}) => { + const canonical = canonicalizeMetricKey(name) + set.add(name) + set.add(canonical) + const prefixed = `${slug}.${name}` + set.add(prefixed) + set.add(canonicalizeMetricKey(prefixed)) + }) + map[slug] = set + }) + return map + }, [schemaMetricDefinitionsBySlug]) + // Convenience lookup maps const evalById = useMemo(() => { const map: Record = {} @@ -101,48 +206,107 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" metricsByRun.forEach(({id, metrics}) => { const source = (metrics || {}) as Record - const normalized: Record = {} - - const evaluators = (evalById[id]?.enrichedRun?.evaluators || []) as { - slug?: string - metrics?: Record - }[] - const evaluatorsBySlug = new Map}>() - evaluators.forEach((evaluator) => { - if (typeof evaluator?.slug === "string") { - evaluatorsBySlug.set(evaluator.slug, evaluator) + const normalized: Record = {...source} + + Object.entries(source || {}).forEach(([rawKey, value]) => { + const canonical = canonicalizeMetricKey(rawKey) + if (canonical !== rawKey && normalized[canonical] === undefined) { + normalized[canonical] = value } }) + map[id] = normalized + }) - const shouldIncludeMetric = (canonicalKey: string) => { - if (INVOCATION_METRIC_SET.has(canonicalKey)) return true - if (!canonicalKey.includes(".")) return true + return map + }, [metricsByRun]) + + const combinedMetricEntries = useMemo(() => { + const entries: { + fullKey: string + evaluatorSlug: string + metricKey: string + }[] = [] + const seen = new 
Set() + + const pushEntry = (source: Record) => { + Object.keys(source || {}).forEach((rawKey) => { + const canonical = canonicalizeMetricKey(rawKey) + if (INVOCATION_METRIC_SET.has(canonical)) return + if (!canonical.includes(".")) return + if (seen.has(canonical)) return + + const metric = + (getMetricValueWithAliases(source, canonical) as Record) || + (source?.[rawKey] as Record) + if (!metricHasContent(metric)) return + + const segments = canonical.split(".").filter(Boolean) + if (!segments.length) return + + const resolveSlugFromSegments = () => { + let slugCandidate = segments[0] + let idx = 1 + while (idx <= segments.length) { + if (evaluatorsBySlug[slugCandidate]) { + return {slug: slugCandidate, metricStartIdx: idx} + } + if (idx >= segments.length) break + slugCandidate = `${slugCandidate}.${segments[idx]}` + idx += 1 + } + if (segments.length > 1 && evaluatorsBySlug[segments[1]]) { + return {slug: segments[1], metricStartIdx: 2} + } + return null + } - const [slug, ...parts] = canonicalKey.split(".") - if (!parts.length) return false - const evaluator = evaluatorsBySlug.get(slug) - if (!evaluator?.metrics) return false - const key = parts[parts.length - 1] - return Object.prototype.hasOwnProperty.call(evaluator.metrics, key) - } + const resolved = resolveSlugFromSegments() + if (!resolved) return + const {slug, metricStartIdx} = resolved - Object.entries(source || {}).forEach(([rawKey, value]) => { - const canonical = canonicalizeMetricKey(rawKey) - if (!shouldIncludeMetric(canonical)) return + const evaluator = evaluatorsBySlug[slug] + if (!evaluator) return - if (normalized[rawKey] === undefined) { - normalized[rawKey] = value + const metricKeySegments = segments.slice(metricStartIdx) + const metricKey = + metricKeySegments.length > 0 + ? metricKeySegments.join(".") + : (segments[metricStartIdx - 1] ?? 
slug) + + if (metricKey.startsWith("attributes.ag.metrics")) { + return } - if (canonical !== rawKey && normalized[canonical] === undefined) { - normalized[canonical] = value + + const allowedKeys = evaluatorMetricKeysBySlug[slug] + if (allowedKeys && allowedKeys.size) { + const keySegments = metricKey.split(".").filter(Boolean) + const candidateKeys = new Set([metricKey]) + keySegments.forEach((_, idx) => { + const prefix = keySegments.slice(0, idx + 1).join(".") + const suffix = keySegments.slice(idx).join(".") + if (prefix) candidateKeys.add(prefix) + if (suffix) candidateKeys.add(suffix) + const segment = keySegments[idx] + if (segment) candidateKeys.add(segment) + }) + const matchesDefinition = Array.from(candidateKeys).some((key) => + allowedKeys.has(key), + ) + if (!matchesDefinition) return } + + entries.push({fullKey: canonical, evaluatorSlug: slug, metricKey}) + seen.add(canonical) }) + } - map[id] = normalized + metricsByRun.forEach(({metrics}) => { + const source = (metrics || {}) as Record + pushEntry(source) }) - return map - }, [metricsByRun, evalById]) + return entries + }, [metricsByRun, evaluatorsBySlug, evaluatorMetricKeysBySlug]) const baseRunState = baseRunId ? evalById[baseRunId] : undefined const hasBaseScenarios = @@ -160,7 +324,6 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" }, []) const chartMetrics = useMemo(() => { - // Build union of evaluator metrics across all runs, then add invocation metrics per rules. 
interface Axis { name: string maxScore: number @@ -172,63 +335,49 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" const axesByKey: Record = {} - // 1) Union evaluator metrics from all runs - allRunIds.forEach((runId, runIdx) => { - const stats = metricsLookup[runId] || {} - const evaluators = evalById[runId]?.enrichedRun?.evaluators - const processed = new Set() - - Object.keys(stats).forEach((rawKey) => { - const canonicalKey = canonicalizeMetricKey(rawKey) - if (processed.has(canonicalKey)) return - processed.add(canonicalKey) - - if (INVOCATION_METRIC_SET.has(canonicalKey)) return - if (!canonicalKey.includes(".")) return - - const metric = getMetricValueWithAliases(stats, canonicalKey) - if (!metric) return - - const [evalSlug, ...metricParts] = canonicalKey.split(".") - const metricRemainder = metricParts.join(".") - const evaluator = evaluators?.find((e: any) => e.slug === evalSlug) + combinedMetricEntries.forEach(({fullKey, evaluatorSlug, metricKey}) => { + const evaluator = evaluatorsBySlug[evaluatorSlug] + if (!evaluator) return + + const displayMetricName = metricKey + ? formatMetricName(metricKey) + : formatMetricName(fullKey) + const evaluatorLabel = evaluator?.name ?? formatColumnTitle(evaluatorSlug) + + const axis = + axesByKey[fullKey] || + (axesByKey[fullKey] = { + name: `${evaluatorLabel} - ${displayMetricName}`, + maxScore: 100, + type: "numeric", + _key: fullKey, + }) - if (!evaluator) return + allRunIds.forEach((runId, runIdx) => { + const stats = metricsLookup[runId] || {} + const metric = getMetricValueWithAliases(stats, fullKey) + if (!metricHasContent(metric)) return - const axisKey = canonicalKey const isBinary = Array.isArray((metric as any)?.frequency) - const displayMetricName = metricRemainder - ? 
formatMetricName(metricRemainder) - : formatMetricName(canonicalKey) - - const x = metricParts[metricParts.length - 1] - if (x in evaluator.metrics) { - if (!axesByKey[axisKey]) { - axesByKey[axisKey] = { - name: `${evaluator?.name ?? evalSlug} - ${displayMetricName}`, - maxScore: isBinary ? 100 : (metric as any)?.max || 100, - type: isBinary ? "binary" : "numeric", - _key: axisKey, - } - } else if (!isBinary) { - const mx = (metric as any)?.max - if (typeof mx === "number") { - axesByKey[axisKey].maxScore = Math.max(axesByKey[axisKey].maxScore, mx) - } + axis.type = isBinary ? "binary" : "numeric" + if (!isBinary) { + const mx = (metric as any)?.max + if (typeof mx === "number") { + axis.maxScore = Math.max(axis.maxScore, mx) } - - const seriesKey = runIdx === 0 ? "value" : `value-${runIdx + 1}` - const v = isBinary - ? getFrequencyData(metric, false) - : ((metric as any)?.mean ?? 0) - axesByKey[axisKey][seriesKey] = v + } else { + axis.maxScore = 100 } + + const seriesKey = runIdx === 0 ? "value" : `value-${runIdx + 1}` + axis[seriesKey] = isBinary + ? getFrequencyData(metric, false) + : ((metric as any)?.mean ?? 0) }) }) let axes: Axis[] = Object.values(axesByKey) - // 2) Invocation metrics only when evaluator metrics are fewer than 3 (based on union) const evaluatorCount = axes.length const addInvocationAxis = (metricKey: string, label?: string) => { const axis: Axis = { @@ -237,27 +386,27 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" type: "numeric", _key: metricKey, } + allRunIds.forEach((runId, runIdx) => { - const stats = metricsLookup[runId] || {} - const metric = getMetricValueWithAliases(stats, metricKey) as BasicStats | any + const metrics = metricsLookup[runId] + const metric = getMetricValueWithAliases(metrics || {}, metricKey) as any + if (!metric) return const seriesKey = runIdx === 0 ? "value" : `value-${runIdx + 1}` - axis[seriesKey] = metric?.mean ?? 
0 - const mx = metric?.max - if (typeof mx === "number") axis.maxScore = Math.max(axis.maxScore, mx) + if (metric.mean !== undefined) { + axis[seriesKey] = metric.mean + axis.maxScore = Math.max(axis.maxScore, metric.mean) + } }) - axes.push(axis) + + if (axis.maxScore > 0) { + axes.push(axis) + } } if (evaluatorCount < 3) { - if (evaluatorCount === 2) { - addInvocationAxis(COST_METRIC_KEY, "Invocation costs") - } else if (evaluatorCount <= 1) { - addInvocationAxis(DURATION_METRIC_KEY, "Invocation duration") - addInvocationAxis(COST_METRIC_KEY, "Invocation costs") - } + INVOCATION_METRIC_COLUMNS.forEach(({key, label}) => addInvocationAxis(key, label)) } - // 3) Ensure all series keys exist for each axis if (axes.length > 0) { allRunIds.forEach((_, runIdx) => { const seriesKey = runIdx === 0 ? "value" : `value-${runIdx + 1}` @@ -268,22 +417,39 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" } return axes.map(({_key, ...rest}) => rest) - }, [allRunIds, evalById, metricsLookup, getFrequencyData]) + }, [allRunIds, combinedMetricEntries, evaluatorsBySlug, metricsLookup, getFrequencyData]) const spiderChartClassName = clsx([ "min-h-[400px] h-[400px]", {"w-[50%] !h-full": !isComparison}, ]) - const dataSource = useMemo(() => { - // Build union of all metric keys across runs - const metricKeys = new Set() - allRunIds.forEach((id) => { - const m = metricsLookup[id] || {} + const chartSeries = useMemo( + () => + allRunIds.map((id, idx) => { + const state = evalById[id] + const compareIdx = state?.compareIndex || idx + 1 + const colorIdx = state?.colorIndex || (state?.isBase ? 1 : undefined) || compareIdx + return { + key: idx === 0 ? 
"value" : `value-${idx + 1}`, + color: (EVAL_COLOR as any)[colorIdx] || "#3B82F6", + name: state?.enrichedRun?.name || `Eval ${compareIdx}`, + } + }), + [allRunIds, evalById], + ) - Object.keys(m).forEach((k) => metricKeys.add(canonicalizeMetricKey(k))) - }) + const sortedEvaluatorMetricEntries = useMemo(() => { + const entries = [...combinedMetricEntries] + entries.sort((a, b) => + a.evaluatorSlug === b.evaluatorSlug + ? a.metricKey.localeCompare(b.metricKey) + : a.evaluatorSlug.localeCompare(b.evaluatorSlug), + ) + return entries + }, [combinedMetricEntries]) + const dataSource = useMemo(() => { // const baseEval = evalById[baseRunId!]?.enrichedRun const rows: any[] = [] @@ -436,52 +602,43 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" }) // Evaluator metrics grouped by evaluator slug - const allEvaluatorEntries: {slug: string; metricKey: string; fullKey: string}[] = [] - Array.from(metricKeys) - .filter((k) => !INVOCATION_METRIC_SET.has(k) && k.includes(".")) - .forEach((fullKey) => { - const [slug, ...restParts] = fullKey.split(".") - const metricKey = restParts.join(".") || slug - allEvaluatorEntries.push({slug, metricKey, fullKey}) - }) - - // Maintain stable order by slug then metricKey - allEvaluatorEntries - .sort((a, b) => - a.slug === b.slug - ? a.metricKey.localeCompare(b.metricKey) - : a.slug.localeCompare(b.slug), - ) - .forEach(({slug, metricKey, fullKey}) => { - const state = evalById[baseRunId!] - const evaluator = state?.enrichedRun?.evaluators?.find((e: any) => e.slug === slug) - const baseMetric = getMetricValueWithAliases( - metricsLookup[baseRunId!] || {}, - fullKey, - ) as any - const [, ...restParts] = fullKey.split(".") - const metricPath = restParts.length ? restParts.join(".") : metricKey - const labelSegment = metricPath.split(".").pop() || metricPath - const displayMetricName = formatColumnTitle(labelSegment) - const titleNode = ( -
- - {evaluator?.name ?? formatColumnTitle(slug)} - -
- {displayMetricName} - {/* Show (mean) if base has mean */} - {baseMetric && (baseMetric as any)?.mean !== undefined && ( - (mean) - )} -
+ sortedEvaluatorMetricEntries.forEach(({evaluatorSlug: slug, metricKey, fullKey}) => { + const evaluator = evaluatorsBySlug[slug] + const baseMetric = getMetricValueWithAliases( + metricsLookup[baseRunId!] || {}, + fullKey, + ) as any + const metricPath = metricKey || fullKey + const labelSegment = metricPath.split(".").pop() || metricPath + const displayMetricName = formatColumnTitle(labelSegment) + const titleNode = ( +
+ + {evaluator?.name ?? formatColumnTitle(slug)} + +
+ {displayMetricName} + {baseMetric && (baseMetric as any)?.mean !== undefined && ( + (mean) + )}
- ) - pushMetricRow(fullKey, titleNode) - }) +
+ ) + pushMetricRow(fullKey, titleNode) + }) return rows - }, [allRunIds, baseRunId, evalById, getFrequencyData, metricsLookup, runs, type]) + }, [ + allRunIds, + baseRunId, + evalById, + evaluatorsBySlug, + getFrequencyData, + metricsLookup, + projectURL, + sortedEvaluatorMetricEntries, + type, + ]) return (
@@ -602,21 +759,7 @@ const EvalRunScoreTable = ({className, type}: {className?: string; type: "auto" {"w-[50%] !h-full": !isComparison}, ])} metrics={chartMetrics} - series={useMemo(() => { - return allRunIds.map((id, idx) => { - const state = evalById[id] - const compareIdx = state?.compareIndex || idx + 1 - const colorIdx = - state?.colorIndex || - (state?.isBase ? 1 : undefined) || - compareIdx - return { - key: idx === 0 ? "value" : `value-${idx + 1}`, - color: (EVAL_COLOR as any)[colorIdx] || "#3B82F6", - name: state?.enrichedRun?.name || `Eval ${compareIdx}`, - } - }) - }, [allRunIds, evalById])} + series={chartSeries} /> )}
diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx index 63440ff7f3..3fba471a38 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/TimeSeriesChart.tsx @@ -5,7 +5,7 @@ import clsx from "clsx" import { Area, CartesianGrid, - Legend, + ComposedChart, Line, LineChart, ResponsiveContainer, @@ -19,43 +19,12 @@ import {EvaluatorDto} from "@/oss/lib/hooks/useEvaluators/types" import {formatMetricName} from "../../assets/utils" import PlaceholderOverlay, {PlaceholderEvaluationType} from "../shared/PlaceholderOverlay" +import {withAlpha, format3Sig, formatTimestamp} from "./assets/helpers" import HistogramChart from "./assets/HistogramChart" +import LowerBand from "./assets/LowerBand" +import UpperBand from "./assets/UpperBand" -const withAlpha = (color: string, alpha: number) => { - if (color.startsWith("#")) { - const hex = color.slice(1) - const normalized = - hex.length === 3 - ? 
hex - .split("") - .map((ch) => ch + ch) - .join("") - : hex - const int = Number.parseInt(normalized, 16) - if (!Number.isNaN(int)) { - const r = (int >> 16) & 255 - const g = (int >> 8) & 255 - const b = int & 255 - return `rgba(${r}, ${g}, ${b}, ${alpha})` - } - } - return color -} - -const format3Sig = (n: number) => { - if (!Number.isFinite(n)) return String(n) - const abs = Math.abs(n) - if (abs !== 0 && (abs < 0.001 || abs >= 1000)) return n.toExponential(2) - const s = n.toPrecision(3) - return String(Number(s)) -} - -const formatTimestamp = (value: number) => { - if (!Number.isFinite(value)) return "" - const date = new Date(value) - if (Number.isNaN(date.getTime())) return "" - return `${date.toLocaleDateString()} ${date.toLocaleTimeString()}` -} +// helpers moved to ./assets/helpers const PLACEHOLDER_TIME_START = Date.UTC(2025, 0, 1, 9, 0, 0) const PLACEHOLDER_TIME_STEP = 60 * 60 * 1000 @@ -114,6 +83,29 @@ const EvaluatorMetricsTimeSeriesChart = ({ placeholderTitle?: ReactNode placeholderDescription?: ReactNode }) => { + useEffect(() => { + if (process.env.NODE_ENV === "development") { + try { + const _seriesCount = Array.isArray(series) ? series.length : 0 + const _summary = (series || []).map((s) => { + const pts = Array.isArray(s.points) ? s.points : [] + const count = pts.length + const timestamps = pts.slice(0, 10).map((p) => p.timestamp) + const allTs = pts.map((p) => p.timestamp) + const minTs = allTs.length ? Math.min(...allTs) : undefined + const maxTs = allTs.length ? Math.max(...allTs) : undefined + return { + id: s.id, + name: s.name, + points: count, + minTs, + maxTs, + sampleTs: timestamps, + } + }) + } catch {} + } + }, [series, metricKey, isBoolean, evaluationType]) const hasData = series?.some((s) => s.points.length > 0) const evaluatorLabel = evaluator?.name || evaluator?.slug || "this evaluator" const overlayTitle = placeholderTitle ?? 
"Waiting for your traces" @@ -122,7 +114,7 @@ const EvaluatorMetricsTimeSeriesChart = ({ `Generate traces with ${evaluatorLabel} to start collecting results.` const chartData = useMemo(() => { - const map = new Map>() + const map = new Map>() series.forEach((s) => { s.points.forEach((pt) => { const existing = map.get(pt.timestamp) ?? {timestamp: pt.timestamp} @@ -139,9 +131,7 @@ const EvaluatorMetricsTimeSeriesChart = ({ if (pt.p50 !== undefined) { existing[`${s.id}__p50`] = pt.p50 } - if (pt.p25 !== undefined && pt.p75 !== undefined) { - existing[`${s.id}__pRange`] = pt.p75 - pt.p25 - } + // old band helpers removed; we will compute highlight areas in windowData if (pt.histogram && pt.histogram.length) { existing[`${s.id}__hist`] = pt.histogram } @@ -198,7 +188,6 @@ const EvaluatorMetricsTimeSeriesChart = ({ setPanStartX(e.clientX) const range = resolveRange() || [tsExtent.min, tsExtent.max] setPanStartDomain(range) - console.debug("[TimeSeries] mousedown", {startX: e.clientX, range}) } const handleMouseMove: React.MouseEventHandler = (e) => { const rect = containerRef.current?.getBoundingClientRect() @@ -221,12 +210,7 @@ const EvaluatorMetricsTimeSeriesChart = ({ nextMin -= diff nextMax -= diff } - console.debug("[TimeSeries] mousemove", { - dx, - domainWidth, - xDomainBefore: panStartDomain, - xDomainAfter: [nextMin, nextMax], - }) + panTargetRef.current = [nextMin, nextMax] if (panRAF.current == null) { panRAF.current = requestAnimationFrame(() => { @@ -240,7 +224,6 @@ const EvaluatorMetricsTimeSeriesChart = ({ setIsPanning(false) setPanStartX(null) setPanStartDomain(null) - console.debug("[TimeSeries] pan end") if (panRAF.current) { cancelAnimationFrame(panRAF.current) panRAF.current = null @@ -327,14 +310,6 @@ const EvaluatorMetricsTimeSeriesChart = ({ anchorRatio = Math.max(0, Math.min(1, anchorRatio)) } - console.debug("[TimeSeries] wheel(native)", { - xDomainBefore: [currentMin, currentMax], - xDomainAfter: [nextMin, nextMax], - cursorRatio: ratio, - 
factor: rawFactor, - anchorCenter, - anchorRatio, - }) setXDomain([nextMin, nextMax]) // Reset anchor after idle time to start a new gesture on next wheel @@ -388,7 +363,7 @@ const EvaluatorMetricsTimeSeriesChart = ({ }, [series, isBoolean]) // Consistent chart margins to compute plot area sizes - const chartMargin = useMemo(() => ({top: 8, right: 12, bottom: 48, left: 56}), []) + const chartMargin = useMemo(() => ({top: 8, right: 12, bottom: 24, left: 48}), []) const domain = useMemo(() => { if (isBoolean) return [0 - BOOLEAN_AXIS_PADDING, 100 + BOOLEAN_AXIS_PADDING] as const @@ -453,40 +428,75 @@ const EvaluatorMetricsTimeSeriesChart = ({ ) }) + const makeActiveDot = + (strokeColor: string, r = 3) => + (props: any) => { + const isEdge = !!props?.payload?.__edgeRow + if (isEdge) return + const {cx, cy} = props || {} + if (typeof cx !== "number" || typeof cy !== "number") return + return ( + + ) + } + const ChartLegend = memo((props: any) => { - const items: any[] = Array.isArray(props?.payload) ? props.payload : [] + // Manual items mode: [{label, color}] — used when rendering outside the chart + if (Array.isArray(props?.items)) { + const items = props.items as {label: string; color: string}[] + if (!items.length) return null + return ( +
+ {items.map((it, idx) => ( +
+ + {it.label} +
+ ))} +
+ ) + } + + // Fallback: Recharts payload mode (not used after moving legend outside) + const raw: any[] = Array.isArray(props?.payload) ? props.payload : [] + if (!raw.length) return null + const items = raw + .filter((it) => !!String(it?.value ?? "")) + .map((it) => ({ + label: String(it?.payload?.name ?? it?.value ?? ""), + color: String(it?.payload?.stroke || it?.payload?.fill || it?.color || "#888"), + })) if (!items.length) return null - return ( -
- {items.map((it) => ( -
- - {String(it?.value ?? "")} -
- ))} -
- ) + return }) // Augmented data: full wide dataset plus interpolated boundary rows at min/max @@ -513,6 +523,7 @@ const EvaluatorMetricsTimeSeriesChart = ({ const addEdgeRow = (boundaryTs: number, side: "min" | "max") => { let row = rowMap.get(boundaryTs) ?? {timestamp: boundaryTs} + ;(row as any).__edgeRow = true let any = false for (const s of sorted) { const pts = s.pts @@ -577,6 +588,80 @@ const EvaluatorMetricsTimeSeriesChart = ({ addEdgeRow(minNum, "min") addEdgeRow(maxNum, "max") + // Interpolate p25/p75 for rows that lack them so active dots appear consistently + const interpAt = ( + pts: {timestamp: number; value: number}[], + t: number, + ): number | undefined => { + if (!pts.length) return undefined + if (t <= pts[0].timestamp) return pts[0].value + if (t >= pts[pts.length - 1].timestamp) return pts[pts.length - 1].value + let lo = 0 + let hi = pts.length - 1 + while (lo <= hi) { + const mid = (lo + hi) >> 1 + const mt = pts[mid].timestamp + if (mt < t) lo = mid + 1 + else hi = mid - 1 + } + const next = pts[lo] + const prev = pts[lo - 1] + if (!prev || !next || next.timestamp === prev.timestamp) return undefined + const r = (t - prev.timestamp) / (next.timestamp - prev.timestamp) + return prev.value + r * (next.value - prev.value) + } + + for (const s of series) { + const p25Pts = s.points + .filter((p) => typeof p.p25 === "number") + .map((p) => ({timestamp: p.timestamp, value: p.p25 as number})) + .sort((a, b) => a.timestamp - b.timestamp) + const p75Pts = s.points + .filter((p) => typeof p.p75 === "number") + .map((p) => ({timestamp: p.timestamp, value: p.p75 as number})) + .sort((a, b) => a.timestamp - b.timestamp) + if (!p25Pts.length && !p75Pts.length) continue + + for (const [ts, row] of rowMap.entries()) { + if (typeof (row as any)[`${s.id}__p25`] !== "number" && p25Pts.length) { + const v = interpAt(p25Pts, ts) + if (typeof v === "number" && Number.isFinite(v)) { + ;(row as any)[`${s.id}__p25`] = v + } + } + if (typeof (row as 
any)[`${s.id}__p75`] !== "number" && p75Pts.length) { + const v = interpAt(p75Pts, ts) + if (typeof v === "number" && Number.isFinite(v)) { + ;(row as any)[`${s.id}__p75`] = v + } + } + + // Compute helpers + const mainVal = Number((row as any)[s.id]) + const p25Val = (row as any)[`${s.id}__p25`] + const p75Val = (row as any)[`${s.id}__p75`] + if (Number.isFinite(mainVal)) { + const NUM_SEGS = 30 + if (typeof p75Val === "number" && Number.isFinite(p75Val as number)) { + ;(row as any)[`${s.id}__upperBase`] = mainVal + const ud = Math.max(0, (p75Val as number) - mainVal) + const usz = ud / NUM_SEGS + for (let i = 0; i < NUM_SEGS; i++) { + ;(row as any)[`${s.id}__upperSeg${i}`] = usz + } + } + if (typeof p25Val === "number" && Number.isFinite(p25Val as number)) { + ;(row as any)[`${s.id}__lowerBase`] = p25Val as number + const ld = Math.max(0, mainVal - (p25Val as number)) + const lsz = ld / NUM_SEGS + for (let i = 0; i < NUM_SEGS; i++) { + ;(row as any)[`${s.id}__lowerSeg${i}`] = lsz + } + } + } + } + } + return Array.from(rowMap.values()).sort((a, b) => Number(a.timestamp) - Number(b.timestamp)) }, [chartData, xDomain, tsExtent, series, isBoolean]) @@ -616,7 +701,7 @@ const EvaluatorMetricsTimeSeriesChart = ({
{ - console.debug("[TimeSeries] reset") setXDomain(["auto", "auto"]) // Also clear any previous wheel anchor so next zoom re-anchors at cursor wheelAnchorRef.current = null @@ -634,7 +718,7 @@ const EvaluatorMetricsTimeSeriesChart = ({
{hasData ? ( - { if (suppressTooltip) return null if (!active || !payload || !payload.length) return null - const rows = payload as any[] + const rows = (payload as any[]).filter((row: any) => { + const key = String(row?.dataKey || "") + // Exclude helper series for highlight areas and redundant percentile lines + if (!key) return false + if (key.endsWith("__upperBase")) return false + if (key.endsWith("__lowerBase")) return false + if (key.includes("__upperSeg")) return false + if (key.includes("__lowerSeg")) return false + // Remove separate percentile lines (we show range below the main row) + if (key.endsWith("__p25")) return false + if (key.endsWith("__p75")) return false + return true + }) return ( -
-
+
+
{formatTimestamp(Number(label))}
@@ -689,7 +785,6 @@ const EvaluatorMetricsTimeSeriesChart = ({ : Number(countRaw.toFixed(2)) : countRaw const p25 = row.payload?.[`${dataKey}__p25`] - const p50 = row.payload?.[`${dataKey}__p50`] const p75 = row.payload?.[`${dataKey}__p75`] const formatValue = (val?: number) => { if (val == null) return undefined @@ -705,11 +800,6 @@ const EvaluatorMetricsTimeSeriesChart = ({ const formattedValue = formatValue(valueNum) const formattedP25 = formatValue(p25) const formattedP75 = formatValue(p75) - const formattedMedian = formatValue(p50) - const formattedRange = - formattedP25 && formattedP75 - ? `${formattedP25} – ${formattedP75}` - : undefined const histogram = row.payload?.[ `${dataKey}__hist` ] as { @@ -760,57 +850,79 @@ const EvaluatorMetricsTimeSeriesChart = ({ ) : [] return ( -
-
- +
+
+ + + {name} + + {count != null ? ( + + • {count} scenarios + + ) : null} +
+
- - {name}: - - {formattedValue} - {count != null ? ( - - • {count} scenarios - - ) : null} -
- {formattedRange ? ( -
- P25–P75: {formattedRange} + > + {formattedValue}
- ) : null} - {formattedMedian ? ( -
- Median: {formattedMedian} +
+ {formattedP25 || formattedP75 ? ( +
+ P25: {formattedP25 ?? "-"} + {formattedP75 ? ( + {` / P75: ${formattedP75}`} + ) : null}
) : null} {histogramData.length ? ( -
- +
+
+ +
) : null}
@@ -828,68 +940,47 @@ const EvaluatorMetricsTimeSeriesChart = ({ stroke="#CBD5E1" onChange={onBrushChange as any} /> */} - } - /> {series.map((s) => { const hasRange = s.points.some( (pt) => typeof pt.p25 === "number" && typeof pt.p75 === "number", ) - const hasMedian = s.points.some( - (pt) => typeof pt.p50 === "number", - ) - const rangeId = `range_${s.id}` - const rangeFill = withAlpha(s.color, 0.18) - const medianStroke = withAlpha(s.color, 0.6) return ( {hasRange ? ( <> - + + - ) : null} - {hasMedian ? ( - - ) : null} ) })} - + ) : ( @@ -992,6 +1083,21 @@ const EvaluatorMetricsTimeSeriesChart = ({ ) : null}
+ {(() => { + const items = series.flatMap((s) => { + const base = [{label: s.name, color: s.color}] + const hasRange = s.points.some( + (pt) => typeof pt.p25 === "number" && typeof pt.p75 === "number", + ) + if (!hasRange) return base + return [ + ...base, + {label: `${s.name} P25`, color: withAlpha(s.color, 0.1)}, + {label: `${s.name} P75`, color: withAlpha(s.color, 0.1)}, + ] + }) + return + })()} ) } diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx new file mode 100644 index 0000000000..29b039c640 --- /dev/null +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/LowerBand.tsx @@ -0,0 +1,70 @@ +import {memo} from "react" + +import {Area} from "recharts" + +function LowerBand({id, color}: {id: string; color: string}) { + // Generate gradient strips - more strips = smoother gradient + const numStrips = 30 + const gradientFalloff = 1.8 // Controls how quickly the gradient fades + const maxOpacity = 0.5 // Maximum opacity at the main line + + // Parse color if it's in hex format, otherwise use rgba + const getColorWithOpacity = (opacity: number) => { + // If color is already rgba, extract RGB values + // Otherwise assume it's hex and convert + if (color.startsWith("rgba")) { + const match = color.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/) + if (match) { + return `rgba(${match[1]}, ${match[2]}, ${match[3]}, ${opacity})` + } + } + // For hex colors like "#f44336" + const hex = color.replace("#", "") + const r = parseInt(hex.substring(0, 2), 16) + const g = parseInt(hex.substring(2, 4), 16) + const b = parseInt(hex.substring(4, 6), 16) + return `rgba(${r}, ${g}, ${b}, ${opacity})` + } + + return ( + <> + {/* Baseline at lower boundary */} + + + {/* Generate gradient strips from boundary to main line */} + {Array.from({length: numStrips}).map((_, i) => { + // 
Calculate opacity: starts transparent at boundary (i=0), + // increases toward main line (i=numStrips-1) + const ratio = i / (numStrips - 1) + const opacity = Math.pow(ratio, gradientFalloff) * maxOpacity + + return ( + + ) + })} + + ) +} + +export default memo(LowerBand) diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx new file mode 100644 index 0000000000..0ccf00e510 --- /dev/null +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/UpperBand.tsx @@ -0,0 +1,70 @@ +import {memo} from "react" + +import {Area} from "recharts" + +function UpperBand({id, color}: {id: string; color: string}) { + // Generate gradient strips - more strips = smoother gradient + const numStrips = 30 + const gradientFalloff = 1.8 // Controls how quickly the gradient fades + const maxOpacity = 0.5 // Maximum opacity at the main line + + // Parse color if it's in hex format, otherwise use rgba + const getColorWithOpacity = (opacity: number) => { + // If color is already rgba, extract RGB values + // Otherwise assume it's hex and convert + if (color.startsWith("rgba")) { + const match = color.match(/rgba?\((\d+),\s*(\d+),\s*(\d+)/) + if (match) { + return `rgba(${match[1]}, ${match[2]}, ${match[3]}, ${opacity})` + } + } + // For hex colors like "#2196f3" + const hex = color.replace("#", "") + const r = parseInt(hex.substring(0, 2), 16) + const g = parseInt(hex.substring(2, 4), 16) + const b = parseInt(hex.substring(4, 6), 16) + return `rgba(${r}, ${g}, ${b}, ${opacity})` + } + + return ( + <> + {/* Baseline at main line */} + + + {/* Generate gradient strips from main line to boundary */} + {Array.from({length: numStrips}).map((_, i) => { + // Calculate opacity: starts strong at main line (i=0), + // fades toward boundary (i=numStrips-1) + const ratio = i / (numStrips - 1) + const 
opacity = Math.pow(1 - ratio, gradientFalloff) * maxOpacity + + return ( + + ) + })} + + ) +} + +export default memo(UpperBand) diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts new file mode 100644 index 0000000000..3622fbe691 --- /dev/null +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/assets/helpers.ts @@ -0,0 +1,35 @@ +export const withAlpha = (color: string, alpha: number) => { + if (color.startsWith("#")) { + const hex = color.slice(1) + const normalized = + hex.length === 3 + ? hex + .split("") + .map((ch) => ch + ch) + .join("") + : hex + const int = Number.parseInt(normalized, 16) + if (!Number.isNaN(int)) { + const r = (int >> 16) & 255 + const g = (int >> 8) & 255 + const b = int & 255 + return `rgba(${r}, ${g}, ${b}, ${alpha})` + } + } + return color +} + +export const format3Sig = (n: number) => { + if (!Number.isFinite(n)) return String(n) + const abs = Math.abs(n) + if (abs !== 0 && (abs < 0.001 || abs >= 1000)) return n.toExponential(2) + const s = n.toPrecision(3) + return String(Number(s)) +} + +export const formatTimestamp = (value: number) => { + if (!Number.isFinite(value)) return "" + const date = new Date(value) + if (Number.isNaN(date.getTime())) return "" + return `${date.toLocaleDateString()} ${date.toLocaleTimeString()}` +} diff --git a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx index 34f101567a..a78b3d9d54 100644 --- a/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx +++ b/web/ee/src/components/EvalRunDetails/AutoEvalRun/components/EvaluatorMetricsChart/index.tsx @@ -6,11 +6,11 @@ import clsx from "clsx" import {EvaluatorDto} from 
"@/oss/lib/hooks/useEvaluators/types" import {EVAL_BG_COLOR} from "../../assets/utils" +import BarChartPlaceholder from "../shared/BarChartPlaceholder" +import PlaceholderOverlay, {PlaceholderEvaluationType} from "../shared/PlaceholderOverlay" import BarChart from "./assets/BarChart" import HistogramChart from "./assets/HistogramChart" -import PlaceholderOverlay, {PlaceholderEvaluationType} from "../shared/PlaceholderOverlay" -import BarChartPlaceholder from "../shared/BarChartPlaceholder" /* ---------------- helpers ---------------- */ @@ -221,7 +221,7 @@ const EvaluatorMetricsChart = ({
- {evaluator?.name} + {evaluatorLabel} {name} diff --git a/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx index c4c07ba0ed..86e81c461e 100644 --- a/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx +++ b/web/ee/src/components/EvalRunDetails/HumanEvalRun/components/Modals/RenameEvalModal/index.tsx @@ -15,7 +15,15 @@ import {RenameEvalModalProps} from "../types" import RenameEvalModalContent from "./assets/RenameEvalModalContent" -const RenameEvalModal = ({id, name, description, runId, ...props}: RenameEvalModalProps) => { +const RenameEvalModal = ({ + id, + name, + description, + runId, + onCancel, + afterClose, + ...modalProps +}: RenameEvalModalProps) => { const {mutate} = useSWRConfig() const contextRunId = useRunId() // Get runId from context const effectiveRunId = runId || contextRunId // Use prop runId if available, otherwise context @@ -28,8 +36,8 @@ const RenameEvalModal = ({id, name, description, runId, ...props}: RenameEvalMod setEditName(name) setEditDescription(description || "") setError(null) - props.afterClose?.() - }, [name, description]) + afterClose?.() + }, [name, description, afterClose]) const handleSave = useCallback(async () => { setLoading(true) @@ -39,7 +47,7 @@ const RenameEvalModal = ({id, name, description, runId, ...props}: RenameEvalMod const state = evalAtomStore().get(evaluationRunStateFamily(effectiveRunId)) try { - await axios.patch(`/preview/evaluations/runs/${id}`, { + const response = await axios.patch(`/preview/evaluations/runs/${id}`, { run: { ...state.rawRun, id, @@ -47,6 +55,14 @@ const RenameEvalModal = ({id, name, description, runId, ...props}: RenameEvalMod description: editDescription, }, }) + + const updatedCount = response?.data?.count + if (typeof updatedCount === "number" && updatedCount <= 0) { + message.error("Failed to update 
evaluation run.") + onCancel?.({} as any) + return + } + await mutate( (key: string) => key.includes("/preview/evaluations/runs/") || key.includes(id), undefined, @@ -54,16 +70,19 @@ const RenameEvalModal = ({id, name, description, runId, ...props}: RenameEvalMod ) message.success("Evaluation run updated") - props.onCancel?.({} as any) + onCancel?.({} as any) } catch (err: any) { setError(err?.message || "Failed to update run") } finally { setLoading(false) } - }, [id, editName, editDescription, mutate, runId]) + }, [id, editName, editDescription, mutate, effectiveRunId, name, onCancel]) const isDisabled = useMemo(() => { - return editName.trim() === name.trim() && editDescription.trim() === description?.trim() + return ( + editName?.trim?.() === name?.trim?.() && + editDescription?.trim?.() === description?.trim?.() + ) }, [editName, editDescription, name, description]) return ( @@ -73,8 +92,9 @@ const RenameEvalModal = ({id, name, description, runId, ...props}: RenameEvalMod confirmLoading={loading} okText="Save" afterClose={onAfterClose} + onCancel={onCancel} okButtonProps={{disabled: isDisabled}} - {...props} + {...modalProps} > { +const UrlSync = ({evalType}: {evalType: "auto" | "human" | "online" | "custom"}) => { const router = useRouter() // Use global store for all atom reads/writes to ensure consistency @@ -22,7 +22,7 @@ const UrlSync = ({evalType}: {evalType: "auto" | "human" | "online"}) => { const {view, scenarioId, compare} = router.query const queryView = Array.isArray(view) ? view[0] : (view as string | undefined) const fallbackView = - evalType === "auto" ? "testcases" : evalType === "online" ? "results" : "focus" + evalType === "online" ? "results" : evalType === "human" ? "focus" : "testcases" const v = queryView ?? 
fallbackView // Parse compare parameter - can be a single string or array of strings diff --git a/web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx b/web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx index fc434a83b1..7e9d39745d 100644 --- a/web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx +++ b/web/ee/src/components/EvalRunDetails/components/EvalRunOverviewViewer/index.tsx @@ -154,13 +154,21 @@ const extractTimeSeriesValue = (rawValue: any): {value: number; isBoolean: boole return {value: rawValue.value, isBoolean: false} } - const frequency = Array.isArray(rawValue.frequency) - ? rawValue.frequency - : Array.isArray(rawValue.rank) - ? rawValue.rank + const frequency = Array.isArray((rawValue as any).frequency) + ? (rawValue as any).frequency + : Array.isArray((rawValue as any).rank) + ? (rawValue as any).rank + : Array.isArray((rawValue as any).freq) + ? (rawValue as any).freq + : undefined + + const uniqueArr = Array.isArray((rawValue as any).unique) + ? (rawValue as any).unique + : Array.isArray((rawValue as any).uniq) + ? (rawValue as any).uniq : undefined - if (Array.isArray(rawValue.unique) || frequency) { + if (uniqueArr || frequency) { const counts = frequency?.map((entry) => entry?.count ?? entry?.frequency ?? 0) ?? 
[] const total = typeof rawValue.count === "number" @@ -228,56 +236,45 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { const runs = useAtomValue(runsStateFamily(allRunIds)) const metricsByRun = useAtomValue(runsMetricsFamily(allRunIds)) const rawMetricsByRun = useAtomValue(runsRawMetricsFamily(allRunIds)) - const {data: previewEvaluators} = useEvaluators({preview: true, queries: {is_human: false}}) - const {data: projectEvaluators} = useEvaluators() - - const catalogEvaluators = useMemo( - () => [...toArray(previewEvaluators), ...toArray(projectEvaluators)], - [previewEvaluators, projectEvaluators], - ) - - const catalogEvaluatorsByIdentifier = useMemo(() => { - const map = new Map() - catalogEvaluators.forEach((entry) => { - collectEvaluatorIdentifiers(entry).forEach((identifier) => { - if (!map.has(identifier)) { - map.set(identifier, entry) - } - }) - }) - return map - }, [catalogEvaluators]) const evaluatorsBySlug = useMemo(() => { const map = new Map() - const register = (entry: any) => { + const register = (entry: any, slug: string) => { if (!entry || typeof entry !== "object") return - const identifiers = collectEvaluatorIdentifiers(entry) - let catalogMatch: any - for (const identifier of identifiers) { - const match = catalogEvaluatorsByIdentifier.get(identifier) - if (match) { - catalogMatch = match - break - } - } - const merged = mergeEvaluatorRecords(entry, catalogMatch) ?? catalogMatch ?? 
entry - const slug = - pickString(merged?.slug) || - pickString(entry?.slug) || - pickString(catalogMatch?.slug) || - undefined + if (!slug || map.has(slug)) return - map.set(slug, merged) + map.set(slug, entry) } runs.forEach((state) => { - toArray(state?.enrichedRun?.evaluators).forEach(register) + const annotationSteps = state?.enrichedRun?.data?.steps?.filter( + (step) => step.type === "annotation", + ) + annotationSteps?.forEach((step) => { + toArray( + state?.enrichedRun?.evaluators?.filter( + (evaluator) => evaluator.id === step.references?.evaluator?.id, + ), + ).forEach((evaluator) => { + if (!evaluator) return + const originalKey = typeof step?.key === "string" ? step.key : undefined + const registerKey = (key?: string) => { + if (!key) return + register(evaluator, key) + } + registerKey(originalKey) + if (step.origin === "human" && originalKey) { + const parts = originalKey.split(".") + if (parts.length > 1) { + registerKey(parts[1]) + } + } + }) + }) }) - toArray(evaluators).forEach(register) return Object.fromEntries(map.entries()) - }, [runs, evaluators, catalogEvaluatorsByIdentifier]) + }, [runs, evaluators]) const schemaMetricDefinitionsBySlug = useMemo(() => { const map: Record = {} @@ -386,17 +383,47 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { (source?.[rawKey] as Record) if (!metricHasContent(metric)) return - const [slug, ...rest] = canonical.split(".") - const metricKey = rest.join(".") || slug + const segments = canonical.split(".").filter(Boolean) + if (!segments.length) return + + const resolveSlugFromSegments = (): {slug: string; metricStartIdx: number} | null => { + let slugCandidate = segments[0] + let idx = 1 + while (idx <= segments.length) { + if (evaluatorsBySlug[slugCandidate]) { + return {slug: slugCandidate, metricStartIdx: idx} + } + if (idx >= segments.length) break + slugCandidate = `${slugCandidate}.${segments[idx]}` + idx += 1 + } + if (segments.length > 1 && 
evaluatorsBySlug[segments[1]]) { + return {slug: segments[1], metricStartIdx: 2} + } + return null + } + + const resolved = resolveSlugFromSegments() + if (!resolved) return + const {slug, metricStartIdx} = resolved + + const metricKeySegments = segments.slice(metricStartIdx) + const metricKey = + metricKeySegments.length > 0 + ? metricKeySegments.join(".") + : segments[metricStartIdx - 1] const evaluator = evaluatorsBySlug[slug] if (!evaluator) { return } + if (metricKey.startsWith("attributes.ag.metrics")) { + return + } + const allowedKeys = evaluatorMetricKeysBySlug[slug] - if (allowedKeys) { - if (allowedKeys.size === 0) return + if (allowedKeys && allowedKeys.size > 0) { const segments = metricKey.split(".").filter(Boolean) const candidateKeys = new Set([metricKey]) segments.forEach((_, idx) => { @@ -470,6 +497,7 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { const evaluatorList = Object.values(evaluatorsBySlug) const resolvedEvalType: PlaceholderEvaluationType = evalType === "online" ? "online" : evalType === "human" ? "human" : "auto" + const evaluatorKeysWithMetrics = useMemo(() => { const set = new Set() combinedMetricEntries.forEach(({evaluatorSlug}) => { @@ -479,11 +507,8 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { }, [combinedMetricEntries]) const placeholderEvaluators = useMemo(() => { - if (!evaluatorList.length) return [] - return evaluatorList.filter((ev: any) => { - const key = ev?.slug - if (!key) return true - return !evaluatorKeysWithMetrics.has(key) + return Object.entries(evaluatorsBySlug).filter(([slug, evaluator]) => { + return !evaluatorKeysWithMetrics.has(slug) }) }, [evaluatorList, evaluatorKeysWithMetrics]) @@ -745,8 +770,13 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { }, [buildPlaceholderCopy, resolvedEvalType, scaffoldItems]) if (shouldShowMetricsSkeleton) { - return + return ( + + ) } + return ( <>
@@ -774,14 +804,7 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { if (type === "online") { const resolveEntryTimestamp = (entry: any): number | null => { - const rawTs = - entry?.timestamp ?? - entry?.window?.timestamp ?? - entry?.window?.end ?? - entry?.created_at ?? - entry?.createdAt ?? - entry?.window_start ?? - null + const rawTs = entry?.timestamp ?? null if (typeof rawTs === "number") { return Number.isFinite(rawTs) ? rawTs : null } @@ -814,17 +837,48 @@ const EvalRunOverviewViewer = ({type = "auto"}: {type: "auto" | "online"}) => { if (ts == null) return null const source = entry?.data || {} - let rawValue = source?.[fullKey] + const isPlainObject = ( + v: unknown, + ): v is Record => + !!v && + typeof v === "object" && + !Array.isArray(v) + + const groupFlat: Record = {} + Object.entries(source || {}).forEach( + ([groupKey, groupVal]) => { + if (isPlainObject(groupVal)) { + Object.entries(groupVal).forEach( + ([innerKey, innerVal]) => { + groupFlat[ + `${groupKey}.${innerKey}` + ] = innerVal + }, + ) + } + }, + ) + + let rawValue = getMetricValueWithAliases( + source, + fullKey, + ) + if (rawValue === undefined) { - rawValue = getMetricValueWithAliases( - source, - fullKey, + const matchKey = Object.keys( + groupFlat, + ).find( + (k) => + k === fullKey || + k.endsWith(`.${fullKey}`), ) + if (matchKey) rawValue = groupFlat[matchKey] } if (rawValue === undefined) return null const resolved = extractTimeSeriesValue(rawValue) + if (!resolved) return null const {value, isBoolean: valueIsBoolean} = resolved diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx index 8de842f3a6..1291ddaaec 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/ScenarioTable.tsx 
@@ -22,10 +22,10 @@ import { type QueryWindowingPayload, } from "../../../../services/onlineEvaluations/api" import {EvalRunTestcaseTableSkeleton} from "../../AutoEvalRun/components/EvalRunTestcaseViewer/assets/EvalRunTestcaseViewerSkeleton" -import type {TableRow} from "./types" import useScrollToScenario from "./hooks/useScrollToScenario" import useTableDataSource from "./hooks/useTableDataSource" +import type {TableRow} from "./types" const VirtualizedScenarioTableAnnotateDrawer = dynamic( () => import("./assets/VirtualizedScenarioTableAnnotateDrawer"), @@ -173,7 +173,7 @@ const ScenarioTable = ({runId: propRunId}: {runId?: string}) => { const handleRowFocus = useCallback( (record: TableRow, event: React.MouseEvent) => { - if (evalType !== "auto" && evalType !== "online") return + if (evalType !== "auto" && evalType !== "online" && evalType !== "custom") return // Ignore clicks originating from interactive elements inside the row const interactiveTarget = (event.target as HTMLElement | null)?.closest( diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx index b433a2c116..a5f4612b58 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/CellComponents.tsx @@ -698,7 +698,8 @@ export const InvocationResultCell = memo( const navigation = useAppNavigation() const appState = useAppState() const contextRunId = useOptionalRunId() - const enableFocusDrawer = evalType === "auto" || evalType === "online" + const enableFocusDrawer = + evalType === "auto" || evalType === "online" || evalType === "custom" const handleOpenFocus = () => { const targetRunId = runId ?? contextRunId ?? 
null diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts index 2d7623bdde..9726bb3410 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/constants.ts @@ -37,20 +37,6 @@ export const GeneralHumanEvalMetricColumns = [ stepKey: "metric", metricType: "number", }, - { - name: "promptTokens", - kind: "metric", - path: "promptTokens", - stepKey: "metric", - metricType: "number", - }, - { - name: "completionTokens", - kind: "metric", - path: "completionTokens", - stepKey: "metric", - metricType: "number", - }, { name: "errors", kind: "metric", diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts index 053b318a28..19b7f47efd 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/dataSourceBuilder.ts @@ -19,6 +19,150 @@ import {TableRow} from "../types" import {GeneralAutoEvalMetricColumns, GeneralHumanEvalMetricColumns} from "./constants" import {createEvaluatorNameResolver} from "./evaluatorNameUtils" +const pickString = (value: unknown): string | undefined => { + if (typeof value !== "string") return undefined + const trimmed = value.trim() + return trimmed.length ? trimmed : undefined +} + +const resolvePreferredSlug = ( + slug: string | undefined, + revisionSlugByEvaluatorSlug?: Map, +): string | undefined => { + if (!slug) return undefined + return revisionSlugByEvaluatorSlug?.get(slug) ?? 
slug +} + +const buildBaseSlugByRevisionSlug = ( + revisionSlugByEvaluatorSlug?: Map, +): Map => { + const map = new Map() + revisionSlugByEvaluatorSlug?.forEach((revision, base) => { + if (revision) { + map.set(revision, base) + } + }) + return map +} + +const resolveSlugFromStepMeta = ( + meta: RunIndex["steps"][string] | undefined, + revisionSlugByEvaluatorSlug?: Map, +): string | undefined => { + if (!meta) return undefined + const refs: any = meta?.refs ?? {} + const baseSlug = + pickString(meta?.key) ?? + pickString(refs?.evaluator?.slug) ?? + pickString(refs?.evaluator_variant?.slug) ?? + pickString(refs?.evaluatorRevision?.slug) ?? + pickString(meta.key) + return resolvePreferredSlug(baseSlug, revisionSlugByEvaluatorSlug) +} + +const buildMetricDefsLookup = ( + metricsByEvaluator: Record, + revisionSlugByEvaluatorSlug?: Map, +) => { + const baseSlugByRevisionSlug = new Map() + revisionSlugByEvaluatorSlug?.forEach((revision, base) => { + if (revision) baseSlugByRevisionSlug.set(revision, base) + }) + + return (slug: string | undefined): any[] => { + if (!slug) return [] + const direct = metricsByEvaluator[slug] + if (Array.isArray(direct) && direct.length) return direct + const base = baseSlugByRevisionSlug.get(slug) + if (base) { + const baseDefs = metricsByEvaluator[base] + if (Array.isArray(baseDefs) && baseDefs.length) return baseDefs + } + return [] + } +} + +const OUTPUT_PREFIX = "attributes.ag.data.outputs." +const OUTPUT_PREFIX_LOWER = OUTPUT_PREFIX.toLowerCase() + +const normalizeEvaluatorMetricName = (name?: string): string | undefined => { + if (typeof name !== "string") return undefined + const trimmed = name.trim() + if (!trimmed) return undefined + const lower = trimmed.toLowerCase() + if (lower.startsWith(OUTPUT_PREFIX_LOWER)) { + const tail = trimmed.slice(OUTPUT_PREFIX.length) + return tail ? 
`outputs.${tail}` : "outputs" + } + return trimmed +} + +const inferMetricTypeFromStats = (stats: BasicStats | undefined): string | undefined => { + if (!stats) return undefined + if (typeof (stats as any).mean === "number" || typeof (stats as any).sum === "number") { + return "number" + } + if (Array.isArray((stats as any).frequency)) { + const values = (stats as any).frequency.map((entry: any) => entry?.value) + const uniqueTypes = new Set(values.map((value) => typeof value)) + if (uniqueTypes.size === 1) { + const [only] = Array.from(uniqueTypes) + if (only === "boolean") return "boolean" + if (only === "string") return "string" + } + } + return undefined +} + +const inferMetricsFromStatsForSlug = ( + slug: string, + statsMap: Record | undefined, + relatedSlugs: string[] = [], +): any[] => { + if (!slug || !statsMap) return [] + const candidates = new Set([slug, ...relatedSlugs].filter(Boolean) as string[]) + const derived = new Map() + + const recordMetric = (name: string | undefined, stats: BasicStats | undefined) => { + if (!name) return + const normalizedName = normalizeEvaluatorMetricName(name) ?? name + if (!normalizedName) return + const existing = derived.get(normalizedName) ?? 
{} + if (!existing.metricType) { + const inferred = inferMetricTypeFromStats(stats) + if (inferred) existing.metricType = inferred + } + derived.set(normalizedName, existing) + } + + Object.entries(statsMap).forEach(([rawKey, stats]) => { + if (typeof rawKey !== "string") return + for (const candidate of candidates) { + const prefix = `${candidate}.` + if (rawKey.startsWith(prefix)) { + const metricName = rawKey.slice(prefix.length) + if (metricName && !metricName.includes("attributes.ag.metrics")) { + if (metricName.startsWith(OUTPUT_PREFIX)) { + const tail = metricName.slice(OUTPUT_PREFIX.length) + if (tail) { + recordMetric(`${tail}`, stats) + } + } + } + return + } + } + + if (rawKey.startsWith(OUTPUT_PREFIX)) { + const tail = rawKey.slice(OUTPUT_PREFIX.length) + if (!tail) return + recordMetric(`outputs.${tail}`, stats) + } + }) + + return Array.from(derived.entries()).map(([key, meta]) => ({[key]: meta})) +} + const AUTO_INVOCATION_METRIC_SUFFIXES = GeneralAutoEvalMetricColumns.map((col) => col.path) const AUTO_INVOCATION_METRIC_CANONICAL_SET = new Set( AUTO_INVOCATION_METRIC_SUFFIXES.map((path) => canonicalizeMetricKey(path)), @@ -105,6 +249,7 @@ export function buildScenarioTableData({ runId, evaluators, evaluatorNameBySlug, + revisionSlugByEvaluatorSlug, }: { runIndex: RunIndex | null | undefined metricsFromEvaluators: Record @@ -112,11 +257,27 @@ export function buildScenarioTableData({ runId: string evaluators: EvaluatorDto[] evaluatorNameBySlug?: Record + revisionSlugByEvaluatorSlug?: Map }): (ColumnDef & {values?: Record})[] { const baseColumnDefs: ColumnDef[] = runIndex ? 
Object.values(runIndex.columnsByStep).flat() : [] const evalType = evalAtomStore().get(evalTypeAtom) - const isHumanLikeEval = evalType === "human" || evalType === "online" + const isHumanLikeEval = evalType === "human" + // || evalType === "online" const resolveEvaluatorName = createEvaluatorNameResolver(evaluatorNameBySlug) + const metricStatsMap = (metrics || {}) as Record + const slugToStepKeyMap = new Map() + Object.entries(runIndex?.steps || {}).forEach(([key, meta]) => { + const slug = resolveSlugFromStepMeta(meta, revisionSlugByEvaluatorSlug) + if (slug && !slugToStepKeyMap.has(slug)) { + slugToStepKeyMap.set(slug, key) + } + }) + const baseSlugByRevisionSlug = buildBaseSlugByRevisionSlug(revisionSlugByEvaluatorSlug) + const evaluatorSlugSet = new Set( + (evaluators || []) + .map((e) => (typeof e?.slug === "string" ? e.slug : undefined)) + .filter((slug): slug is string => Boolean(slug)), + ) const columnsInput = baseColumnDefs.filter( (col) => col.kind === "input" && col.name !== "testcase_dedup_id", @@ -135,6 +296,41 @@ export function buildScenarioTableData({ // Further group metrics by evaluator when evaluators info present const evaluatorMetricGroups: any[] = [] + const invocationMetricGroups: any[] = [] + const rawMetricsByEvaluator: Record = + metricsFromEvaluators && typeof metricsFromEvaluators === "object" + ? (metricsFromEvaluators as Record) + : {} + + const normalizedMetricsByEvaluator: Record = {} + const registerMetricDefinitions = (targetSlug: string, defs: any[]) => { + if (!targetSlug || !Array.isArray(defs)) return + if (!normalizedMetricsByEvaluator[targetSlug]) { + normalizedMetricsByEvaluator[targetSlug] = [] + } + defs.forEach((definition) => { + if (!definition || typeof definition !== "object") return + const normalizedDefinition: Record = {} + Object.entries(definition).forEach(([key, value]) => { + if (!key) return + const normalizedKey = normalizeEvaluatorMetricName(key) ?? 
key + normalizedDefinition[normalizedKey] = value + }) + if (Object.keys(normalizedDefinition).length) { + normalizedMetricsByEvaluator[targetSlug].push(normalizedDefinition) + } + }) + } + Object.entries(rawMetricsByEvaluator).forEach(([slug, defs]) => { + if (!Array.isArray(defs)) return + const preferred = resolvePreferredSlug(slug, revisionSlugByEvaluatorSlug) ?? slug + registerMetricDefinitions(preferred, defs) + }) + + const getMetricDefinitionsForSlug = buildMetricDefsLookup( + normalizedMetricsByEvaluator, + revisionSlugByEvaluatorSlug, + ) // Evaluator Metric Columns if (metricsFromEvaluators && isHumanLikeEval) { @@ -143,33 +339,48 @@ export function buildScenarioTableData({ annotationData.forEach((data) => { const stepMeta = runIndex?.steps?.[data.stepKey] - const slug = stepMeta?.refs?.evaluator?.slug + const slug = stepMeta?.key.split(".")[1] if (!slug) return ;(columnsByEvaluator[slug] ||= []).push(data) }) - const metricsByEvaluator: Record = - metricsFromEvaluators && typeof metricsFromEvaluators === "object" - ? (metricsFromEvaluators as Record) - : {} - - const evaluatorSlugs = new Set([ - ...Object.keys(columnsByEvaluator), - ...Object.keys(metricsByEvaluator), - ]) + const evaluatorSlugs = new Set() + Object.keys(columnsByEvaluator).forEach((slug) => { + const d = columnsByEvaluator[slug] + evaluatorSlugs.add(slug) + }) evaluatorSlugs.forEach((slug) => { if (!slug) return - const evaluator = evaluators?.find((e) => e.slug === slug) const slugColumns = columnsByEvaluator[slug] || [] + const baseSlugForRevision = baseSlugByRevisionSlug.get(slug) + const evaluator = + evaluators?.find((e) => e.slug === slug) || + evaluators?.find( + (e) => resolvePreferredSlug(e.slug, revisionSlugByEvaluatorSlug) === slug, + ) const evaluatorLabel = resolveEvaluatorName(slug) - const metricDefsForSlug = Array.isArray(metricsByEvaluator[slug]) - ? 
metricsByEvaluator[slug] - : [] + const metricDefsForSlug = getMetricDefinitionsForSlug(slug) + const inferredMetricDefs = + metricDefsForSlug.length > 0 + ? metricDefsForSlug + : inferMetricsFromStatsForSlug( + slug, + metrics?.[slug] as Record, + baseSlugForRevision ? [baseSlugForRevision] : [], + ) + const resolvedMetricDefs = + inferredMetricDefs && inferredMetricDefs.length + ? inferredMetricDefs + : inferMetricsFromStatsForSlug( + slug, + metricStatsMap, + baseSlugForRevision ? [baseSlugForRevision] : [], + ) const resolveMetricType = (metricName: string) => { - const entry = metricDefsForSlug.find((definition: Record) => { + const entry = resolvedMetricDefs.find((definition: Record) => { if (!definition || typeof definition !== "object") return false return Object.prototype.hasOwnProperty.call(definition, metricName) }) @@ -179,47 +390,51 @@ export function buildScenarioTableData({ let children = slugColumns.map((data) => { - const metricName = data.name?.startsWith(`${slug}.`) - ? data.name.slice(slug.length + 1) - : data.name + const rawName = + data.name?.startsWith(`${slug}.`) && data.name.length > slug.length + 1 + ? data.name.slice(slug.length + 1) + : data.name + const metricName = normalizeEvaluatorMetricName(rawName) ?? rawName + const normalizedLabel = (metricName || rawName || "").toLowerCase() + if (normalizedLabel === "outputs" || normalizedLabel === "metrics") { + return undefined + } const formattedMetricName = formatColumnTitle(metricName || data.name || "") - - const type = resolveMetricType(metricName || data.name || "") - const kind: ColumnDef["kind"] = type === "string" ? "annotation" : "metric" - - const primaryKey = metricName ? `${slug}.${metricName}` : data.name || data.path - + const primaryKey = metricName + ? 
`${slug}.attributes.ag.data.outputs.${metricName}` + : data.name || data.path return { ...data, name: metricName || data.name, title: formattedMetricName, - kind, + kind: "annotation" as const, + // kind: "metric" as const, path: primaryKey, stepKey: data.stepKey, fallbackPath: data.path && data.path !== primaryKey ? data.path : undefined, - metricType: type, + metricType: resolveMetricType(metricName || data.name || ""), } - }) || [] + }) || ([].filter(Boolean) as ColumnDef[]) - if (!children.length && metricDefsForSlug.length) { + if (!children.length && resolvedMetricDefs.length) { const seen = new Set() - children = metricDefsForSlug + const fallbackStepKey = slugColumns[0]?.stepKey || slugToStepKeyMap.get(slug) + children = resolvedMetricDefs .map((definition: Record) => { const metricName = Object.keys(definition || {})[0] if (!metricName || seen.has(metricName)) return undefined seen.add(metricName) const formattedMetricName = formatColumnTitle(metricName) - const type = definition?.[metricName]?.metricType - const kind: ColumnDef["kind"] = type === "string" ? "annotation" : "metric" return { name: metricName, title: formattedMetricName, - kind, + kind: "annotation" as const, + // kind: "metric" as const, key: `${slug}.${metricName}`, path: `${slug}.${metricName}`, fallbackPath: `${slug}.${metricName}`, - stepKey: "metric", - metricType: type, + stepKey: fallbackStepKey ?? 
"metric", + metricType: definition?.[metricName]?.metricType, } }) .filter(Boolean) as ColumnDef[] @@ -235,105 +450,193 @@ export function buildScenarioTableData({ }) } - if (metricsFromEvaluators && evalType === "auto") { + if (metricsFromEvaluators && ["auto", "online", "custom"].includes(evalType)) { const annotationData = baseColumnDefs.filter((def) => def.kind === "annotation") - const groupedAnnotationData = groupBy(annotationData, (data) => { - return data.name.split(".")[0] + + const stepSlugByKey = new Map() + Object.entries(runIndex?.steps || {}).forEach(([key, meta]) => { + if (meta.kind === "annotation") { + const slug = resolveSlugFromStepMeta(meta, revisionSlugByEvaluatorSlug) + if (slug) { + stepSlugByKey.set(key, slug) + } + } }) - for (const metricKey of Object.keys(metricsFromEvaluators)) { - const evaluator = evaluators?.find((e) => e.slug === metricKey) - const evaluatorLabel = resolveEvaluatorName(metricKey) - - // Build children from base run annotations when available, otherwise from metrics map - let children = Object.entries(groupedAnnotationData) - .flatMap(([k, v]) => { - return v.map((data) => { - // Prefer strict match on slug in data.path when present, else stepKey - const pathPrefix = `${metricKey}.` - const belongsToEvaluator = - (data.path && data.path.startsWith(pathPrefix)) || - data.stepKey === metricKey - if (belongsToEvaluator) { - const metric = metrics?.[`${metricKey}.${data.name}`] - const isMean = metric?.mean !== undefined - const legacyPath = `${metricKey}.${data.name}` - const fullPath = data.path ? 
`${metricKey}.${data.path}` : legacyPath - - if ( - matchesGeneralInvocationMetric(fullPath) || - matchesGeneralInvocationMetric(legacyPath) - ) { - return undefined - } - - const formattedName = formatColumnTitle(data.name) - // infer type from metricsFromEvaluators entry if present - const type = metricsFromEvaluators[metricKey]?.find( - (x: any) => data.name in x, - )?.[data.name]?.metricType - const kind: ColumnDef["kind"] = - type === "string" ? "annotation" : "metric" - - return { - ...data, - name: data.name, - key: `${metricKey}.${data.name}`, - title: `${formattedName} ${isMean ? "(mean)" : ""}`.trim(), - kind, - path: fullPath, - fallbackPath: legacyPath, - stepKey: "metric", - metricType: type, - } - } - return undefined - }) - }) - .filter(Boolean) as any[] + const annotationColumnsBySlug: Record = {} + annotationData.forEach((column) => { + const slug = column.stepKey ? stepSlugByKey.get(column.stepKey) : undefined + if (!slug) return + ;(annotationColumnsBySlug[slug] ||= []).push(column) + }) - // If no base annotations matched (evaluator only exists in comparison runs), - // fall back to constructing children from metricsFromEvaluators - if (!children.length) { - const metricDefs = metricsFromEvaluators[metricKey] || [] - const seen = new Set() - children = metricDefs - .map((def: any) => { - const metricName = Object.keys(def || {})[0] - if (!metricName || seen.has(metricName)) return undefined - seen.add(metricName) - const fullPath = `${metricKey}.${metricName}` - if ( - matchesGeneralInvocationMetric(fullPath) || - matchesGeneralInvocationMetric(metricName) - ) { - return undefined - } - const formattedName = formatColumnTitle(metricName) - const type = def?.[metricName]?.metricType - const kind: ColumnDef["kind"] = type === "string" ? 
"annotation" : "metric" - return { - name: metricName, - key: `${metricKey}.${metricName}`, - title: formattedName, - kind, - path: fullPath, - fallbackPath: fullPath, - stepKey: "metric", - metricType: type, - } - }) - .filter(Boolean) as any[] + const gatherMetricDefinitions = (key: string): any[] => { + const defs = getMetricDefinitionsForSlug(key) + if (defs.length) return defs + const alias = stepSlugByKey.get(key) + if (alias && alias !== key) { + return getMetricDefinitionsForSlug(alias) } + return [] + } - evaluatorMetricGroups.push({ - title: evaluator?.name || evaluatorLabel, - key: `metrics_${metricKey}_evaluators`, - children, + const sourceKeys = new Set() + Array.from(stepSlugByKey.keys()).forEach((key) => { + sourceKeys.add(key) + const slugFromKey = resolvePreferredSlug( + stepSlugByKey.get(key) || key, + revisionSlugByEvaluatorSlug, + ) + if (slugFromKey) sourceKeys.add(slugFromKey) + }) + const evaluatorGroupsBySlug = new Map< + string, + {title: string; key: string; children: ColumnDef[]; seen: Set} + >() + + const getChildIdentity = (child: ColumnDef): string | undefined => + (typeof child.key === "string" && child.key.length ? child.key : undefined) ?? + (typeof child.path === "string" && child.path.length ? child.path : undefined) ?? + (typeof child.name === "string" && child.name.length ? 
child.name : undefined) + + const appendChildrenToGroup = (slug: string, title: string, children: ColumnDef[]) => { + if (!children.length) return + const groupKey = `metrics_${slug}_evaluators` + const existing = evaluatorGroupsBySlug.get(groupKey) + if (!existing) { + const seen = new Set() + const deduped: ColumnDef[] = [] + children.forEach((child) => { + const identity = getChildIdentity(child) + if (!identity || seen.has(identity)) return + seen.add(identity) + deduped.push(child) + }) + if (!deduped.length) return + evaluatorGroupsBySlug.set(groupKey, { + title, + key: groupKey, + children: deduped, + seen, + }) + return + } + children.forEach((child) => { + const identity = getChildIdentity(child) + if (!identity || existing.seen.has(identity)) return + existing.seen.add(identity) + existing.children.push(child) }) } - } + sourceKeys.forEach((rawKey) => { + const slug = resolvePreferredSlug( + stepSlugByKey.get(rawKey) || rawKey, + revisionSlugByEvaluatorSlug, + ) + if (!slug) return + + const stepData = runIndex?.steps?.[rawKey] + + const baseSlugForRevision = baseSlugByRevisionSlug.get(slug) + const evaluator = evaluators?.find((e) => e.id === stepData?.refs?.evaluator?.id) + const evaluatorLabel = resolveEvaluatorName(slug) + const metricDefsPrimary = gatherMetricDefinitions(rawKey) + let metricDefsForKey = + metricDefsPrimary && metricDefsPrimary.length + ? 
metricDefsPrimary + : gatherMetricDefinitions(slug) + if (!metricDefsForKey || !metricDefsForKey.length) { + const relatedSlugs = new Set() + if (typeof rawKey === "string" && rawKey.length) relatedSlugs.add(rawKey) + const alias = stepSlugByKey.get(rawKey) + if (alias && alias !== slug) relatedSlugs.add(alias) + if (baseSlugForRevision) relatedSlugs.add(baseSlugForRevision) + metricDefsForKey = inferMetricsFromStatsForSlug( + slug, + metricStatsMap, + Array.from(relatedSlugs), + ) + } + + const normalizedRawKey = resolvePreferredSlug( + stepSlugByKey.get(rawKey) || rawKey, + revisionSlugByEvaluatorSlug, + ) + const columnsForKey = [ + ...(annotationColumnsBySlug[slug] || []), + ...(normalizedRawKey && normalizedRawKey !== slug + ? annotationColumnsBySlug[normalizedRawKey] || [] + : []), + ] + const hasMetricDefs = + normalizedMetricsByEvaluator[slug]?.length || + (normalizedRawKey && normalizedMetricsByEvaluator[normalizedRawKey]?.length) || + (metricDefsForKey?.length ?? 0) > 0 + + if (!columnsForKey.length && !hasMetricDefs && !metricDefsForKey?.length) { + return + } + + const seen = new Set() + const children: ColumnDef[] = [] + const pushChild = (child?: ColumnDef) => { + if (!child) return + const key = child.key || child.path || child.name + if (!key) return + if (seen.has(key)) return + seen.add(key) + children.push(child) + } + + const appendMetricDefs = (definitions: Record[]) => { + definitions.forEach((definition: Record) => { + const originalName = Object.keys(definition || {})[0] + const metricName = normalizeEvaluatorMetricName(originalName) ?? 
originalName + const metricType = definition?.[metricName]?.metricType + if (!metricName) return + if (metricType === "object") return + const canonicalKey = `${slug}.${metricName}` + if (seen.has(canonicalKey)) return + const candidatesForSkip = [ + canonicalKey, + metricName, + `attributes.ag.data.outputs.${metricName}`, + ] + if ( + candidatesForSkip.some((candidate) => + matchesGeneralInvocationMetric(candidate), + ) + ) { + return + } + const formattedName = formatColumnTitle(metricName) + + pushChild({ + name: metricName, + title: formattedName, + kind: "annotation" as const, + key: canonicalKey, + path: `${rawKey}.${OUTPUT_PREFIX}${metricName}`, + stepKey: rawKey ?? "metric", + metricType, + }) + }) + } + + if (metricDefsForKey.length) { + appendMetricDefs(metricDefsForKey) + } + + if (!children.length) return + + appendChildrenToGroup(slug, evaluator?.name || evaluatorLabel, children) + }) + + evaluatorGroupsBySlug.forEach(({seen: _seen, ...group}) => { + evaluatorMetricGroups.push(group) + }) + } const genericMetricsGroup = { title: "Metrics", key: "__metrics_group__", diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx index 93df472fcf..c2843b89de 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/assets/utils.tsx @@ -325,7 +325,7 @@ export function buildAntdColumns( /> ) } - return evalType === "auto" ? ( + return evalType === "auto" || evalType === "custom" ? 
( { const isOnlineEval = evalType === "online" - const isAutoEval = evalType === "auto" + const isAutoEval = evalType === "auto" || evalType === "custom" const useSingleColumnLayout = isOnlineEval || (isAutoEval && totalOutputs <= 1) const outputKey = outputCol.name || outputCol.path || `output-${idx}` const columnTitle = useSingleColumnLayout @@ -629,7 +629,10 @@ export function buildAntdColumns( } as EnhancedColumnType } - if (evalType === "online" || (evalType === "auto" && outputColumns.length <= 1)) { + if ( + evalType === "online" || + ((evalType === "auto" || evalType === "custom") && outputColumns.length <= 1) + ) { const outputIndex = Math.max(outputColumns.indexOf(c), 0) return createOutputColumnDef(c, outputIndex, outputColumns.length) } @@ -726,23 +729,23 @@ export function buildAntdColumns( } case "metric": { // If this “metric” is actually pointing inside annotations, render via AnnotationValueCell - if (isAnnotationLikeMetricPath(c.path)) { - const annotationStepKey = resolveStepKeyForRun(c, effectiveRunId) - const fieldPath = toAnnotationFieldPath(c.path) - return ( - - ) - } + // if (isAnnotationLikeMetricPath(c.path)) { + // const annotationStepKey = resolveStepKeyForRun(c, effectiveRunId) + // const fieldPath = toAnnotationFieldPath(c.path) + // return ( + // + // ) + // } const scenarioId = record.scenarioId || record.key const evaluatorSlug = (c as any).evaluatorSlug as string | undefined diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx index 3d2f1711ec..6af6d7d86e 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useExpandableComparisonDataSource.tsx @@ -451,6 +451,24 @@ const 
useExpandableComparisonDataSource = ({ projectEvaluators, fetchedEvaluatorsById, ]) + const revisionSlugMap = useMemo(() => { + const map = new Map() + Object.values(runIndex?.steps ?? {}).forEach((meta: any) => { + if (!meta || meta.kind !== "annotation") return + const baseSlug = + typeof meta?.refs?.evaluator?.slug === "string" + ? meta.refs.evaluator.slug + : undefined + const revisionSlug = + typeof meta?.refs?.evaluatorRevision?.slug === "string" + ? meta.refs.evaluatorRevision.slug + : undefined + if (baseSlug && revisionSlug && baseSlug !== revisionSlug && !map.has(baseSlug)) { + map.set(baseSlug, revisionSlug) + } + }) + return map + }, [runIndex]) const rawColumns = useMemo( () => @@ -460,8 +478,16 @@ const useExpandableComparisonDataSource = ({ runId: baseRunId, evaluators: allEvaluators, evaluatorNameBySlug, + revisionSlugByEvaluatorSlug: revisionSlugMap, }), - [runIndex, resolvedMetricsFromEvaluators, allEvaluators, evaluatorNameBySlug, expendedRows], + [ + runIndex, + resolvedMetricsFromEvaluators, + allEvaluators, + evaluatorNameBySlug, + revisionSlugMap, + expendedRows, + ], ) const columnsWithRunSpecificSteps = useMemo(() => { diff --git a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts index 69daf16e85..f5628bae0e 100644 --- a/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts +++ b/web/ee/src/components/EvalRunDetails/components/VirtualizedScenarioTable/hooks/useTableDataSource.ts @@ -19,6 +19,7 @@ import { runIndexFamily, } from "../../../../../lib/hooks/useEvaluationRunData/assets/atoms/runScopedAtoms" // import {scenarioMetricsMapFamily} from "../../../../../lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics" +import {runMetricsStatsCacheFamily} from "../../../../../lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics" 
import { displayedScenarioIdsFamily, loadableScenarioStepFamily, @@ -119,6 +120,7 @@ const useTableDataSource = () => { const runIndex = useAtomValue(runIndexFamily(runId)) const metricsFromEvaluators = useAtomValue(metricsFromEvaluatorsFamily(runId)) || EMPTY_METRICS_MAP + const metricStatsMap = useAtomValue(runMetricsStatsCacheFamily(runId)) || {} // temporary implementation to implement loading state for auto eval const firstScenarioLoadable = useAtomValue(firstScenarioLoadableFamily(runId)) const loadableState = firstScenarioLoadable?.state @@ -139,6 +141,21 @@ const useTableDataSource = () => { : [], [rawEvaluators], ) + const revisionSlugByEvaluatorSlug = useMemo(() => { + const map = new Map() + const steps = runIndex?.steps ?? {} + Object.values(steps).forEach((meta: any) => { + if (!meta || meta.kind !== "annotation") return + const baseSlug = pickString(meta?.refs?.evaluator?.slug) + const revisionSlug = pickString(meta?.refs?.evaluatorRevision?.slug) + if (baseSlug && revisionSlug && baseSlug !== revisionSlug) { + if (!map.has(baseSlug)) { + map.set(baseSlug, revisionSlug) + } + } + }) + return map + }, [runIndex]) const evaluatorFailuresMap = useAtomValue(evaluatorFailuresMapFamily(runId)) const {data: previewEvaluators} = useEvaluators({preview: true}) const {data: projectEvaluators} = useEvaluators() @@ -249,7 +266,12 @@ const useTableDataSource = () => { }) return result - }, [catalogEvaluatorsByIdentifier, metricsFromEvaluators, runEvaluators]) + }, [ + catalogEvaluatorsByIdentifier, + metricsFromEvaluators, + runEvaluators, + revisionSlugByEvaluatorSlug, + ]) const evaluatorIdsFromRunIndex = useMemo(() => { const ids = new Set() @@ -353,10 +375,20 @@ const useTableDataSource = () => { runIndex, runId, metricsFromEvaluators: resolvedMetricsFromEvaluators, + metrics: metricStatsMap, evaluators: runEvaluators, evaluatorNameBySlug, + revisionSlugByEvaluatorSlug, }), - [runIndex, runId, resolvedMetricsFromEvaluators, runEvaluators, evaluatorNameBySlug], 
+ [ + runIndex, + runId, + resolvedMetricsFromEvaluators, + metricStatsMap, + runEvaluators, + evaluatorNameBySlug, + revisionSlugByEvaluatorSlug, + ], ) // Build Ant Design columns and make them resizable diff --git a/web/ee/src/components/EvalRunDetails/index.tsx b/web/ee/src/components/EvalRunDetails/index.tsx index 1108d9992a..0e7d748c61 100644 --- a/web/ee/src/components/EvalRunDetails/index.tsx +++ b/web/ee/src/components/EvalRunDetails/index.tsx @@ -77,7 +77,7 @@ const LegacyEvaluationPage = ({id: evaluationTableId}: {id: string}) => { const data = legacyEvaluationSWR.data return data ? ( - evalType === "auto" ? ( + evalType === "auto" || evalType === "custom" ? ( ) : evalType === "human" ? ( { - return evalType === "auto" ? ( + return evalType === "auto" || evalType === "custom" ? ( ) : evalType === "online" ? ( @@ -117,12 +117,12 @@ const LoadingState = ({ description, id, }: { - evalType: "auto" | "human" | "online" + evalType: "auto" | "human" | "online" | "custom" name: string description: string id: string }) => { - return evalType === "auto" ? ( + return evalType === "auto" || evalType === "custom" ? ( ) : evalType === "online" ? ( @@ -139,7 +139,7 @@ const LoadingState = ({ } const EvaluationPage = memo( - ({evalType, runId}: {evalType: "auto" | "human" | "online"; runId: string}) => { + ({evalType, runId}: {evalType: "auto" | "human" | "online" | "custom"; runId: string}) => { const rootStore = getDefaultStore() const breadcrumbs = useAtomValue(breadcrumbAtom, {store: rootStore}) const appendBreadcrumb = useSetAtom(appendBreadcrumbAtom, {store: rootStore}) @@ -190,11 +190,13 @@ const EvaluationPage = memo( const base = (typeof window !== "undefined" ? window.location.pathname : "") || "" const segs = base.split("/").filter(Boolean) const desiredLabel = - evalType === "auto" - ? "auto evaluation" - : evalType === "online" - ? "online evaluation" - : "human annotation" + evalType === "online" + ? "online evaluation" + : evalType === "human" + ? 
"human annotation" + : evalType === "custom" + ? "custom evaluation" + : "auto evaluation" const appsIdx = segs.findIndex((s) => s === "apps") if (appsIdx !== -1) { @@ -300,7 +302,7 @@ const EvaluationPage = memo( ) const EvalRunDetailsPage = memo( - ({evalType: propsEvalType}: {evalType: "auto" | "human" | "online"}) => { + ({evalType: propsEvalType}: {evalType: "auto" | "human" | "online" | "custom"}) => { const router = useRouter() const runIdParam = router.query.evaluation_id const runId = diff --git a/web/ee/src/components/EvalRunDetails/state/evalType.ts b/web/ee/src/components/EvalRunDetails/state/evalType.ts index e90f8d565c..306b919b73 100644 --- a/web/ee/src/components/EvalRunDetails/state/evalType.ts +++ b/web/ee/src/components/EvalRunDetails/state/evalType.ts @@ -1,13 +1,12 @@ import {atom} from "jotai" -// This atom is used to store the evaluation type (auto or human) for the current evaluation run. +// This atom is used to store the evaluation type for the current evaluation run. // It is used to determine which evaluation page to render. -export const evalTypeAtom = atom<"auto" | "human" | "online" | null>(null) +export const evalTypeAtom = atom<"auto" | "human" | "online" | "custom" | null>(null) -// This atom is used to set the evaluation type (auto or human) for the current evaluation run. 
export const setEvalTypeAtom = atom( null, - (get, set, update: "auto" | "human" | "online" | null) => { + (get, set, update: "auto" | "human" | "online" | "custom" | null) => { set(evalTypeAtom, update) }, ) diff --git a/web/ee/src/components/EvalRunDetails/state/urlState.ts b/web/ee/src/components/EvalRunDetails/state/urlState.ts index 24ad048770..29a988ca50 100644 --- a/web/ee/src/components/EvalRunDetails/state/urlState.ts +++ b/web/ee/src/components/EvalRunDetails/state/urlState.ts @@ -35,7 +35,7 @@ export const runViewTypeAtom = atom>(new Set()) @@ -87,7 +87,7 @@ const EvaluationStatusCell = ({ // refresh the eval after a completed run useEffect(() => { - if (evalType !== "auto") return + if (evalType !== "auto" && evalType !== "custom") return const runIdToCheck = runningEvaluations.data?.run?.id const runStatus = runningEvaluations.data?.run?.status @@ -147,7 +147,7 @@ const EvaluationStatusCell = ({ }, [scenarios]) const _status = useMemo(() => { - if (preferProvidedStatus || evalType !== "auto") return runStatus + if (preferProvidedStatus || (evalType !== "auto" && evalType !== "custom")) return runStatus return runningEvaluations.data?.run?.status || runStatus }, [preferProvidedStatus, evalType, runningEvaluations.data?.run?.status, runStatus]) diff --git a/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx b/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx index ba13698610..9312f7e771 100644 --- a/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx +++ b/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/index.tsx @@ -26,6 +26,7 @@ const TableDropdownMenu = ({ scope, projectURL, resolveAppId, + disableVariantAction = false, }: TableDropdownMenuProps) => { const router = useRouter() const primaryInvocation = extractPrimaryInvocation(record) @@ -49,7 +50,7 @@ const TableDropdownMenu = ({ onClick: (e) => { e.domEvent.stopPropagation() if ( - evalType === "auto" && + 
(evalType === "auto" || evalType === "custom") && ![ EvaluationStatus.PENDING, EvaluationStatus.RUNNING, @@ -60,7 +61,7 @@ const TableDropdownMenu = ({ ) { const evaluationId = "id" in record ? record.id : record.key const suffix = - evalType === "auto" + evalType === "auto" || evalType === "custom" ? `/evaluations/results/${evaluationId}` : `/evaluations/single_model_test/${evaluationId}` const pathname = buildEvaluationNavigationUrl({ @@ -86,10 +87,10 @@ const TableDropdownMenu = ({ key: "variant", label: "View variant", icon: , - disabled: !variantId || !targetAppId, + disabled: disableVariantAction || !variantId || !targetAppId, onClick: (e) => { e.domEvent.stopPropagation() - if (!variantId) return + if (disableVariantAction || !variantId) return onVariantNavigation({revisionId: variantId, appId: targetAppId || undefined}) }, }, diff --git a/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts b/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts index f24fa84def..2612e46f75 100644 --- a/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts +++ b/web/ee/src/components/HumanEvaluations/assets/TableDropdownMenu/types.ts @@ -13,4 +13,5 @@ export interface TableDropdownMenuProps { resolveAppId?: (evaluation: EvaluationRow) => string | undefined scope: "app" | "project" projectURL: string + disableVariantAction?: boolean } diff --git a/web/ee/src/components/HumanEvaluations/assets/utils.tsx b/web/ee/src/components/HumanEvaluations/assets/utils.tsx index a42b8fe3b0..9d515cb39a 100644 --- a/web/ee/src/components/HumanEvaluations/assets/utils.tsx +++ b/web/ee/src/components/HumanEvaluations/assets/utils.tsx @@ -5,24 +5,33 @@ import VariantDetailsWithStatus from "@agenta/oss/src/components/VariantDetailsW import {GearSix} from "@phosphor-icons/react" import {Statistic} from "antd" import {ColumnsType} from "antd/es/table" -import {getDefaultStore} from "jotai" +import {getDefaultStore, useAtomValue} from 
"jotai" import uniqBy from "lodash/uniqBy" import dynamic from "next/dynamic" +import {USEABLE_METRIC_TYPES} from "@/oss/components/pages/observability/drawer/AnnotateDrawer/assets/constants" +import {getDefaultValue} from "@/oss/components/pages/observability/drawer/AnnotateDrawer/assets/transforms" import LabelValuePill from "@/oss/components/ui/LabelValuePill" import {evaluatorsAtom} from "@/oss/lib/atoms/evaluation" import {calculateAvgScore} from "@/oss/lib/helpers/evaluate" +import { + RunIndex, + ColumnDef, + StepMeta, +} from "@/oss/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex" import {IScenario} from "@/oss/lib/hooks/useEvaluationRunScenarios/types" import {EvaluatorDto} from "@/oss/lib/hooks/useEvaluators/types" -import {buildEvaluatorMetricColumns} from "@/oss/lib/metricColumnFactory" +import {buildEvaluatorMetricColumns, buildMetricKeyCandidates} from "@/oss/lib/metricColumnFactory" import {getMetricConfig, metricPriority} from "@/oss/lib/metrics/utils" import { + SchemaMetricType, canonicalizeMetricKey, getMetricValueWithAliases, summarizeMetric, } from "@/oss/lib/metricUtils" import {_Evaluation, EvaluationStatus} from "@/oss/lib/Types" import {BasicStats} from "@/oss/services/runMetrics/api/types" +import {appDetailQueryAtomFamily} from "@/oss/state/app" import {GeneralAutoEvalMetricColumns} from "../../EvalRunDetails/components/VirtualizedScenarioTable/assets/constants" import {extractPrimaryInvocation, extractEvaluationAppId} from "../../pages/evaluations/utils" @@ -100,6 +109,483 @@ const labelForRunMetric = (canonicalKey: string) => { return label } +const OUTPUT_PREFIX = "attributes.ag.data.outputs." +const ANALYTICS_PREFIX = "attributes.ag.metrics." 
+ +const stripMetricPrefix = (value: string): string | undefined => { + if (value.startsWith(OUTPUT_PREFIX)) return value.slice(OUTPUT_PREFIX.length) + if (value.startsWith(ANALYTICS_PREFIX)) return value.slice(ANALYTICS_PREFIX.length) + return undefined +} + +const isOutputMetricKey = ( + value: string, +): {isKey: boolean; normalized?: string; leafMetric?: string; prefix?: "outputs" | "metrics"} => { + if (!value) return {isKey: false} + const canonical = canonicalizeMetricKey(value) + const outputIdx = canonical.indexOf(OUTPUT_PREFIX) + const analyticsIdx = canonical.indexOf(ANALYTICS_PREFIX) + let normalized: string | undefined + let suffix: string | undefined + let prefix: "outputs" | "metrics" | undefined + if (outputIdx !== -1) { + normalized = canonical.slice(outputIdx) + suffix = normalized.slice(OUTPUT_PREFIX.length) + prefix = "outputs" + } else if (analyticsIdx !== -1) { + normalized = canonical.slice(analyticsIdx) + suffix = normalized.slice(ANALYTICS_PREFIX.length) + prefix = "metrics" + } + if (!normalized || !suffix) return {isKey: false} + const trimmedSuffix = suffix.replace(/^\.+/, "") + if (!trimmedSuffix) return {isKey: false} + const leafMetric = trimmedSuffix + return {isKey: true, normalized, leafMetric, prefix} +} + +const normalizeIdentifier = (value: unknown): string | undefined => { + if (value === null || value === undefined) return undefined + if (typeof value === "string") return value + if (typeof value === "number" || typeof value === "boolean") return String(value) + return undefined +} + +const getRunIdFromEvaluation = (evaluation: EvaluationRow | any): string | undefined => { + if (!evaluation || typeof evaluation !== "object") return undefined + const candidates = [ + normalizeIdentifier((evaluation as any).id), + normalizeIdentifier((evaluation as any).run_id), + normalizeIdentifier((evaluation as any).runId), + normalizeIdentifier((evaluation as any).key), + ].filter(Boolean) as string[] + return candidates[0] +} + +const 
deriveStepSlugFromKey = (key?: string): string | undefined => { + if (!key || typeof key !== "string") return undefined + const trimmed = key.trim() + if (!trimmed) return undefined + const parts = trimmed.split(".").filter(Boolean) + if (parts.length >= 2) return parts[parts.length - 1] + return parts[0] +} + +const collectReferenceIdentifiers = (meta?: StepMeta): string[] => { + if (!meta) return [] + const refs = meta.refs ?? {} + const candidates = [ + refs?.evaluator?.slug, + refs?.evaluator?.id, + refs?.evaluator?.key, + refs?.evaluatorVariant?.slug, + refs?.evaluatorVariant?.id, + refs?.evaluatorVariant?.key, + refs?.evaluatorRevision?.slug, + refs?.evaluatorRevision?.id, + refs?.evaluatorRevision?.key, + ] + return (candidates.map(normalizeIdentifier).filter(Boolean) as string[]) ?? [] +} + +const collectEvaluatorIdentifiers = (evaluator?: EvaluatorDto): Set => { + const identifiers = [ + normalizeIdentifier(evaluator?.slug), + normalizeIdentifier((evaluator as any)?.id), + normalizeIdentifier((evaluator as any)?.key), + ].filter(Boolean) as string[] + return new Set(identifiers) +} + +const collectStepSlugsFromRunIndex = ( + runIndex: RunIndex | undefined, + evaluator: EvaluatorDto, +): string[] => { + if (!runIndex) return [] + const matches = new Set() + const evaluatorIdentifiers = collectEvaluatorIdentifiers(evaluator) + Object.values(runIndex.steps ?? {}).forEach((meta) => { + if (!meta || meta.kind !== "annotation") return + const stepSlug = deriveStepSlugFromKey(meta.key) + if (!stepSlug) return + const referenceCandidates = collectReferenceIdentifiers(meta) + const hasRefMatch = referenceCandidates.some((candidate) => + evaluatorIdentifiers.has(candidate), + ) + const matchesByKey = + evaluator?.slug && typeof meta.key === "string" + ? 
meta.key.endsWith(`.${evaluator.slug}`) || meta.key === evaluator.slug + : false + if (hasRefMatch || matchesByKey || evaluatorIdentifiers.has(stepSlug)) { + matches.add(stepSlug) + } + }) + return Array.from(matches) +} + +const collectStepIdentifiersForEvaluator = ( + runIndexes: Record, + evaluator: EvaluatorDto, +): string[] => { + if (!runIndexes || !evaluator) return [] + const identifiers = new Set() + const evaluatorIdentifiers = collectEvaluatorIdentifiers(evaluator) + + Object.values(runIndexes).forEach((runIndex) => { + if (!runIndex) return + + Object.entries(runIndex.steps ?? {}).forEach(([key, meta]) => { + if (!meta || meta.kind !== "annotation") return + + const stepSlug = deriveStepSlugFromKey(meta.key) + const referenceCandidates = collectReferenceIdentifiers(meta) + const hasReferenceMatch = referenceCandidates.some((candidate) => + evaluatorIdentifiers.has(candidate), + ) + const matchesByKey = + evaluator?.slug && typeof meta.key === "string" + ? meta.key.endsWith(`.${evaluator.slug}`) || meta.key === evaluator.slug + : false + const matchesBySlug = stepSlug ? 
evaluatorIdentifiers.has(stepSlug) : false + + if (!hasReferenceMatch && !matchesByKey && !matchesBySlug) return + + if (typeof key === "string") { + identifiers.add(key) + const normalizedKey = normalizeIdentifier(key) + if (normalizedKey) identifiers.add(normalizedKey) + } + if (stepSlug) { + identifiers.add(stepSlug) + const normalizedSlug = normalizeIdentifier(stepSlug) + if (normalizedSlug) identifiers.add(normalizedSlug) + } + referenceCandidates.forEach((candidate) => { + identifiers.add(candidate) + const normalized = normalizeIdentifier(candidate) + if (normalized) identifiers.add(normalized) + }) + }) + }) + + return Array.from(identifiers).filter(Boolean) as string[] +} + +const resolveEvaluatorRevisionSlug = ( + evaluatorSlug: string | undefined, + runIndexesByRunId: Record, +): string | undefined => { + if (!evaluatorSlug) return undefined + for (const runIndex of Object.values(runIndexesByRunId)) { + if (!runIndex) continue + for (const meta of Object.values(runIndex.steps ?? {})) { + const refSlug = normalizeIdentifier(meta?.refs?.evaluator?.slug) + if (refSlug !== evaluatorSlug) continue + const revisionSlug = + normalizeIdentifier(meta?.refs?.evaluatorRevision?.slug) ?? + deriveStepSlugFromKey(meta?.key) + if (revisionSlug) return revisionSlug + } + } + return undefined +} + +const collectStepKeysForEvaluator = (slug: string, runIndex?: RunIndex): string[] => { + if (!runIndex) return [] + const stepKeys = new Set() + + if (runIndex.steps?.[slug]) { + stepKeys.add(slug) + } + + Object.entries(runIndex.steps ?? {}).forEach(([stepKey, meta]) => { + if (stepKeys.has(stepKey)) return + const refs = meta?.refs ?? 
{} + const candidates = [ + refs?.evaluator?.slug, + refs?.evaluator?.id, + refs?.evaluatorVariant?.slug, + refs?.evaluatorVariant?.id, + refs?.evaluatorRevision?.slug, + refs?.evaluatorRevision?.id, + ] + + if (candidates.some((value) => typeof value === "string" && slug.includes(value))) { + stepKeys.add(stepKey) + } + }) + + return Array.from(stepKeys) +} + +const flattenMetricDefinitionEntries = ( + schema: Record | undefined, + prefix?: string, + acc: Record = {}, +): Record => { + if (!schema || typeof schema !== "object") { + return acc + } + + Object.entries(schema).forEach(([key, rawDefinition]) => { + if (!rawDefinition || typeof rawDefinition !== "object") return + + const candidate = + Array.isArray((rawDefinition as any).anyOf) && (rawDefinition as any).anyOf.length + ? (rawDefinition as any).anyOf[0] + : rawDefinition + const qualifiedKey = prefix ? `${prefix}.${key}` : key + const type = candidate?.type as string | undefined + + if ( + type === "object" && + candidate?.properties && + typeof candidate.properties === "object" + ) { + flattenMetricDefinitionEntries(candidate.properties, qualifiedKey, acc) + return + } + + if (!type && candidate?.properties && typeof candidate.properties === "object") { + flattenMetricDefinitionEntries(candidate.properties, qualifiedKey, acc) + return + } + + if (type === "array") { + const {value, items, ...restProps} = candidate + acc[qualifiedKey] = { + value: value ?? "", + items: { + type: items?.type === "string" ? items.type : "string", + enum: items?.enum || [], + }, + ...restProps, + type, + } + return + } + + if (type && USEABLE_METRIC_TYPES.includes(type)) { + const {value, ...restProps} = candidate + acc[qualifiedKey] = { + value: + value ?? 
+ getDefaultValue({ + property: candidate, + ignoreObject: true, + }), + ...restProps, + type, + } + return + } + + if (candidate?.value !== undefined || candidate?.description) { + acc[qualifiedKey] = candidate + } + }) + + return acc +} + +const pruneParentMetricEntries = (metrics: Record): Record => { + if (!metrics || !Object.keys(metrics).length) return metrics + const nestedPrefixes = new Map() + Object.entries(metrics).forEach(([key, definition]) => { + const label = (definition?.label as string | undefined) ?? key + if (!label) return + const [prefix] = label.split(".") + if (!prefix) return + if ((prefix === "outputs" || prefix === "metrics") && label.includes(".")) { + nestedPrefixes.set(prefix, true) + } + }) + + const result: Record = {} + Object.entries(metrics).forEach(([key, definition]) => { + const label = (definition?.label as string | undefined) ?? key + const normalized = label ?? "" + const [prefix] = normalized.split(".") + const lowerLabel = normalized.toLowerCase() + if (lowerLabel.startsWith("metrics.") || lowerLabel.startsWith("metric.")) { + return + } + if ( + nestedPrefixes.get(prefix) && + (prefix === "outputs" || prefix === "metrics") && + normalized.indexOf(".") === -1 && + (key === prefix || !definition?.label) + ) { + return + } + result[key] = definition + }) + return result +} + +const collectColumnsForStepKeys = (keys: string[], runIndex?: RunIndex): ColumnDef[] => { + if (!runIndex || !Array.isArray(keys) || !keys.length) return [] + const columns: ColumnDef[] = [] + keys.forEach((key) => { + const perStep = runIndex.columnsByStep?.[key] + if (Array.isArray(perStep)) { + perStep.forEach((column) => { + columns.push(column) + }) + } + }) + return columns +} + +const deriveMetricKeyFromColumn = (column: ColumnDef): string | undefined => { + if (!column || column.kind !== "annotation") return undefined + if (typeof column.path === "string" && column.path.trim()) { + const canonicalPath = canonicalizeMetricKey(column.path.trim()) + 
const stripped = stripMetricPrefix(canonicalPath) + if (stripped) return stripped + + const parts = canonicalPath.split(".").filter(Boolean) + if (parts.length) return parts[parts.length - 1] + } + + if (typeof column.name === "string") { + const trimmed = column.name.trim() + if (trimmed) return trimmed.replace(/\s+/g, "_").toLowerCase() + } + + return undefined +} + +const inferSchemaTypeFromStats = (stats: BasicStats | undefined): SchemaMetricType | undefined => { + if (!stats) return undefined + + if (typeof (stats as any).mean === "number" || typeof (stats as any).sum === "number") { + return "number" + } + + const tryEntryValues = [ + Array.isArray((stats as any).frequency) ? (stats as any).frequency : undefined, + Array.isArray((stats as any).rank) ? (stats as any).rank : undefined, + ] + .filter(Boolean) + .flat() as {value: unknown}[] + + if (tryEntryValues.length) { + const valueTypes = new Set(tryEntryValues.map((entry) => typeof entry.value)) + if (valueTypes.size === 1) { + const [onlyType] = Array.from(valueTypes) + if (onlyType === "boolean" || onlyType === "number" || onlyType === "string") { + return onlyType as SchemaMetricType + } + } + } + + return undefined +} + +const deriveMetricsFromRunStats = ( + slug: string | undefined, + runMetricsMap?: Record>, + runIndexesByRunId?: Record, +): Record => { + if (!slug) return {} + + const slugPrefix = `${slug}.` + const derived = new Map() + + const recordMetric = ( + metricKey: string | undefined, + type?: SchemaMetricType, + meta?: Record, + ) => { + if (!metricKey) return + const existing = derived.get(metricKey) ?? 
{} + if (!existing.type && type) { + existing.type = type + } + if (meta) { + Object.assign(existing, meta) + } + derived.set(metricKey, existing) + } + + if (runMetricsMap) { + Object.entries(runMetricsMap).forEach(([runId, metrics]) => { + if (!metrics) return + let hasPrefixedMetrics = false + Object.entries(metrics).forEach(([rawKey, stats]) => { + if (typeof rawKey !== "string") return + if (!rawKey.startsWith(slugPrefix)) return + hasPrefixedMetrics = true + + const withoutSlug = rawKey.slice(slugPrefix.length) + const canonical = canonicalizeMetricKey(withoutSlug) + + const strippedCanonical = stripMetricPrefix(canonical) + const strippedRaw = stripMetricPrefix(withoutSlug) + + const metricKey = strippedCanonical ?? strippedRaw ?? canonical + if (!metricKey) return + + const inferredType = inferSchemaTypeFromStats(stats) + }) + + if (!hasPrefixedMetrics) { + Object.entries(metrics).forEach(([rawKey, stats]) => { + if (typeof rawKey !== "string") return + if (!rawKey.startsWith(slugPrefix)) return + const {isKey, leafMetric, prefix, normalized} = isOutputMetricKey(rawKey) + if (!isKey || !leafMetric) return + const inferredType = inferSchemaTypeFromStats(stats) + let displayLabel: string | undefined + if (normalized) { + if (prefix === "outputs") { + const suffix = normalized.slice(OUTPUT_PREFIX.length) + displayLabel = suffix ? `outputs.${suffix}` : "outputs" + } else if (prefix === "metrics") { + const suffix = normalized.slice(ANALYTICS_PREFIX.length) + displayLabel = suffix ? 
`metrics.${suffix}` : "metrics" + } + } + }) + } + + if (!runIndexesByRunId?.[runId]) return + const columns = collectColumnsForStepKeys( + collectStepKeysForEvaluator(slug, runIndexesByRunId[runId]), + runIndexesByRunId[runId], + ) + + columns + .filter((column) => column.kind === "annotation") + .forEach((column) => { + const metricKey = deriveMetricKeyFromColumn(column) + if (!metricKey) return + + const stats = resolveMetricStats(metrics, [ + `${slug}.${metricKey}`, + metricKey, + `${OUTPUT_PREFIX}${metricKey}`, + `${ANALYTICS_PREFIX}${metricKey}`, + ]) + const inferredType = + inferSchemaTypeFromStats(stats) || + ((): SchemaMetricType | undefined => { + const type = column.metricType + if (!type) return undefined + if (Array.isArray(type)) { + return type[0] as SchemaMetricType + } + return type as SchemaMetricType + })() + recordMetric(metricKey, inferredType) + }) + }) + } + + return Object.fromEntries(derived.entries()) +} + export const extractEvaluationStatus = ( scenarios: IScenario[], status?: EvaluationStatus, @@ -126,7 +612,7 @@ export const extractEvaluationStatus = ( } else if (scenarios.every((s) => s.status === EvaluationStatus.SUCCESS)) { derived = EvaluationStatus.SUCCESS } else if ( - evalType === "auto" && + (evalType === "auto" || evalType === "custom") && scenarios.some((s) => s.status === EvaluationStatus.RUNNING) ) { derived = EvaluationStatus.RUNNING @@ -153,30 +639,99 @@ export const extractEvaluationStatus = ( export const getEvaluatorMetricColumns = ({ evaluations, runMetricsMap, + preferRunStepSlugs = false, }: { evaluations: EvaluationRow[] runMetricsMap?: Record> + preferRunStepSlugs?: boolean }) => { // Calculate how many evaluations include each evaluator so we can order // the columns by their popularity across runs (descending). 
const evaluatorCounts: Record = {} + const declaredEvaluatorSlugs = new Set() evaluations.forEach((evaluation) => { evaluation.evaluators?.forEach((ev: EvaluatorDto) => { evaluatorCounts[ev.slug] = (evaluatorCounts[ev.slug] ?? 0) + 1 + const normalized = normalizeIdentifier(ev?.slug) + if (normalized) declaredEvaluatorSlugs.add(normalized) }) }) + const runIndexesByRunId: Record = {} + evaluations.forEach((evaluation) => { + const runIndex = (evaluation as any)?.runIndex + if (!runIndex) return + const runId = getRunIdFromEvaluation(evaluation) + if (runId) { + runIndexesByRunId[runId] = runIndex + } + }) + + const stepSlugCache = new Map() + const resolveStepSlugs = + preferRunStepSlugs && Object.keys(runIndexesByRunId).length + ? ({record, evaluator}: {record: EvaluationRow; evaluator: EvaluatorDto}) => { + const runId = getRunIdFromEvaluation(record) + if (!runId) return [] + const cacheKey = [ + runId, + normalizeIdentifier((evaluator as any)?.id) ?? "", + normalizeIdentifier(evaluator?.slug) ?? "", + normalizeIdentifier((evaluator as any)?.key) ?? "", + ].join(":") + if (stepSlugCache.has(cacheKey)) { + return stepSlugCache.get(cacheKey)! + } + const runIndex = runIndexesByRunId[runId] + if (!runIndex) { + stepSlugCache.set(cacheKey, []) + return [] + } + const slugs = collectStepSlugsFromRunIndex(runIndex, evaluator) + stepSlugCache.set(cacheKey, slugs) + return slugs + } + : undefined + // Build a unique list of evaluators and sort it by frequency. If two // evaluators have the same frequency, fall back to their names for a // deterministic ordering. 
const evaluators = uniqBy( - evaluations.flatMap((evaluation) => evaluation.evaluators), + [...(evaluations.flatMap((evaluation) => evaluation.evaluators || []) as EvaluatorDto[])], "slug", ) .filter(Boolean) .map((evaluator: EvaluatorDto) => { + const metricsCandidate: Record = {} + const stepIdentifierCandidates = collectStepIdentifiersForEvaluator( + runIndexesByRunId, + evaluator, + ) + .concat([ + evaluator.slug, + normalizeIdentifier((evaluator as any)?.id), + normalizeIdentifier((evaluator as any)?.key), + ...(Array.isArray(evaluator?.stepIdentifierCandidates) + ? (evaluator.stepIdentifierCandidates as string[]) + : []), + ]) + .filter((value, index, self) => value && self.indexOf(value) === index) + const mergeMetricDefinitions = ( + source?: Record, + {skipFlatten = false}: {skipFlatten?: boolean} = {}, + ) => { + if (!source || typeof source !== "object" || Array.isArray(source)) return + const entries = skipFlatten ? source : flattenMetricDefinitionEntries(source) + Object.entries(entries).forEach(([key, definition]) => { + if (!definition || typeof definition !== "object") return + const existing = metricsCandidate[key] + metricsCandidate[key] = existing + ? {...definition, ...existing} + : {...definition} + }) + } + const serviceFormat = (evaluator as any)?.data?.service?.format - let metricsCandidate: Record | undefined if (serviceFormat && typeof serviceFormat === "object") { const properties = (serviceFormat as any)?.properties const outputsCandidate = @@ -185,34 +740,118 @@ export const getEvaluatorMetricColumns = ({ : undefined) ?? (serviceFormat as any).outputs if (outputsCandidate && typeof outputsCandidate === "object") { - metricsCandidate = + const schemaDefinitions = (outputsCandidate as any).properties && typeof (outputsCandidate as any).properties === "object" ? 
((outputsCandidate as any).properties as Record) : (outputsCandidate as Record) + mergeMetricDefinitions(schemaDefinitions) } } - if (!metricsCandidate) { - const fallback = - (evaluator as any)?.settings_values?.outputs ?? - (evaluator as any)?.settings?.outputs ?? - undefined - if (fallback && typeof fallback === "object" && !Array.isArray(fallback)) { - metricsCandidate = fallback as Record - } + const fallbackOutputs = + (evaluator as any)?.settings_values?.outputs ?? + (evaluator as any)?.settings?.outputs ?? + undefined + if ( + fallbackOutputs && + typeof fallbackOutputs === "object" && + !Array.isArray(fallbackOutputs) + ) { + mergeMetricDefinitions(fallbackOutputs as Record) + } + + if ((evaluator as any)?.metrics && typeof (evaluator as any)?.metrics === "object") { + mergeMetricDefinitions((evaluator as any).metrics as Record, { + skipFlatten: true, + }) + } + + const inferredMetrics = deriveMetricsFromRunStats( + evaluator?.slug, + runMetricsMap, + runIndexesByRunId, + ) + + let metrics: Record = {...metricsCandidate} + + if (!Object.keys(metrics).length && Object.keys(inferredMetrics).length) { + metrics = {...inferredMetrics} + } else if (Object.keys(inferredMetrics).length) { + Object.entries(inferredMetrics).forEach(([metricKey, definition]) => { + if (!(metricKey in metrics)) { + metrics[metricKey] = definition + return + } + + if (!definition?.type) return + + const existing = metrics[metricKey] + const existingType = + existing && typeof existing === "object" + ? (existing as any).type + : undefined + + if (!existingType) { + metrics[metricKey] = { + ...(typeof existing === "object" && existing ? existing : {}), + type: definition.type, + } + } + }) } - const metrics = metricsCandidate ?? {} + metrics = pruneParentMetricEntries(metrics) + + const revisionSlug = + preferRunStepSlugs && evaluator?.slug + ? resolveEvaluatorRevisionSlug(evaluator.slug, runIndexesByRunId) + : undefined + + const displayName = + revisionSlug ?? + evaluator?.name ?? 
+ evaluator?.slug ?? + (typeof (evaluator as any)?.displayName === "string" + ? ((evaluator as any).displayName as string) + : undefined) + + const normalizedSlug = normalizeIdentifier(evaluator?.slug) return { - name: evaluator?.name, - slug: evaluator?.slug, + ...evaluator, + name: displayName, + slug: evaluator?.slug ?? revisionSlug ?? evaluator?.name, metrics, + stepIdentifierCandidates, + originalSlug: normalizedSlug ?? evaluator?.slug, } }) + .filter( + ( + evaluator: EvaluatorDto & { + originalSlug?: string + stepIdentifierCandidates?: string[] + }, + ) => { + const normalizedOriginal = normalizeIdentifier((evaluator as any)?.originalSlug) + if (normalizedOriginal && declaredEvaluatorSlugs.has(normalizedOriginal)) + return true + const normalizedSlug = normalizeIdentifier(evaluator?.slug) + if (normalizedSlug && declaredEvaluatorSlugs.has(normalizedSlug)) return true + const candidateMatch = Array.isArray((evaluator as any)?.stepIdentifierCandidates) + ? (evaluator as any).stepIdentifierCandidates.some((candidate: string) => { + const normalized = normalizeIdentifier(candidate) + return normalized ? declaredEvaluatorSlugs.has(normalized) : false + }) + : false + if (candidateMatch) return true + return false + }, + ) .sort((a, b) => { const diff = (evaluatorCounts[b.slug] ?? 0) - (evaluatorCounts[a.slug] ?? 0) + if (!a.name || !b.name) return diff return diff !== 0 ? diff : a.name.localeCompare(b.name) }) @@ -220,15 +859,21 @@ export const getEvaluatorMetricColumns = ({ .flatMap((ev) => { const keys = Object.keys(ev.metrics || {}) if (!keys.length) return [] + const children = buildEvaluatorMetricColumns({ + evaluator: ev, + runMetricsMap, + resolveStepSlugs, + additionalSlugCandidates: (ev as any)?.stepIdentifierCandidates ?? [], + }).filter(Boolean) + + if (!children.length) return [] + return [ { key: ev.slug, title: ev.name ?? 
ev.slug, collapsible: true, - children: buildEvaluatorMetricColumns({ - evaluator: ev, - runMetricsMap, - }), + children, renderAggregatedData: ({record}) => { const hasEvaluator = Array.isArray((record as any).evaluators) ? (record as any).evaluators.some( @@ -245,20 +890,24 @@ export const getEvaluatorMetricColumns = ({ const pills = Object.keys(ev.metrics || {}) .map((metricKey) => { - const stats = resolveMetricStats(metrics, [ - `${ev.slug}.${metricKey}`, - `${metricKey}`, - `attributes.ag.data.outputs.${metricKey}`, - ]) + const slugCandidates = + resolveStepSlugs?.({record, evaluator: ev}) ?? + (ev.slug ? [ev.slug] : []) + const stats = resolveMetricStats( + metrics, + buildMetricKeyCandidates(metricKey, slugCandidates), + ) const value = summarizeMetric( stats, (ev.metrics as any)?.[metricKey]?.type, ) if (value == null) return null + const definition = (ev.metrics as any)?.[metricKey] + const label = (definition?.label as string | undefined) ?? metricKey return ( @@ -299,7 +948,7 @@ export const getRunMetricColumns = ({ evaluators: EvaluatorDto[] evalType: "auto" | "human" }) => { - if (evalType === "auto") { + if (evalType === "auto" || evalType === "custom") { const runMetricChildren: ColumnsType = GeneralAutoEvalMetricColumns.map( (metricDef) => { const canonicalKey = canonicalizeMetricKey(metricDef.path) @@ -544,6 +1193,43 @@ export const getRunMetricColumns = ({ return runMetricsGroup } +const ApplicationCell = ({record, evalType}) => { + const primaryInvocation = extractPrimaryInvocation(record) + const fallbackVariant = Array.isArray((record as any)?.variants) + ? (record as any)?.variants?.[0] + : undefined + const variantAppName = + fallbackVariant?.appName || + fallbackVariant?.appSlug || + (typeof fallbackVariant?.app_id === "string" ? 
fallbackVariant.app_id : undefined) + const derivedAppId = extractEvaluationAppId(record) + const strippedPrimaryVariantName = stripVariantSuffix(primaryInvocation?.variantName) + const strippedFallbackVariantName = stripVariantSuffix(fallbackVariant?.variantName) + const isAutoEval = evalType === "auto" || evalType === "custom" + + const candidates = isAutoEval + ? [ + (record as any)?.appName, + primaryInvocation?.appName, + variantAppName, + strippedPrimaryVariantName, + strippedFallbackVariantName, + inferAppNameFromEvaluationName((record as any)?.name), + derivedAppId, + ] + : [(record as any)?.appName, primaryInvocation?.appName, variantAppName] + + const appName = candidates.find((value) => { + if (typeof value !== "string") return false + const trimmed = value.trim() + if (!trimmed) return false + return !isUuidLike(trimmed) + }) + + if (appName) return appName + return "-" +} + export const getColumns = ({ evaluations, onVariantNavigation, @@ -556,6 +1242,8 @@ export const getColumns = ({ extractAppId, projectURL, resolveAppId, + preferRunStepSlugs = false, + disableVariantAction = false, }: { evaluations: EvaluationRow[] onVariantNavigation: (params: {revisionId: string; appId?: string}) => void @@ -568,6 +1256,8 @@ export const getColumns = ({ extractAppId: (evaluation: EvaluationRow) => string | undefined projectURL: string resolveAppId?: (evaluation: EvaluationRow) => string | undefined + preferRunStepSlugs?: boolean + disableVariantAction?: boolean }): ColumnsType => { const baseColumns: ColumnsType = [ { @@ -696,6 +1386,7 @@ export const getColumns = ({ scope={scope} projectURL={projectURL} resolveAppId={resolveAppId} + disableVariantAction={disableVariantAction} /> ), }, @@ -709,54 +1400,14 @@ export const getColumns = ({ onHeaderCell: () => ({ style: {minWidth: 160}, }), - render: (_: any, record: EvaluationRow) => { - const primaryInvocation = extractPrimaryInvocation(record) - const fallbackVariant = Array.isArray((record as any)?.variants) - ? 
(record as any)?.variants?.[0] - : undefined - const variantAppName = - fallbackVariant?.appName || - fallbackVariant?.appSlug || - (typeof fallbackVariant?.app_id === "string" - ? fallbackVariant.app_id - : undefined) - const derivedAppId = extractEvaluationAppId(record) - const strippedPrimaryVariantName = stripVariantSuffix( - primaryInvocation?.variantName, - ) - const strippedFallbackVariantName = stripVariantSuffix(fallbackVariant?.variantName) - - const isAutoEval = evalType === "auto" - - const candidates = isAutoEval - ? [ - (record as any)?.appName, - primaryInvocation?.appName, - variantAppName, - strippedPrimaryVariantName, - strippedFallbackVariantName, - inferAppNameFromEvaluationName((record as any)?.name), - derivedAppId, - ] - : [(record as any)?.appName, primaryInvocation?.appName, variantAppName] - - const appName = candidates.find((value) => { - if (typeof value !== "string") return false - const trimmed = value.trim() - if (!trimmed) return false - return !isUuidLike(trimmed) - }) - - if (appName) return appName - if (isAutoEval && derivedAppId) return derivedAppId - return "-" - }, + render: (_, record) => , }) } const evaluatorMetricColumns = getEvaluatorMetricColumns({ evaluations, runMetricsMap, + preferRunStepSlugs, }) // Find index of Status column @@ -846,7 +1497,7 @@ export const getColumns = ({ }, }, ] - } else if (evalType === "auto") { + } else if (evalType === "auto" || evalType === "custom") { const legacyAutoEvals = evaluations.filter((rec) => !rec?.data?.steps) const evaluators = getDefaultStore().get(evaluatorsAtom) diff --git a/web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx b/web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx index b04e71bcfb..5371a12fb4 100644 --- a/web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx +++ b/web/ee/src/components/pages/evaluations/autoEvaluation/AutoEvaluation.tsx @@ -298,7 +298,7 @@ const AutoEvaluation = ({viewType = 
"evaluation", scope = "app"}: AutoEvaluation query: recordAppId ? {app_id: recordAppId} : undefined, }) } else { - router.push(targetPath) + router.push({pathname: targetPath, query: {eval_type: "auto"}}) } }, } diff --git a/web/ee/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx b/web/ee/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx index e01673f60d..ce97fc9c4c 100644 --- a/web/ee/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx +++ b/web/ee/src/components/pages/evaluations/customEvaluation/CustomEvaluation.tsx @@ -1,9 +1,11 @@ -import {useCallback, useMemo, useState} from "react" +import {isValidElement, useCallback, useMemo, useState} from "react" -import {Button, message} from "antd" -import {ColumnsType} from "antd/es/table" +import {Export, Trash} from "@phosphor-icons/react" +import {Button, Space, message} from "antd" +import type {ColumnType, ColumnsType} from "antd/es/table" import {useAtom} from "jotai" import {useRouter} from "next/router" +import {renderToStaticMarkup} from "react-dom/server" import DeleteEvaluationModal from "@/oss/components/DeleteEvaluationModal/DeleteEvaluationModal" import EnhancedTable from "@/oss/components/EnhancedUIs/Table" @@ -14,11 +16,13 @@ import type {EvaluationRow} from "@/oss/components/HumanEvaluations/types" import {useAppId} from "@/oss/hooks/useAppId" import useURL from "@/oss/hooks/useURL" import {EvaluationType} from "@/oss/lib/enums" +import {convertToCsv, downloadCsv} from "@/oss/lib/helpers/fileManipulations" import {buildRevisionsQueryParam} from "@/oss/lib/helpers/url" import useEvaluations from "@/oss/lib/hooks/useEvaluations" import {tempEvaluationAtom} from "@/oss/lib/hooks/usePreviewRunningEvaluations/states/runningEvalAtom" import useRunMetricsMap from "@/oss/lib/hooks/useRunMetricsMap" import {EvaluationStatus} from "@/oss/lib/Types" +import {getAppValues} from "@/oss/state/app" import {buildAppScopedUrl, buildEvaluationNavigationUrl, 
extractEvaluationAppId} from "../utils" @@ -44,6 +48,141 @@ const isPreviewCustomRun = (run: any) => { return hasCustomStep && !isOnlineSource && !isLive } +type ExportableColumn = { + header: string + column: ColumnType +} + +const decodeHtmlEntities = (value: string): string => + value + .replace(/ /gi, " ") + .replace(/&/gi, "&") + .replace(/</gi, "<") + .replace(/>/gi, ">") + .replace(/'/gi, "'") + .replace(/"/gi, '"') + +const stripHtml = (markup: string): string => { + if (!markup) return "" + const withoutTags = markup.replace(/<[^>]+>/g, " ") + return decodeHtmlEntities(withoutTags).replace(/\s+/g, " ").trim() +} + +const nodeToText = (node: any): string => { + if (node === null || node === undefined) return "" + if (typeof node === "string" || typeof node === "number") return String(node) + if (typeof node === "boolean") return node ? "true" : "false" + if (Array.isArray(node)) { + return node.map((item) => nodeToText(item)).filter(Boolean).join(" ") + } + if (isValidElement(node)) { + return stripHtml(renderToStaticMarkup(node)) + } + if (typeof node === "object") { + try { + return stripHtml(renderToStaticMarkup(<>{node})) + } catch (_error) { + return "" + } + } + return "" +} + +const getValueFromDataIndex = ( + record: EvaluationRow, + dataIndex: ColumnType["dataIndex"], +): any => { + if (dataIndex === undefined || dataIndex === null) return undefined + const path = + typeof dataIndex === "number" + ? [dataIndex] + : Array.isArray(dataIndex) + ? dataIndex + : String(dataIndex).split(".") + + return path.reduce((acc: any, key) => { + if (acc === null || acc === undefined) return undefined + if (typeof key === "number") { + return acc?.[key] + } + const candidate = acc?.[key] + if (candidate !== undefined) return candidate + if (typeof key === "string") { + const numericKey = Number.isNaN(Number(key)) ? 
key : Number(key) + return acc?.[numericKey as keyof typeof acc] + } + return undefined + }, record) +} + +const resolveColumnTitle = (title: ColumnType["title"]): string => { + if (title === null || title === undefined) return "" + if (typeof title === "string") return title + if (typeof title === "number" || typeof title === "boolean") return String(title) + if (typeof title === "function") { + try { + const node = title({}) + return nodeToText(node) + } catch (_error) { + return "" + } + } + if (isValidElement(title)) { + return nodeToText(title) + } + return "" +} + +const flattenColumnsForExport = ( + columns: ColumnsType, + parentTitles: string[] = [], +): ExportableColumn[] => { + const flattened: ExportableColumn[] = [] + columns.forEach((col) => { + const currentTitle = resolveColumnTitle(col.title) + const nextParentTitles = currentTitle ? [...parentTitles, currentTitle] : parentTitles + + if ("children" in col && col.children && col.children.length) { + flattened.push(...flattenColumnsForExport(col.children, nextParentTitles)) + return + } + + const header = + nextParentTitles.join(" / ") || + String( + col.key ?? + (Array.isArray(col.dataIndex) + ? col.dataIndex.join(".") + : col.dataIndex ?? ""), + ) + + if (!header.trim()) return + if (String(col.key ?? "").toLowerCase() === "key") return + + flattened.push({ + header, + column: col, + }) + }) + return flattened +} + +const extractColumnValue = ( + column: ColumnType, + record: EvaluationRow, + index: number, +): string => { + const baseValue = getValueFromDataIndex(record, column.dataIndex) + const rendered = column.render ? 
column.render(baseValue, record, index) : baseValue + let text = nodeToText(rendered) + + if (!text && baseValue !== undefined) { + text = nodeToText(baseValue) + } + + return text +} + const CustomEvaluation = ({scope = "app", viewType = "evaluation"}: CustomEvaluationProps) => { const router = useRouter() const routeAppId = useAppId() @@ -129,6 +268,8 @@ const CustomEvaluation = ({scope = "app", viewType = "evaluation"}: CustomEvalua baseAppURL, extractAppId: extractEvaluationAppId, projectURL, + preferRunStepSlugs: true, + disableVariantAction: true, }) }, [ mergedEvaluations, @@ -146,6 +287,11 @@ const CustomEvaluation = ({scope = "app", viewType = "evaluation"}: CustomEvalua [columns, hiddenColumns], ) + const exportColumns = useMemo( + () => flattenColumnsForExport(visibleColumns), + [visibleColumns], + ) + const handleDelete = useCallback( async (ids: string[]) => { setIsDeletingEvaluations(true) @@ -177,16 +323,57 @@ const CustomEvaluation = ({scope = "app", viewType = "evaluation"}: CustomEvalua return viewType === "overview" ? mergedEvaluations.slice(0, 5) : mergedEvaluations }, [mergedEvaluations, viewType]) - const selectedEvaluationsLabel = useMemo(() => { + const selectedKeySet = useMemo(() => { + const set = new Set() + selectedRowKeys.forEach((key) => { + if (key == null) return + const value = key.toString() + if (value) set.add(value) + }) + return set + }, [selectedRowKeys]) + + const recordIndexLookup = useMemo(() => { + const map = new Map() + mergedEvaluations.forEach((evaluation, idx) => { + const key = ( + "id" in evaluation ? evaluation.id : evaluation.key + )?.toString() + if (key) { + map.set(key, idx) + } + }) + return map + }, [mergedEvaluations]) + + const selectedEvaluations = useMemo(() => { if (selectedEvalRecord) { - return selectedEvalRecord.name ?? selectedEvalRecord.key + const selectedId = ( + "id" in selectedEvalRecord ? selectedEvalRecord.id : selectedEvalRecord.key + )?.toString() + const matched = selectedId + ? 
mergedEvaluations.find((evaluation) => { + const evalId = ( + "id" in evaluation ? evaluation.id : evaluation.key + )?.toString() + return evalId === selectedId + }) + : undefined + return matched ? [matched] : [selectedEvalRecord] } - const selectedItems = mergedEvaluations.filter((evaluation) => - selectedRowKeys.includes("id" in evaluation ? evaluation.id : evaluation.key), - ) - if (selectedItems.length === 0) return "Custom evaluation" - return selectedItems.map((item) => ("name" in item ? item.name : item.key)).join(" | ") - }, [selectedEvalRecord, selectedRowKeys, mergedEvaluations]) + if (!selectedKeySet.size) return [] + return mergedEvaluations.filter((evaluation) => { + const key = ("id" in evaluation ? evaluation.id : evaluation.key)?.toString() + return key ? selectedKeySet.has(key) : false + }) + }, [selectedEvalRecord, selectedKeySet, mergedEvaluations]) + + const selectedEvaluationsLabel = useMemo(() => { + if (!selectedEvaluations.length) return "Custom evaluation" + return selectedEvaluations + .map((item) => ("name" in item ? item.name : item.key)) + .join(" | ") + }, [selectedEvaluations]) const handleRowNavigation = useCallback( (record: EvaluationRow) => { @@ -217,30 +404,93 @@ const CustomEvaluation = ({scope = "app", viewType = "evaluation"}: CustomEvalua if (scope === "project") { router.push({ pathname: targetPath, - query: recordAppId ? {app_id: recordAppId} : undefined, + query: recordAppId + ? 
{app_id: recordAppId, eval_type: "custom"} + : {eval_type: "custom"}, }) } else { - router.push(targetPath) + router.push({ + pathname: targetPath, + query: {eval_type: "custom"}, + }) } }, [activeAppId, scope, baseAppURL, projectURL, router], ) + const handleExportSelected = useCallback(() => { + if (!selectedEvaluations.length) { + message.warning("Select at least one evaluation to export") + return + } + + if (!exportColumns.length) { + message.warning("There are no visible columns to export") + return + } + + try { + const rows = selectedEvaluations.map((item) => { + const key = ("id" in item ? item.id : item.key)?.toString() + const recordIndex = key ? recordIndexLookup.get(key) ?? 0 : 0 + const row: Record = {} + + exportColumns.forEach(({header, column}) => { + row[header] = extractColumnValue(column, item, recordIndex) || "" + }) + + return row + }) + + const headers = exportColumns.map(({header}) => header) + + const csvData = convertToCsv(rows, headers) + if (!csvData) { + message.error("Failed to prepare export") + return + } + + const {currentApp} = getAppValues() + const filenameBase = + currentApp?.app_name || + (scope === "project" ? "all_applications" : "evaluations") + const filename = `${filenameBase.replace(/\s+/g, "_")}_custom_evaluations.csv` + downloadCsv(filename, csvData) + } catch (error) { + console.error("Failed to export custom evaluations", error) + message.error("Failed to export evaluations") + } + }, [ + selectedEvaluations, + exportColumns, + recordIndexLookup, + scope, + ]) + return (
-
+ + -
+ { - const ids = selectedEvalRecord - ? [selectedEvalRecord.id] - : selectedRowKeys.map((key) => key?.toString()).filter(Boolean) + const ids = selectedEvaluations + .map((evaluation) => + "id" in evaluation ? evaluation.id : evaluation.key?.toString(), + ) + .filter(Boolean) as string[] if (ids.length) { - await handleDelete(ids as string[]) + await handleDelete(ids) } }} evaluationType={selectedEvaluationsLabel} - isMultiple={!selectedEvalRecord && selectedRowKeys.length > 0} + isMultiple={selectedEvaluations.length > 1} />
) diff --git a/web/ee/src/lib/helpers/traceUtils.ts b/web/ee/src/lib/helpers/traceUtils.ts index cbe3b48a0c..f232711598 100644 --- a/web/ee/src/lib/helpers/traceUtils.ts +++ b/web/ee/src/lib/helpers/traceUtils.ts @@ -80,19 +80,52 @@ export function readInvocationResponse({ const effectiveStepKey = invocationStep?.stepKey ?? stepKey // --- PATH RESOLUTION LOGIC --- - let resolvedPath: string | undefined = undefined + const candidatePaths: string[] = [] + const registerPath = (targetPath?: string) => { + if (!targetPath || typeof targetPath !== "string") return + const trimmed = targetPath.trim() + if (!trimmed) return + candidatePaths.push(trimmed) + const canonical = INVOCATION_OUTPUT_KEY_MAP[trimmed] + if (canonical) { + candidatePaths.push(canonical) + } + if (trimmed === "attributes.ag.data.outputs") { + candidatePaths.push("attributes.ag.data.outputs.outputs") + candidatePaths.push("data.outputs") + candidatePaths.push("outputs") + } else if (trimmed.startsWith("attributes.ag.data.outputs.")) { + const suffix = trimmed.slice("attributes.ag.data.outputs.".length) + if (suffix) { + candidatePaths.push(`data.outputs.${suffix}`) + candidatePaths.push(`outputs.${suffix}`) + } + } else if (trimmed.startsWith("data.outputs.")) { + const suffix = trimmed.slice("data.outputs.".length) + if (suffix) { + candidatePaths.push(`outputs.${suffix}`) + } + } + } + if (path) { - resolvedPath = path - } else if (scenarioData.mappings && Array.isArray(scenarioData.mappings) && effectiveStepKey) { + registerPath(path) + } + + if (scenarioData.mappings && Array.isArray(scenarioData.mappings) && effectiveStepKey) { const mapEntry = scenarioData.mappings.find((m: any) => m.step?.key === effectiveStepKey) if (mapEntry?.step?.path) { - resolvedPath = mapEntry.step.path + registerPath(mapEntry.step.path) } } - // After resolving, apply legacy/custom mapping if needed - if (resolvedPath && INVOCATION_OUTPUT_KEY_MAP[resolvedPath]) { - resolvedPath = INVOCATION_OUTPUT_KEY_MAP[resolvedPath] 
+ + if (!candidatePaths.length) { + registerPath("attributes.ag.data.outputs") } + + const resolvedCandidates = Array.from( + new Set(candidatePaths.filter((p): p is string => typeof p === "string" && p.length)), + ) // --- END PATH RESOLUTION LOGIC --- // --- MAPPING LOGIC FOR TESTSET/TESTCASE INFERENCE --- @@ -138,19 +171,25 @@ export function readInvocationResponse({ // First priority: optimistic result override (e.g., UI enqueue) let rawValue = optimisticResult - if (rawValue === undefined && resolvedPath) { - rawValue = resolvePath(primaryNode, resolvedPath) - if (rawValue === undefined) { - for (const node of candidateNodes.slice(1)) { - rawValue = resolvePath(node, resolvedPath) - if (rawValue !== undefined) break + if (rawValue === undefined && resolvedCandidates.length) { + const sources = [ + ...candidateNodes, + invocationStep?.data, + invocationStep?.result, + invocationStep, + ].filter(Boolean) + + for (const candidate of resolvedCandidates) { + for (const source of sources) { + const resolved = resolvePath(source, candidate) + if (resolved !== undefined) { + rawValue = resolved + break + } + } + if (rawValue !== undefined) { + break } - } - if (rawValue === undefined) { - rawValue = - resolvePath(invocationStep?.data, resolvedPath) ?? - resolvePath(invocationStep?.result, resolvedPath) ?? 
- resolvePath(invocationStep, resolvedPath) } } diff --git a/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts index aa3cba0b58..bcf78a493b 100644 --- a/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/progress.ts @@ -214,7 +214,8 @@ export const scenarioStatusFamily = atomFamily((params: {scenarioId: string; run computedStatus = "success" } else if (allInvSucceeded) { // Auto and online evals treat successful invocations as completion - const isAutoLikeEval = evalType === "auto" || evalType === "online" + const isAutoLikeEval = + evalType === "auto" || evalType === "online" || evalType === "custom" computedStatus = isAutoLikeEval ? "success" : "incomplete" } else if (anyFailed) { computedStatus = "failure" diff --git a/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts index e38cf82887..41abde06a9 100644 --- a/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/assets/atoms/runScopedMetrics.ts @@ -58,13 +58,14 @@ const buildAnnotationSlugMap = (state?: EvaluationRunState): Record { const key = meta?.key const slug = meta?.refs?.evaluator?.slug + if (meta?.kind !== "annotation") return if ( typeof key === "string" && key.startsWith("evaluator-") && typeof slug === "string" && slug.length ) { - map[key] = slug + map[key] = key } }) @@ -87,6 +88,7 @@ const runFetchMetrics = async ( return [] } })() + const hasCachedMetrics = existingMetrics.length > 0 const promise = (async () => { @@ -365,6 +367,49 @@ export const scenarioMetricSelectorFamily = atomFamily< return selectAtom(scenarioMetricsMapFamily(runId), (s) => s?.[scenarioId], deepEqual) }, deepEqual) +const OUTPUT_PREFIX = "attributes.ag.data.outputs." 
+const METRICS_PREFIX = "attributes.ag.metrics." + +const stripPrefixVariants = (value: string, ...prefixes: string[]): string => { + let next = value + prefixes.forEach((prefix) => { + if (next.startsWith(prefix)) { + next = next.slice(prefix.length) + } + }) + return next +} + +const appendOutputCandidates = ( + push: (candidate?: string) => void, + seed: string, + slug?: string, +) => { + if (!seed) return + const tail = stripPrefixVariants(seed, OUTPUT_PREFIX, "outputs.") + if (!tail) return + push(`${OUTPUT_PREFIX}${tail}`) + if (slug) { + push(`${slug}.${OUTPUT_PREFIX}${tail}`) + push(`${OUTPUT_PREFIX}${slug}.${tail}`) + } +} + +const appendMetricCandidates = ( + push: (candidate?: string) => void, + seed: string, + slug?: string, +) => { + if (!seed) return + const tail = stripPrefixVariants(seed, METRICS_PREFIX, "metrics.") + if (!tail) return + push(`${METRICS_PREFIX}${tail}`) + if (slug) { + push(`${slug}.${METRICS_PREFIX}${tail}`) + push(`${METRICS_PREFIX}${slug}.${tail}`) + } +} + /** * Run-scoped single metric value selector * Mirrors the legacy scenarioMetricValueFamily but adds runId and optional stepSlug support. 
@@ -403,16 +448,12 @@ export const scenarioMetricValueFamily = atomFamily( if (slug) { push(`${slug}.${withoutSlug}`) - push(`${slug}.attributes.ag.data.outputs.${withoutSlug}`) - push(`${slug}.attributes.ag.metrics.${withoutSlug}`) - push(`attributes.ag.data.outputs.${slug}.${withoutSlug}`) - push(`attributes.ag.metrics.${slug}.${withoutSlug}`) } - push(`attributes.ag.data.outputs.${withoutSlug}`) - push(`attributes.ag.metrics.${withoutSlug}`) - push(`attributes.ag.data.outputs.${base}`) - push(`attributes.ag.metrics.${base}`) + appendOutputCandidates(push, withoutSlug, slug) + appendMetricCandidates(push, withoutSlug, slug) + appendOutputCandidates(push, base, slug) + appendMetricCandidates(push, base, slug) return candidates } @@ -425,7 +466,9 @@ export const scenarioMetricValueFamily = atomFamily( for (const candidate of candidateKeys) { const resolved = getMetricValueWithAliases(metrics, candidate) - if (resolved !== undefined) return resolved + if (resolved !== undefined) { + return resolved + } } return undefined }, diff --git a/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts index 512cbacdc0..bff8b3681c 100644 --- a/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/assets/helpers/buildRunIndex.ts @@ -9,6 +9,8 @@ export interface ColumnDef { name: string /** "input" | "invocation" | "annotation" */ kind: StepKind + /** Optional marker for where the column originated (auto/custom/human/etc.) */ + origin?: string /** Optional evaluator metric primitive type ("number", "boolean", etc.) 
*/ metricType?: string /** Dot-path used to resolve the value inside the owning step payload / testcase */ @@ -23,6 +25,7 @@ export interface ColumnDef { export interface StepMeta { key: string kind: StepKind + origin?: string /** List of upstream step keys declared in `inputs` */ upstream: string[] /** Raw references blob – may contain application, evaluator, etc. */ @@ -93,6 +96,7 @@ export function buildRunIndex(rawRun: any): RunIndex { steps[s.key] = { key: s.key, kind, + origin: typeof s.origin === "string" ? s.origin : undefined, upstream: (s.inputs ?? []).map((i: any) => i.key), refs: s.references ?? {}, } @@ -122,9 +126,11 @@ export function buildRunIndex(rawRun: any): RunIndex { }) } + const metaForStep = steps[m.step.key] const col: ColumnDef = { name: m.column.name, kind: colKind, + origin: metaForStep?.origin, path: m.step.path, stepKey: m.step.key, } diff --git a/web/ee/src/lib/hooks/useEvaluationRunData/index.ts b/web/ee/src/lib/hooks/useEvaluationRunData/index.ts index 4bf7787e65..df4bef1420 100644 --- a/web/ee/src/lib/hooks/useEvaluationRunData/index.ts +++ b/web/ee/src/lib/hooks/useEvaluationRunData/index.ts @@ -129,7 +129,7 @@ const useEvaluationRunData = (evaluationTableId: string | null, debug = false, r ) ).filter(Boolean) as PreviewTestset[] - if (!fetchedTestsets.length && evalType === "auto") { + if (!fetchedTestsets.length && (evalType === "auto" || evalType === "custom")) { evalAtomStore().set( evaluationRunStateFamily(runId || evaluationTableId), (draft: any) => { @@ -215,7 +215,7 @@ const useEvaluationRunData = (evaluationTableId: string | null, debug = false, r : await fetchLegacyEvaluationData(evaluationTableId as string) if (!rawRun) return null - if (evalType === "auto") { + if (evalType === "auto" || evalType === "custom") { return rawRun } diff --git a/web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts index 5338a4dd33..57230b6c89 100644 --- 
a/web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts +++ b/web/ee/src/lib/hooks/usePreviewEvaluations/assets/utils.ts @@ -501,7 +501,7 @@ export const enrichEvaluationRun = ({ const useEnrichEvaluationRun = ({ evalType = "human", }: { - evalType?: "human" | "auto" | "online" + evalType?: "human" | "auto" | "online" | "custom" } = {}): | (( run: SnakeToCamelCaseKeys, @@ -520,13 +520,20 @@ const useEnrichEvaluationRun = ({ const {data: evaluators, isLoading: loadingEvaluators} = useEvaluators({ preview: true, - queries: evalType === "human" ? {is_human: true} : {}, + queries: + evalType === "human" + ? {is_human: true} + : evalType === "custom" + ? {is_evaluator: true} + : {}, }) + const combinedEvaluators = useMemo(() => { const list: EvaluatorDto[] = [] const seenIds = new Set() const seenSlugs = new Set() const seenKeys = new Set() + const pushEvaluator = (ev: any) => { if (!ev) return const id = typeof ev.id === "string" ? ev.id : undefined @@ -547,6 +554,7 @@ const useEnrichEvaluationRun = ({ ;(Array.isArray(evaluators) ? 
evaluators : []).forEach(pushEvaluator) return list }, [evaluators]) + const {revisions: variantsData, isLoading: _variantsLoading} = useStatelessVariants({ lightLoading: true, }) diff --git a/web/ee/src/lib/hooks/usePreviewEvaluations/index.ts b/web/ee/src/lib/hooks/usePreviewEvaluations/index.ts index c587632f86..dbba797840 100644 --- a/web/ee/src/lib/hooks/usePreviewEvaluations/index.ts +++ b/web/ee/src/lib/hooks/usePreviewEvaluations/index.ts @@ -117,8 +117,6 @@ interface PreviewEvaluationsQueryState { import {searchQueryAtom} from "./states/queryFilterAtoms" import {EnrichedEvaluationRun, EvaluationRun} from "./types" -import useEvaluators from "@/oss/lib/hooks/useEvaluators" - const SCENARIOS_ENDPOINT = "/preview/evaluations/scenarios/" /** @@ -181,13 +179,6 @@ const usePreviewEvaluations = ({ return base }, [flags, propsTypes]) - const {data: humanEvaluators} = useEvaluators({ - preview: true, - queries: { - is_human: !types.includes(EvaluationType.automatic), - }, - }) - const referenceFilters = useMemo(() => { const filters: any[] = [] if (appId) { @@ -201,6 +192,7 @@ const usePreviewEvaluations = ({ const effectiveEvalType = useMemo(() => { if (propsTypes.includes(EvaluationType.online)) return "online" as const if (types.includes(EvaluationType.automatic)) return "auto" as const + if (types.includes(EvaluationType.custom_code_run)) return "custom" as const return "human" as const }, [propsTypes, types]) @@ -330,6 +322,9 @@ const usePreviewEvaluations = ({ const runClone = structuredClone(_run) const runIndex = buildRunIndex(runClone) const result = enrichRun(runClone, previewTestsets?.testsets || [], runIndex) + if (result) { + result.runIndex = runIndex + } if (result && isOnline) { const flags = (result as any).flags || {} diff --git a/web/ee/src/lib/hooks/useRunMetricsMap/index.ts b/web/ee/src/lib/hooks/useRunMetricsMap/index.ts index 2595ab2e55..968cdb47b6 100644 --- a/web/ee/src/lib/hooks/useRunMetricsMap/index.ts +++ 
b/web/ee/src/lib/hooks/useRunMetricsMap/index.ts @@ -35,17 +35,36 @@ const fetchRunMetricsMap = async ( const parts = stepKey.split(".") if (parts.length === 1) { const slug = parts[0] - if (evaluatorSlugs?.has(slug)) { - // This is an evaluator metric, ensure all keys are prefixed - const newStepData: Record = {} - Object.entries(stepData).forEach(([key, value]) => { - const prefixedKey = key.startsWith(`${slug}.`) ? key : `${slug}.${key}` - newStepData[prefixedKey] = value - }) - processedData[stepKey] = newStepData - } else { - // Keep non-evaluator data as is - processedData[stepKey] = stepData + try { + if (evaluatorSlugs?.has(slug)) { + // This is an evaluator metric, ensure all keys are prefixed + const newStepData: Record = {} + Object.entries(stepData).forEach(([key, value]) => { + const prefixedKey = key.startsWith(`${slug}.`) + ? key + : `${slug}.${key}` + newStepData[prefixedKey] = value + }) + processedData[stepKey] = newStepData + } else { + // Keep non-evaluator data as is + processedData[stepKey] = stepData + } + } catch (e) { + if (evaluatorSlugs?.includes(slug)) { + // This is an evaluator metric, ensure all keys are prefixed + const newStepData: Record = {} + Object.entries(stepData).forEach(([key, value]) => { + const prefixedKey = key.startsWith(`${slug}.`) + ? 
key + : `${slug}.${key}` + newStepData[prefixedKey] = value + }) + processedData[stepKey] = newStepData + } else { + // Keep non-evaluator data as is + processedData[stepKey] = stepData + } } } else { // Keep invocation data as is diff --git a/web/ee/src/lib/metricColumnFactory.tsx b/web/ee/src/lib/metricColumnFactory.tsx index d4c6ea4e69..0d9fc5c1e0 100644 --- a/web/ee/src/lib/metricColumnFactory.tsx +++ b/web/ee/src/lib/metricColumnFactory.tsx @@ -3,6 +3,8 @@ import React from "react" import {ColumnsType} from "antd/es/table" import {MetricDetailsPopoverWrapper} from "@/oss/components/HumanEvaluations/assets/MetricDetailsPopover" +import {USEABLE_METRIC_TYPES} from "@/oss/components/pages/observability/drawer/AnnotateDrawer/assets/constants" +import {getMetricsFromEvaluator} from "@/oss/components/pages/observability/drawer/AnnotateDrawer/assets/transforms" import {EvaluatorDto} from "@/oss/lib/hooks/useEvaluators/types" import {buildMetricSorter} from "@/oss/lib/metricSorter" import { @@ -12,6 +14,48 @@ import { getMetricValueWithAliases, } from "@/oss/lib/metricUtils" +const METRIC_OUTPUT_PREFIX = "attributes.ag.data.outputs." +const METRIC_ANALYTICS_PREFIX = "attributes.ag.metrics." + +const toTitleCase = (value: string): string => + value + .replace(/[_\-.]/g, " ") + .replace(/\s+/g, " ") + .trim() + .replace(/\b\w/g, (char) => char.toUpperCase()) + +const normalizeMetricLabel = (raw: unknown): string => { + if (typeof raw !== "string") return "Metric" + const trimmed = raw.trim() + if (!trimmed) return "Metric" + + const stripPrefix = (value: string, prefix: string): string => { + if (value === prefix) return "" + return value.startsWith(prefix) ? 
value.slice(prefix.length) : value + } + + let label = stripPrefix(trimmed, METRIC_OUTPUT_PREFIX) + label = stripPrefix(label, "outputs.") + label = stripPrefix(label, "outputs") + label = stripPrefix(label, METRIC_ANALYTICS_PREFIX) + label = stripPrefix(label, "metrics.") + label = stripPrefix(label, "metrics") + label = label.replace(/^attributes\.ag\./, "") + + label = label.replace(/\[(.+?)\]/g, "$1") + + if (!label) { + label = trimmed + } + + if (label.includes(".")) { + return label + } + + const titled = toTitleCase(label) + return titled || trimmed +} + const resolveMetricStats = ( metrics: Record | undefined, candidates: (string | undefined)[], @@ -44,6 +88,233 @@ export interface BuildEvaluatorMetricColumnsParams { runMetricsMap?: Record> hidePrimitiveTable?: boolean debug?: boolean + resolveStepSlugs?: (params: { + record: EvaluationRow + evaluator: EvaluatorDto + }) => string[] | undefined + additionalSlugCandidates?: string[] +} + +const deriveRunId = (record: EvaluationRow): string | undefined => { + if (!record) return undefined + if ("id" in record && typeof record.id === "string") return record.id + if ("run_id" in record && typeof (record as any).run_id === "string") + return (record as any).run_id + if ("runId" in record && typeof (record as any).runId === "string") return (record as any).runId + if ("key" in record && typeof record.key === "string") return record.key + return undefined +} + +const normalizeSlugList = (candidates?: (string | undefined)[]): string[] => { + const unique = new Set( + (candidates || []) + .map((candidate) => (typeof candidate === "string" ? 
candidate.trim() : "")) + .filter((candidate) => Boolean(candidate)), + ) + return Array.from(unique) as string[] +} + +export const buildMetricKeyCandidates = (metricKey: string, slugCandidates: string[]): string[] => { + const resolvedSlugs = normalizeSlugList(slugCandidates) + const variants = new Set() + + const addOutputVariants = (key: string) => { + if (!key) return + if (key.startsWith("attributes.ag.data.outputs.")) { + variants.add(key) + const tail = key.slice("attributes.ag.data.outputs.".length) + if (tail) variants.add(`outputs.${tail}`) + else variants.add("outputs") + return + } + if (key.startsWith("outputs.")) { + variants.add(key) + const tail = key.slice("outputs.".length) + variants.add(`attributes.ag.data.outputs.${tail}`) + return + } + if (key === "outputs") { + variants.add(key) + variants.add("attributes.ag.data.outputs") + return + } + variants.add(`attributes.ag.data.outputs.${key}`) + } + + const addAnalyticsVariants = (key: string) => { + if (!key) return + if (key.startsWith("attributes.ag.metrics.")) { + variants.add(key) + const tail = key.slice("attributes.ag.metrics.".length) + if (tail) variants.add(`metrics.${tail}`) + else variants.add("metrics") + return + } + if (key.startsWith("metrics.")) { + variants.add(key) + const tail = key.slice("metrics.".length) + variants.add(`attributes.ag.metrics.${tail}`) + return + } + if (key === "metrics") { + variants.add(key) + variants.add("attributes.ag.metrics") + return + } + variants.add(`attributes.ag.metrics.${key}`) + } + + const registerVariants = (key: string) => { + if (!key) return + variants.add(key) + addOutputVariants(key) + addAnalyticsVariants(key) + } + + registerVariants(metricKey) + + const baseVariants = Array.from(variants) + + resolvedSlugs.forEach((slug) => { + if (!slug) return + baseVariants.forEach((variant) => { + variants.add(`${slug}.${variant}`) + }) + }) + + return Array.from(variants) +} + +const flattenMetricDefinitions = ( + schema: Record | undefined, + 
prefix?: string, + acc: Record = {}, +): Record => { + if (!schema || typeof schema !== "object") return acc + + Object.entries(schema).forEach(([key, rawValue]) => { + if (!rawValue || typeof rawValue !== "object") return + const value = rawValue.anyOf?.[0] || rawValue + const name = prefix ? `${prefix}.${key}` : key + const type = value?.type as string | undefined + + if (type === "object" && value?.properties && typeof value.properties === "object") { + flattenMetricDefinitions(value.properties, name, acc) + return + } + + if (type === "array") { + acc[name] = {...value, type} + return + } + + if (type && USEABLE_METRIC_TYPES.includes(type)) { + acc[name] = {...value, type} + } + }) + + return acc +} + +const inferMetricTypeFromStats = (stats: BasicStats | undefined): string | undefined => { + if (!stats) return undefined + const numericCandidates = [(stats as any).mean, (stats as any).sum, (stats as any).max] + if (numericCandidates.some((value) => typeof value === "number")) { + return "number" + } + + const frequency = Array.isArray((stats as any).frequency) + ? 
((stats as any).frequency as any[]) + : undefined + + if (frequency && frequency.length) { + const sampleEntry = frequency.find((entry) => entry?.value !== undefined) + const sample = sampleEntry?.value + const sampleType = typeof sample + if (sampleType === "boolean" || sampleType === "string") return sampleType + if (sampleType === "number") return "number" + } + + return undefined +} + +const extractOutputsTail = (key: string): string | undefined => { + if (!key) return undefined + const lower = key.toLowerCase() + if (lower.includes(METRIC_ANALYTICS_PREFIX)) return undefined + const idx = lower.lastIndexOf(METRIC_OUTPUT_PREFIX) + if (idx >= 0) { + const tail = key.slice(idx + METRIC_OUTPUT_PREFIX.length) + return tail || undefined + } + if (lower.startsWith("outputs.")) { + return key.slice("outputs.".length) + } + if (!lower.startsWith("attributes.ag.metrics")) { + return key + } + return undefined +} + +const inferMetricDefinitionsFromStats = ( + runMetricsMap: Record> | undefined, + slugCandidates: string[], +): Record => { + if (!runMetricsMap) return {} + + const normalizedSlugs = normalizeSlugList(slugCandidates) + const derived = new Map>() + + const includesSlug = (rawKey: string): boolean => { + if (!normalizedSlugs.length) return true + return normalizedSlugs.some((slug) => { + if (!slug) return false + const slugPrefix = `${slug}.` + if (rawKey.startsWith(slugPrefix)) return true + return rawKey.includes(`.${slug}.`) + }) + } + + const recordMetric = (metricKey: string, stats: BasicStats | undefined) => { + if ( + !metricKey || + metricKey.startsWith(METRIC_ANALYTICS_PREFIX) || + metricKey.startsWith("metrics.") || + metricKey === "metrics" || + metricKey.startsWith("metric.") || + metricKey === "metric" + ) + return + const existing = derived.get(metricKey) ?? 
{} + if (!existing.type) { + const inferred = inferMetricTypeFromStats(stats) + if (inferred) existing.type = inferred + } + derived.set(metricKey, existing) + } + + Object.values(runMetricsMap).forEach((metrics) => { + Object.entries(metrics || {}).forEach(([rawKey, stats]) => { + if (typeof rawKey !== "string") return + if (!includesSlug(rawKey)) return + + let stripped = rawKey + for (const slug of normalizedSlugs) { + if (!slug) continue + const slugPrefix = `${slug}.` + if (stripped.startsWith(slugPrefix)) { + stripped = stripped.slice(slugPrefix.length) + break + } + } + + const tail = extractOutputsTail(stripped) + if (!tail) return + recordMetric(tail, stats) + }) + }) + + return Object.fromEntries(derived.entries()) } export function buildEvaluatorMetricColumns({ @@ -51,63 +322,159 @@ export function buildEvaluatorMetricColumns({ runMetricsMap, hidePrimitiveTable = false, debug = false, + resolveStepSlugs, + additionalSlugCandidates = [], }: BuildEvaluatorMetricColumnsParams): ColumnsType { - const metricKeys = Object.keys(evaluator.metrics || {}) - return metricKeys.map((metricKey) => { - const schemaType = evaluator.metrics?.[metricKey]?.type - const sortable = isSortableMetricType(schemaType) - - const analyticsCandidates = [ - `attributes.ag.data.outputs.${metricKey}`, - `attributes.ag.metrics.${metricKey}`, - ] - const baseCandidates = [ - `${evaluator.slug}.${metricKey}`, - metricKey, - ...analyticsCandidates.map((path) => `${evaluator.slug}.${path}`), - ...analyticsCandidates, - ] - - return { - key: `${evaluator.slug}:${metricKey}`, - dataIndex: metricKey, - title: ( -
- {metricKey} -
- ), - sorter: sortable - ? buildMetricSorter((row) => { - const runId = "id" in row ? row.id : (row as any).key - const metrics = runMetricsMap?.[runId] - return resolveMetricStats(metrics, baseCandidates) - }) - : undefined, - render: (_: any, record: EvaluationRow) => { - const hasEvaluator = Array.isArray((record as any).evaluators) - ? (record as any).evaluators.some( - (e: EvaluatorDto) => e.slug === evaluator.slug, - ) - : false - - const runId = ("id" in record ? record.id : (record as any).key) as string - const runMetric = runMetricsMap?.[runId] - const stats = resolveMetricStats(runMetric, baseCandidates) - - return hasEvaluator ? ( - - ) : ( -
- ) - }, - } as any - }) as ColumnsType + const defaultSlugCandidates = normalizeSlugList([ + evaluator.slug, + (evaluator as any)?.slug, + (evaluator as any)?.id, + (evaluator as any)?.key, + ...(additionalSlugCandidates || []), + ]) + + const normalizedMetrics: Record = {} + + const extractType = (candidate: any): string | undefined => { + if (!candidate) return undefined + if (typeof candidate === "string") return candidate + if (Array.isArray(candidate)) { + const str = candidate.find((value) => typeof value === "string") + return typeof str === "string" ? str : undefined + } + if (typeof candidate?.type === "string") return candidate.type + return undefined + } + + const mergeMetricDefinition = (key: string, definition: any) => { + if (!key || !definition) return + const entry = normalizedMetrics[key] || {} + const candidateType = extractType(definition.type ?? definition) + if (candidateType && !entry.type) { + entry.type = candidateType + } + const candidateLabel = definition.label ?? 
definition.title + if (candidateLabel && !entry.label) { + entry.label = candidateLabel + } + if (definition.description && !entry.description) { + entry.description = definition.description + } + normalizedMetrics[key] = entry + } + + const schemaFields = getMetricsFromEvaluator(evaluator) as Record + Object.entries(schemaFields || {}).forEach(([key, definition]) => { + mergeMetricDefinition(key, definition) + }) + + const schemaDefinitions = flattenMetricDefinitions( + evaluator.data?.service?.format?.properties?.outputs?.properties, + ) + Object.entries(schemaDefinitions).forEach(([key, definition]) => { + mergeMetricDefinition(key, definition) + }) + + const settingsValuesDefinitions = flattenMetricDefinitions( + (evaluator as any)?.settings_values?.outputs, + ) + Object.entries(settingsValuesDefinitions).forEach(([key, definition]) => { + mergeMetricDefinition(key, definition) + }) + + const settingsDefinitions = flattenMetricDefinitions((evaluator as any)?.settings?.outputs) + Object.entries(settingsDefinitions).forEach(([key, definition]) => { + mergeMetricDefinition(key, definition) + }) + + const inferredMetricDefinitions = inferMetricDefinitionsFromStats( + runMetricsMap, + defaultSlugCandidates, + ) + Object.entries(inferredMetricDefinitions).forEach(([key, definition]) => { + mergeMetricDefinition(key, definition) + }) + + Object.entries(normalizedMetrics).forEach(([key, entry]) => { + const candidate = + typeof entry.label === "string" && entry.label.trim().length ? entry.label : undefined + entry.label = normalizeMetricLabel(candidate ?? 
key) + }) + + const metricKeys = Object.keys(normalizedMetrics) + const enrichedEvaluator = {...evaluator, metrics: normalizedMetrics} + + const resolveSlugsForRecord = (record: EvaluationRow | undefined): string[] => { + if (!record) return defaultSlugCandidates + const resolved = resolveStepSlugs?.({record, evaluator}) + const normalized = normalizeSlugList(resolved) + if (normalized.length) return normalized + return defaultSlugCandidates + } + + const resolveMetricsForRecord = ( + record: EvaluationRow, + metricKey: string, + ): {runId?: string; candidates: string[]} => { + const runId = deriveRunId(record) + const slugCandidates = resolveSlugsForRecord(record) + const candidates = buildMetricKeyCandidates(metricKey, slugCandidates) + return {runId, candidates} + } + + return metricKeys + .map((metricKey) => { + const schemaType = normalizedMetrics?.[metricKey]?.type + const sortable = isSortableMetricType(schemaType) + + if (schemaType === "object") return null + if (schemaType === "string") return null + const definition = normalizedMetrics[metricKey] || {} + const columnLabel = normalizeMetricLabel(definition.label ?? metricKey) + + return { + key: `${evaluator.slug}:${metricKey}`, + dataIndex: metricKey, + title: ( +
+ {columnLabel} +
+ ), + sorter: sortable + ? buildMetricSorter((row) => { + const {runId, candidates} = resolveMetricsForRecord(row, metricKey) + const metrics = runMetricsMap?.[runId || ""] + return resolveMetricStats(metrics, candidates) + }) + : undefined, + render: (_: any, record: EvaluationRow) => { + const {runId, candidates} = resolveMetricsForRecord(record, metricKey) + const hasEvaluator = Array.isArray((record as any).evaluators) + ? (record as any).evaluators.some( + (e: EvaluatorDto) => e.slug === evaluator.slug, + ) + : false + + const effectiveRunId = runId || "" + const runMetric = runMetricsMap?.[effectiveRunId] + const stats = resolveMetricStats(runMetric, candidates) + const [effectiveSlug] = resolveSlugsForRecord(record) + const popoverSlug = effectiveSlug || evaluator.slug || metricKey + return hasEvaluator ? ( + + ) : ( +
+ ) + }, + } as any + }) + .filter(Boolean) as ColumnsType } diff --git a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx index 0cbe9eede3..8a3e7e4523 100644 --- a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx +++ b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/apps/[app_id]/evaluations/results/[evaluation_id]/index.tsx @@ -4,9 +4,20 @@ import EvalRunDetailsPage from "@/oss/components/EvalRunDetails" const AppEvaluationResultsPage = () => { const router = useRouter() - const t = (router.query.type as string) || "auto" - const evalType = t === "online" ? "online" : "auto" - return + const rawType = + (Array.isArray(router.query.eval_type) + ? router.query.eval_type[0] + : router.query.eval_type) || + (Array.isArray(router.query.type) ? router.query.type[0] : router.query.type) + const normalized = + rawType === "online" + ? "online" + : rawType === "human" + ? "human" + : rawType === "custom" + ? "custom" + : "auto" + return } export default AppEvaluationResultsPage diff --git a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx index be04621e79..8cabe50e3a 100644 --- a/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx +++ b/web/ee/src/pages/w/[workspace_id]/p/[project_id]/evaluations/results/[evaluation_id]/index.tsx @@ -4,9 +4,20 @@ import EvalRunDetailsPage from "@/oss/components/EvalRunDetails" const ProjectEvaluationResultsPage = () => { const router = useRouter() - const t = (router.query.type as string) || "auto" - const evalType = t === "online" ? "online" : "auto" - return + const rawType = + (Array.isArray(router.query.eval_type) + ? 
router.query.eval_type[0] + : router.query.eval_type) || + (Array.isArray(router.query.type) ? router.query.type[0] : router.query.type) + const normalized = + rawType === "online" + ? "online" + : rawType === "human" + ? "human" + : rawType === "custom" + ? "custom" + : "auto" + return } export default ProjectEvaluationResultsPage diff --git a/web/oss/package.json b/web/oss/package.json index 6d4e247c86..eebe2ffeba 100644 --- a/web/oss/package.json +++ b/web/oss/package.json @@ -1,6 +1,6 @@ { "name": "@agenta/oss", - "version": "0.60.2", + "version": "0.61.0", "private": true, "engines": { "node": ">=18" diff --git a/web/oss/src/components/Layout/assets/Breadcrumbs.tsx b/web/oss/src/components/Layout/assets/Breadcrumbs.tsx index 597ede2791..6ed32597f8 100644 --- a/web/oss/src/components/Layout/assets/Breadcrumbs.tsx +++ b/web/oss/src/components/Layout/assets/Breadcrumbs.tsx @@ -42,7 +42,9 @@ const breadcrumbItemsGenerator = (breadcrumbs: BreadcrumbAtom): {title: React.Re copyText={getUniquePartOfId(item.value)} tooltipProps={{placement: "right"}} > - {isUuid(item.label) ? getUniquePartOfId(item.label) : item.label} + + {isUuid(item.label) ? 
getUniquePartOfId(item.label) : item.label} + ) : ( diff --git a/web/oss/src/components/SelectLLMProvider/index.tsx b/web/oss/src/components/SelectLLMProvider/index.tsx index 784bda6ac0..e324c8e02b 100644 --- a/web/oss/src/components/SelectLLMProvider/index.tsx +++ b/web/oss/src/components/SelectLLMProvider/index.tsx @@ -72,7 +72,7 @@ const SelectLLMProvider = ({ const providers = useMemo( () => - labeledProviders.map(({ key, label }) => ({ + labeledProviders.map(({key, label}) => ({ label, options: [label], value: key, diff --git a/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx b/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx index dc3e023f4a..dd8fa3cbbd 100644 --- a/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx +++ b/web/oss/src/components/Sidebar/hooks/useSidebarConfig/index.tsx @@ -17,7 +17,6 @@ import { Gauge, } from "@phosphor-icons/react" -import {useAppId} from "@/oss/hooks/useAppId" import {useCrispChat} from "@/oss/hooks/useCrispChat" import {useSession} from "@/oss/hooks/useSession" import useURL from "@/oss/hooks/useURL" @@ -28,7 +27,6 @@ import {useOrgData} from "@/oss/state/org" import {SidebarConfig} from "../../types" export const useSidebarConfig = () => { - const appId = useAppId() const {doesSessionExist} = useSession() const {currentApp, recentlyVisitedAppId} = useAppsData() const {selectedOrg} = useOrgData() @@ -78,41 +76,41 @@ export const useSidebarConfig = () => { title: "Overview", link: `${appURL || recentlyVisitedAppURL}/overview`, icon: , - isHidden: !appId && !recentlyVisitedAppId, + isHidden: !currentApp && !recentlyVisitedAppId, }, { key: "app-playground-link", title: "Playground", link: `${appURL || recentlyVisitedAppURL}/playground`, icon: , - isHidden: !appId && !recentlyVisitedAppId, + isHidden: !currentApp && !recentlyVisitedAppId, }, { key: "app-variants-link", title: "Registry", link: `${appURL || recentlyVisitedAppURL}/variants`, - isHidden: !appId && !recentlyVisitedAppId, + 
isHidden: !currentApp && !recentlyVisitedAppId, icon: , }, { key: "app-evaluations-link", title: "Evaluations", link: `${appURL || recentlyVisitedAppURL}/evaluations`, - isHidden: (!appId && !recentlyVisitedAppId) || !isDemo(), + isHidden: (!currentApp && !recentlyVisitedAppId) || !isDemo(), icon: , }, { key: "app-traces-link", title: "Traces", icon: , - isHidden: !appId && !recentlyVisitedAppId, + isHidden: !currentApp && !recentlyVisitedAppId, link: `${appURL || recentlyVisitedAppURL}/traces`, }, { key: "app-deployments-link", title: "Deployments", link: `${appURL || recentlyVisitedAppURL}/deployments`, - isHidden: !appId && !recentlyVisitedAppId, + isHidden: !currentApp && !recentlyVisitedAppId, icon: , }, { diff --git a/web/oss/src/components/pages/observability/drawer/AnnotateDrawer/assets/transforms.ts b/web/oss/src/components/pages/observability/drawer/AnnotateDrawer/assets/transforms.ts index 83bfe6e262..cef4b1174f 100644 --- a/web/oss/src/components/pages/observability/drawer/AnnotateDrawer/assets/transforms.ts +++ b/web/oss/src/components/pages/observability/drawer/AnnotateDrawer/assets/transforms.ts @@ -186,29 +186,50 @@ export const getMetricsFromEvaluator = (evaluator: EvaluatorDto): Record = {} - for (const [key, prop] of Object.entries(evalMetricsSchema)) { - if (prop.anyOf?.length > 0) { - const props = prop.anyOf[0] - fields[key] = {value: "", ...props} - } else if (prop.type === "array") { - const {value, items, ...restProps} = prop - fields[key] = { - value: "", - items: { - type: items?.type === "string" ? items?.type : "string", - enum: items?.enum || [], - }, - ...restProps, + const collectMetricFields = ( + schema: Record, + prefix?: string, + target: Record = fields, + ) => { + Object.entries(schema || {}).forEach(([key, rawProp]) => { + if (!rawProp || typeof rawProp !== "object") return + + const props = rawProp.anyOf?.length ? rawProp.anyOf[0] : rawProp + const qualifiedKey = prefix ? 
`${prefix}.${key}` : key + const type = props?.type as string | undefined + + if (type === "object" && props?.properties && typeof props.properties === "object") { + collectMetricFields(props.properties, qualifiedKey, target) + return } - } else if (prop.type && USEABLE_METRIC_TYPES.includes(prop.type)) { - const {value, ...restProps} = prop - fields[key] = { - value: getDefaultValue({property: prop, ignoreObject: true}), - ...restProps, + + if (type === "array") { + const {value, items, ...restProps} = props + target[qualifiedKey] = { + value: "", + items: { + type: items?.type === "string" ? items?.type : "string", + enum: items?.enum || [], + }, + ...restProps, + } + return } - } + + if (type && USEABLE_METRIC_TYPES.includes(type)) { + const {value, ...restProps} = props + target[qualifiedKey] = { + value: getDefaultValue({property: props, ignoreObject: true}), + ...restProps, + } + } + }) + + return target } + collectMetricFields(evalMetricsSchema) + return fields } diff --git a/web/oss/src/components/pages/observability/drawer/TraceContent/index.tsx b/web/oss/src/components/pages/observability/drawer/TraceContent/index.tsx index ee3d380cfb..6f9f84f897 100644 --- a/web/oss/src/components/pages/observability/drawer/TraceContent/index.tsx +++ b/web/oss/src/components/pages/observability/drawer/TraceContent/index.tsx @@ -159,9 +159,7 @@ const TraceContent = ({ title="Copy span id" tooltipProps={{placement: "bottom", arrow: true}} > - - {activeTrace?.span_id || "-"} - + {activeTrace?.span_id || "-"}
diff --git a/web/oss/src/components/pages/observability/drawer/TraceHeader/index.tsx b/web/oss/src/components/pages/observability/drawer/TraceHeader/index.tsx index 47900b856e..f14933fa20 100644 --- a/web/oss/src/components/pages/observability/drawer/TraceHeader/index.tsx +++ b/web/oss/src/components/pages/observability/drawer/TraceHeader/index.tsx @@ -490,9 +490,7 @@ const TraceHeader = ({ copyText={getTraceIdFromNode(displayTrace) || ""} title="Copy trace id" > - - {getTraceIdFromNode(displayTrace) || "-"} - + {getTraceIdFromNode(displayTrace) || "-"} diff --git a/web/oss/src/lib/Types.ts b/web/oss/src/lib/Types.ts index 1a2c86c43b..c5716e292d 100644 --- a/web/oss/src/lib/Types.ts +++ b/web/oss/src/lib/Types.ts @@ -832,9 +832,9 @@ export interface StyleProps { } export interface SettingsPreset { - key: string; - name: string; - values: Record; + key: string + name: string + values: Record } export interface Evaluator { diff --git a/web/oss/src/lib/hooks/useEvaluators/index.ts b/web/oss/src/lib/hooks/useEvaluators/index.ts index f2fe73a266..2444832a55 100644 --- a/web/oss/src/lib/hooks/useEvaluators/index.ts +++ b/web/oss/src/lib/hooks/useEvaluators/index.ts @@ -26,7 +26,6 @@ const useEvaluators = ({ onSuccess, onError, projectId, - ..._rest }: UseEvaluatorsOptions & { preview?: Preview queries?: {is_human: boolean} diff --git a/web/oss/src/lib/hooks/useEvaluators/types.ts b/web/oss/src/lib/hooks/useEvaluators/types.ts index 4d37261c70..acd165910d 100644 --- a/web/oss/src/lib/hooks/useEvaluators/types.ts +++ b/web/oss/src/lib/hooks/useEvaluators/types.ts @@ -18,12 +18,30 @@ export interface EvaluatorData { } } +export interface EvaluatorRevisionDto { + id?: string + slug?: string + evaluator_id?: string + evaluator_variant_id?: string + version?: string + data?: Record + flags?: Record + meta?: Record + tags?: Record +} + +export interface EvaluatorRevisionsResponseDto { + count?: number + evaluator_revisions?: EvaluatorRevisionDto[] +} + export type 
EvaluatorPreviewDto = EvaluatorDto<"payload"> & EvaluatorDto<"response"> & { /** * Computed metrics schema derived from EvaluatorDto.data */ metrics: Record + revision?: EvaluatorRevisionDto } type EvaluatorDtoBase = { diff --git a/web/oss/src/state/app/atoms/fetcher.ts b/web/oss/src/state/app/atoms/fetcher.ts index a52478d6c3..abfbea516f 100644 --- a/web/oss/src/state/app/atoms/fetcher.ts +++ b/web/oss/src/state/app/atoms/fetcher.ts @@ -2,6 +2,8 @@ import {atom} from "jotai" import {atomFamily, atomWithStorage} from "jotai/utils" import {atomWithQuery} from "jotai-tanstack-query" +import axios from "@/oss/lib/api/assets/axiosConfig" +import {getAgentaApiUrl} from "@/oss/lib/helpers/api" import {ListAppsItem, User} from "@/oss/lib/Types" import {fetchAppContainerURL} from "@/oss/services/api" import {fetchAllApps} from "@/oss/services/app" @@ -79,7 +81,7 @@ export const appsQueryAtom = atomWithQuery((get) => { queryKey: ["apps", projectId], queryFn: async () => { const data = await fetchAllApps() - return data + return data.filter((app) => app.app_type !== "custom (sdk)") }, staleTime: 1000 * 60, // 1 minute refetchOnWindowFocus: false, @@ -123,6 +125,28 @@ export const uriQueryAtomFamily = atomFamily((params: {appId: string; variantId? 
}), ) +export const appDetailQueryAtomFamily = atomFamily((appId: string | null) => + atomWithQuery((get) => { + const projectId = get(projectIdAtom) + + return { + queryKey: ["app", appId, projectId], + queryFn: async () => { + if (!appId) return null + const {data} = await axios.get( + `${getAgentaApiUrl()}/apps/${encodeURIComponent(appId)}?project_id=${projectId}`, + ) + return data as ListAppsItem + }, + staleTime: 1000 * 60, + refetchOnWindowFocus: false, + refetchOnReconnect: false, + refetchOnMount: false, + enabled: !!projectId && !!appId, + } + }), +) + const logApps = process.env.NEXT_PUBLIC_LOG_APP_ATOMS === "true" ;[appsQueryAtom, routerAppIdAtom, recentAppIdAtom].forEach((a, i) => diff --git a/web/oss/src/state/app/hooks.ts b/web/oss/src/state/app/hooks.ts index bc6ab09622..f86fff801d 100644 --- a/web/oss/src/state/app/hooks.ts +++ b/web/oss/src/state/app/hooks.ts @@ -4,7 +4,6 @@ import {useQueryClient} from "@tanstack/react-query" import {useAtom, useAtomValue} from "jotai" import {ListAppsItem} from "@/oss/lib/Types" - import {useAppState} from "@/oss/state/appState" import {appsQueryAtom, recentAppIdAtom} from "./atoms/fetcher" @@ -20,8 +19,19 @@ export const useAppsData = () => { const {appId} = useAppState() useEffect(() => { - if (appId) setRecentAppId(appId) - }, [appId, setRecentAppId]) + // Only set recent app from URL when it exists in the filtered apps list + // This avoids enabling app-sidebar for SDK evaluation apps (filtered out) + if (!appId) return + if (Array.isArray(apps)) { + const exists = (apps as ListAppsItem[]).some((app) => app.app_id === appId) + if (exists) { + if (recentAppId !== appId) setRecentAppId(appId) + } else { + if (recentAppId) setRecentAppId(null) + } + } + // If apps haven't loaded yet, do nothing here; the fallback effect below will enforce validity once loaded + }, [appId, apps, recentAppId, setRecentAppId]) useEffect(() => { if (recentAppId && Array.isArray(apps)) { diff --git 
a/web/oss/src/state/evaluators/atoms.ts b/web/oss/src/state/evaluators/atoms.ts index e154603565..aafd2b52f9 100644 --- a/web/oss/src/state/evaluators/atoms.ts +++ b/web/oss/src/state/evaluators/atoms.ts @@ -7,6 +7,8 @@ import {transformApiData} from "@/oss/lib/hooks/useAnnotations/assets/transforme import { EvaluatorDto, EvaluatorPreviewDto, + EvaluatorRevisionDto, + EvaluatorRevisionsResponseDto, EvaluatorsResponseDto, } from "@/oss/lib/hooks/useEvaluators/types" import {Evaluator, EvaluatorConfig} from "@/oss/lib/Types" @@ -34,6 +36,68 @@ const extractKeyFromUri = (uri: unknown): string | undefined => { return undefined } +const isPlainObject = (value: unknown): value is Record => { + return typeof value === "object" && value !== null && !Array.isArray(value) +} + +const mergePlainObjects = (primary: any, fallback: any): any => { + if (isPlainObject(primary) && isPlainObject(fallback)) { + const result: Record = {...fallback} + Object.entries(primary).forEach(([key, value]) => { + result[key] = mergePlainObjects(value, fallback[key]) + }) + return result + } + + if (primary === undefined || primary === null) { + return isPlainObject(fallback) ? {...fallback} : fallback + } + + return primary +} + +const normalizeTags = (candidate: unknown): string[] => { + if (!candidate) return [] + if (Array.isArray(candidate)) { + return candidate + .map((value) => (typeof value === "string" ? value.trim() : String(value))) + .filter(Boolean) + } + if (typeof candidate === "object") { + return Object.entries(candidate as Record) + .filter(([, value]) => Boolean(value)) + .map(([key]) => key.trim()) + .filter(Boolean) + } + if (typeof candidate === "string") { + const trimmed = candidate.trim() + return trimmed ? 
[trimmed] : [] + } + return [] +} + +const mergeTags = (...sources: unknown[]): string[] => { + const set = new Set() + sources.forEach((source) => { + normalizeTags(source).forEach((tag) => set.add(tag)) + }) + return Array.from(set) +} + +const extractRequiresLlmApiKeys = (source: unknown): boolean | undefined => { + if (!source || typeof source !== "object") return undefined + const direct = (source as any).requires_llm_api_keys + if (direct !== undefined) return Boolean(direct) + + const fromFlags = (source as any).flags?.requires_llm_api_keys + if (fromFlags !== undefined) return Boolean(fromFlags) + + const fromMeta = (source as any).meta?.requires_llm_api_keys + if (fromMeta !== undefined) return Boolean(fromMeta) + + return undefined +} + export const evaluatorConfigsQueryAtomFamily = atomFamily( ({projectId: overrideProjectId, appId: overrideAppId, preview}: EvaluatorConfigsParams = {}) => atomWithQuery((get) => { @@ -93,22 +157,13 @@ export const evaluatorsQueryAtomFamily = atomFamily( `/preview/simple/evaluators/query?project_id=${projectId}`, requestBody, ) - const evaluators = (response?.data?.evaluators ?? []).map((item) => { + let evaluators = (response?.data?.evaluators ?? []).map((item) => { const transformed = transformApiData({ data: item, members, }) - const rawTags = item?.tags ?? transformed?.tags - const tags = Array.isArray(rawTags) - ? rawTags - : rawTags && typeof rawTags === "object" - ? Object.values(rawTags as Record) - .map((value) => String(value)) - .filter(Boolean) - : typeof rawTags === "string" - ? [rawTags] - : [] + const tags = mergeTags(item?.tags, transformed?.tags) const rawKey = item?.flags?.evaluator_key ?? @@ -124,10 +179,8 @@ export const evaluatorsQueryAtomFamily = atomFamily( (item as any)?.data?.service?.uri, ) const requiresLlmApiKeys = - item?.requires_llm_api_keys ?? - item?.flags?.requires_llm_api_keys ?? - item?.meta?.requires_llm_api_keys ?? - transformed?.requires_llm_api_keys ?? 
+ extractRequiresLlmApiKeys(item) ?? + extractRequiresLlmApiKeys(transformed) ?? false return { @@ -139,6 +192,117 @@ export const evaluatorsQueryAtomFamily = atomFamily( } }) as EvaluatorPreviewDto[] + if (evaluators.length) { + const revisionRefs = evaluators + .map((ev) => { + const version = + (ev as any)?.version ?? + (ev.meta as any)?.version ?? + undefined + const ref = { + id: ev.id, + slug: ev.slug, + version, + } + if (ref.id || ref.slug || ref.version) return ref + return null + }) + .filter(Boolean) as { + id?: string + slug?: string + version?: string + }[] + + if (revisionRefs.length) { + try { + const revisionResponse = + await axios.post( + `/preview/evaluators/revisions/query?project_id=${projectId}`, + { + evaluator_refs: revisionRefs, + }, + ) + + const revisions = + revisionResponse?.data?.evaluator_revisions ?? [] + + if (revisions.length) { + const byEvaluatorId = new Map< + string, + EvaluatorRevisionDto + >() + const bySlug = new Map() + const byRevisionId = new Map() + + revisions.forEach((revision) => { + if (revision.evaluator_id) { + byEvaluatorId.set(revision.evaluator_id, revision) + } + if (revision.slug) { + bySlug.set(revision.slug, revision) + } + if (revision.id) { + byRevisionId.set(revision.id, revision) + } + }) + + evaluators = evaluators.map((ev) => { + const revision = + (ev.id && byEvaluatorId.get(ev.id)) || + (ev.slug && bySlug.get(ev.slug)) || + (typeof ev.meta?.evaluator_revision_id === + "string" && + byRevisionId.get( + ev.meta.evaluator_revision_id, + )) || + undefined + + if (!revision) return ev + + const mergedData = mergePlainObjects( + ev.data, + revision.data, + ) + const mergedFlags = mergePlainObjects( + ev.flags, + revision.flags, + ) + const mergedMeta = mergePlainObjects( + ev.meta, + revision.meta, + ) + const mergedTags = mergeTags(ev.tags, revision.tags) + + const withRevision: EvaluatorPreviewDto = { + ...ev, + data: mergedData, + flags: mergedFlags, + meta: mergedMeta, + tags: mergedTags, + revision, + 
} + + const requiresLlmApiKeys = + extractRequiresLlmApiKeys(withRevision) ?? + extractRequiresLlmApiKeys(revision) ?? + extractRequiresLlmApiKeys(ev) ?? + false + + return { + ...withRevision, + requires_llm_api_keys: Boolean(requiresLlmApiKeys), + metrics: getMetricsFromEvaluator( + withRevision as EvaluatorDto, + ), + } + }) + } + } catch (error) { + console.warn("Failed to fetch evaluator revisions", error) + } + } + } + return evaluators } diff --git a/web/package.json b/web/package.json index 9228ee9355..8e5ab2468e 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "agenta-web", - "version": "0.60.2", + "version": "0.61.0", "workspaces": [ "ee", "oss",