diff --git a/packages/traceloop-sdk/.flake8 b/packages/traceloop-sdk/.flake8 index b689847557..4cbe545eb8 100644 --- a/packages/traceloop-sdk/.flake8 +++ b/packages/traceloop-sdk/.flake8 @@ -9,4 +9,6 @@ exclude = .venv, .pytest_cache max-line-length = 120 -per-file-ignores = __init__.py:F401 +per-file-ignores = + __init__.py:F401 + traceloop/sdk/generated/**/*.py:E501 diff --git a/packages/traceloop-sdk/poetry.lock b/packages/traceloop-sdk/poetry.lock index e3e764fe3e..8b3399e3dc 100644 --- a/packages/traceloop-sdk/poetry.lock +++ b/packages/traceloop-sdk/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -7,7 +7,6 @@ description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, @@ -20,7 +19,6 @@ description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, @@ -120,7 +118,6 @@ description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.9" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ 
{file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, @@ -135,8 +132,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev", "test"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -149,7 +145,6 @@ description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.7" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "anthropic-0.25.9-py3-none-any.whl", hash = "sha256:d0b17d442160356a531593b237de55d3125cc6fa708f1268c214107e61c81c57"}, {file = "anthropic-0.25.9.tar.gz", hash = "sha256:a4ec810b1cfbf3340af99b6f5bf599a83d66986e0f572a5f3bc4ebcab284f629"}, @@ -175,7 +170,6 @@ description = "High level compatibility layer for multiple asynchronous event lo optional = false python-versions = ">=3.9" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, @@ -192,6 +186,21 @@ doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; 
python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] +[[package]] +name = "argcomplete" +version = "3.6.3" +description = "Bash tab completion for argparse" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "argcomplete-3.6.3-py3-none-any.whl", hash = "sha256:f5007b3a600ccac5d25bbce33089211dfd49eab4a7718da3f10e3082525a92ce"}, + {file = "argcomplete-3.6.3.tar.gz", hash = "sha256:62e8ed4fd6a45864acc8235409461b72c9a28ee785a2011cc5eb78318786c89c"}, +] + +[package.extras] +test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] + [[package]] name = "async-timeout" version = "4.0.3" @@ -199,7 +208,7 @@ description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.7" groups = ["main", "test"] -markers = "python_version < \"3.11\" and platform_python_implementation == \"PyPy\" or python_version == \"3.10\"" +markers = "python_version == \"3.10\"" files = [ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, @@ -212,7 +221,6 @@ description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, @@ -233,7 +241,6 @@ description = "A tool that automatically formats Python code to conform to the P optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = 
"autopep8-2.2.0-py2.py3-none-any.whl", hash = "sha256:05418a981f038969d8bdcd5636bf15948db7555ae944b9f79b5a34b35f1370d4"}, {file = "autopep8-2.2.0.tar.gz", hash = "sha256:d306a0581163ac29908280ad557773a95a9bede072c0fafed6f141f5311f43c1"}, @@ -243,6 +250,59 @@ files = [ pycodestyle = ">=2.11.0" tomli = {version = "*", markers = "python_version < \"3.11\""} +[[package]] +name = "black" +version = "25.12.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "black-25.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f85ba1ad15d446756b4ab5f3044731bf68b777f8f9ac9cdabd2425b97cd9c4e8"}, + {file = "black-25.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:546eecfe9a3a6b46f9d69d8a642585a6eaf348bcbbc4d87a19635570e02d9f4a"}, + {file = "black-25.12.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:17dcc893da8d73d8f74a596f64b7c98ef5239c2cd2b053c0f25912c4494bf9ea"}, + {file = "black-25.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:09524b0e6af8ba7a3ffabdfc7a9922fb9adef60fed008c7cd2fc01f3048e6e6f"}, + {file = "black-25.12.0-cp310-cp310-win_arm64.whl", hash = "sha256:b162653ed89eb942758efeb29d5e333ca5bb90e5130216f8369857db5955a7da"}, + {file = "black-25.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0cfa263e85caea2cff57d8f917f9f51adae8e20b610e2b23de35b5b11ce691a"}, + {file = "black-25.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1a2f578ae20c19c50a382286ba78bfbeafdf788579b053d8e4980afb079ab9be"}, + {file = "black-25.12.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e1b65634b0e471d07ff86ec338819e2ef860689859ef4501ab7ac290431f9b"}, + {file = "black-25.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a3fa71e3b8dd9f7c6ac4d818345237dfb4175ed3bf37cd5a581dbc4c034f1ec5"}, + {file = "black-25.12.0-cp311-cp311-win_arm64.whl", hash = 
"sha256:51e267458f7e650afed8445dc7edb3187143003d52a1b710c7321aef22aa9655"}, + {file = "black-25.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:31f96b7c98c1ddaeb07dc0f56c652e25bdedaac76d5b68a059d998b57c55594a"}, + {file = "black-25.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05dd459a19e218078a1f98178c13f861fe6a9a5f88fc969ca4d9b49eb1809783"}, + {file = "black-25.12.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1f68c5eff61f226934be6b5b80296cf6939e5d2f0c2f7d543ea08b204bfaf59"}, + {file = "black-25.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:274f940c147ddab4442d316b27f9e332ca586d39c85ecf59ebdea82cc9ee8892"}, + {file = "black-25.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:169506ba91ef21e2e0591563deda7f00030cb466e747c4b09cb0a9dae5db2f43"}, + {file = "black-25.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a05ddeb656534c3e27a05a29196c962877c83fa5503db89e68857d1161ad08a5"}, + {file = "black-25.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ec77439ef3e34896995503865a85732c94396edcc739f302c5673a2315e1e7f"}, + {file = "black-25.12.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e509c858adf63aa61d908061b52e580c40eae0dfa72415fa47ac01b12e29baf"}, + {file = "black-25.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:252678f07f5bac4ff0d0e9b261fbb029fa530cfa206d0a636a34ab445ef8ca9d"}, + {file = "black-25.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bc5b1c09fe3c931ddd20ee548511c64ebf964ada7e6f0763d443947fd1c603ce"}, + {file = "black-25.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:0a0953b134f9335c2434864a643c842c44fba562155c738a2a37a4d61f00cad5"}, + {file = "black-25.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2355bbb6c3b76062870942d8cc450d4f8ac71f9c93c40122762c8784df49543f"}, + {file = "black-25.12.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:9678bd991cc793e81d19aeeae57966ee02909877cb65838ccffef24c3ebac08f"}, + {file = "black-25.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:97596189949a8aad13ad12fcbb4ae89330039b96ad6742e6f6b45e75ad5cfd83"}, + {file = "black-25.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:778285d9ea197f34704e3791ea9404cd6d07595745907dd2ce3da7a13627b29b"}, + {file = "black-25.12.0-py3-none-any.whl", hash = "sha256:48ceb36c16dbc84062740049eef990bb2ce07598272e673c17d1a7720c71c828"}, + {file = "black-25.12.0.tar.gz", hash = "sha256:8d3dd9cea14bff7ddc0eb243c811cdb1a011ebb4800a5f0335a01a68654796a7"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +pytokens = ">=0.3.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + [[package]] name = "certifi" version = "2024.12.14" @@ -250,7 +310,6 @@ description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, @@ -263,7 +322,6 @@ description = "The Real First Universal Charset Detector. 
Open, modern and activ optional = false python-versions = ">=3.7" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -359,6 +417,21 @@ files = [ {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] +[[package]] +name = "click" +version = "8.3.1" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6"}, + {file = "click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "colorama" version = "0.4.6" @@ -366,7 +439,6 @@ description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["main", "dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -379,11 +451,44 @@ description = "Fast, scalable unique ID generation" optional = false python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "cuid-0.4.tar.gz", hash = "sha256:74eaba154916a2240405c3631acee708c263ef8fa05a86820b87d0f59f84e978"}, ] +[[package]] +name = "datamodel-code-generator" +version = "0.26.5" +description = "Datamodel Code Generator" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["dev"] +files = [ + {file = "datamodel_code_generator-0.26.5-py3-none-any.whl", hash = "sha256:e32f986b9914a2b45093947043aa0192d704650be93151f78acf5c95676601ce"}, + {file = "datamodel_code_generator-0.26.5.tar.gz", hash = "sha256:c4a94a7dbf7972129882732d9bcee44c9ae090f57c82edd58d237b9d48c40dd0"}, +] + +[package.dependencies] +argcomplete = ">=1.10,<4.0" +black = ">=19.10b0" +genson = ">=1.2.1,<2.0" +inflect = ">=4.1.0,<6.0" +isort = ">=4.3.21,<6.0" +jinja2 = ">=2.10.1,<4.0" +packaging = "*" +pydantic = [ + {version = ">=1.9.0,<2.4.0 || >2.4.0,<3.0", extras = ["email"], markers = "python_version == \"3.10\""}, + {version = ">=1.10.0,<2.4.0 || >2.4.0,<3.0", extras = ["email"], markers = "python_version >= \"3.11\" and python_version < \"4.0\""}, + {version = ">=1.10.0,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.4.0 || >2.4.0,<3.0", extras = ["email"], markers = "python_version >= \"3.12\" and python_version < \"4.0\""}, +] +pyyaml = ">=6.0.1" +toml = {version = ">=0.10.0,<1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +debug = ["PySnooper 
(>=0.4.1,<2.0.0)"] +graphql = ["graphql-core (>=3.2.3,<4.0.0)"] +http = ["httpx"] +validation = ["openapi-spec-validator (>=0.2.8,<0.7.0)", "prance (>=0.18.2)"] + [[package]] name = "deprecated" version = "1.2.15" @@ -391,7 +496,6 @@ description = "Python @deprecated decorator to deprecate old python classes, fun optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320"}, {file = "deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d"}, @@ -410,12 +514,48 @@ description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] +[[package]] +name = "dnspython" +version = "2.8.0" +description = "DNS toolkit" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "dnspython-2.8.0-py3-none-any.whl", hash = "sha256:01d9bbc4a2d76bf0db7c1f729812ded6d912bd318d3b1cf81d30c0f845dbf3af"}, + {file = "dnspython-2.8.0.tar.gz", hash = "sha256:181d3c6996452cb1189c4046c61599b84a5a86e099562ffde77d26984ff26d0f"}, +] + +[package.extras] +dev = ["black (>=25.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.17.0)", "mypy (>=1.17)", "pylint (>=3)", "pytest (>=8.4)", "pytest-cov (>=6.2.0)", "quart-trio (>=0.12.0)", "sphinx (>=8.2.0)", "sphinx-rtd-theme (>=3.0.0)", "twine (>=6.1.0)", "wheel (>=0.45.0)"] +dnssec = ["cryptography (>=45)"] +doh = ["h2 (>=4.2.0)", "httpcore (>=1.0.0)", "httpx (>=0.28.0)"] +doq = ["aioquic 
(>=1.2.0)"] +idna = ["idna (>=3.10)"] +trio = ["trio (>=0.30)"] +wmi = ["wmi (>=1.5.1) ; platform_system == \"Windows\""] + +[[package]] +name = "email-validator" +version = "2.3.0" +description = "A robust email address syntax and deliverability validation library." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "email_validator-2.3.0-py3-none-any.whl", hash = "sha256:80f13f623413e6b197ae73bb10bf4eb0908faf509ad8362c5edeb0be7fd450b4"}, + {file = "email_validator-2.3.0.tar.gz", hash = "sha256:9fc05c37f2f6cf439ff414f8fc46d917929974a82244c20eb10231ba60c54426"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + [[package]] name = "exceptiongroup" version = "1.2.2" @@ -423,7 +563,7 @@ description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["main", "dev", "test"] -markers = "python_version < \"3.11\" and platform_python_implementation == \"PyPy\" or python_version == \"3.10\"" +markers = "python_version == \"3.10\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -439,7 +579,6 @@ description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, @@ -457,7 +596,6 @@ description = "the modular source code checker: pep8 pyflakes and co" optional = false python-versions = ">=3.8.1" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "flake8-7.0.0-py2.py3-none-any.whl", hash = "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"}, {file = "flake8-7.0.0.tar.gz", hash = "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132"}, @@ -475,7 +613,6 @@ description = "A list-like structure which implements collections.abc.MutableSeq optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, @@ -578,7 +715,6 @@ description = "File-system specification" optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "fsspec-2024.12.0-py3-none-any.whl", hash = "sha256:b520aed47ad9804237ff878b504267a3b0b441e97508bd6d2d8774e3db85cee2"}, {file = "fsspec-2024.12.0.tar.gz", hash = "sha256:670700c977ed2fb51e0d9f9253177ed20cbde4a3e5c0283cc5385b5870c8533f"}, @@ -612,6 +748,18 @@ test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe, test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", 
"distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] tqdm = ["tqdm"] +[[package]] +name = "genson" +version = "1.3.0" +description = "GenSON is a powerful, user-friendly JSON Schema generator." +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "genson-1.3.0-py3-none-any.whl", hash = "sha256:468feccd00274cc7e4c09e84b08704270ba8d95232aa280f65b986139cec67f7"}, + {file = "genson-1.3.0.tar.gz", hash = "sha256:e02db9ac2e3fd29e65b5286f7135762e2cd8a986537c075b06fc5f1517308e37"}, +] + [[package]] name = "googleapis-common-protos" version = "1.66.0" @@ -619,7 +767,6 @@ description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, @@ -638,7 +785,7 @@ description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" groups = ["test"] -markers = "(platform_python_implementation == \"PyPy\" or python_version == \"3.10\" or python_version == \"3.11\" or python_version == \"3.12\" or python_version == \"3.13\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine 
== \"WIN32\") and python_version <= \"3.13\"" +markers = "(platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\") and python_version < \"3.14\"" files = [ {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, @@ -726,7 +873,6 @@ description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "grpcio-1.69.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:2060ca95a8db295ae828d0fc1c7f38fb26ccd5edf9aa51a0f44251f5da332e97"}, {file = "grpcio-1.69.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2e52e107261fd8fa8fa457fe44bfadb904ae869d87c1280bf60f93ecd3e79278"}, @@ -795,7 +941,6 @@ description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -808,7 +953,6 @@ description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, @@ -831,7 +975,6 @@ description = "The next generation HTTP client." optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -857,7 +1000,6 @@ description = "Client library to download and publish models, datasets and other optional = false python-versions = ">=3.8.0" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "huggingface_hub-0.27.1-py3-none-any.whl", hash = "sha256:1c5155ca7d60b60c2e2fc38cbb3ffb7f7c3adf48f824015b219af9061771daec"}, {file = "huggingface_hub-0.27.1.tar.gz", hash = "sha256:c004463ca870283909d715d20f066ebd6968c2207dae9393fdffb3c1d4d8f98b"}, @@ -892,8 +1034,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev", "test"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -909,7 +1050,6 @@ description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" groups = ["main"] -markers = 
"platform_python_implementation == \"PyPy\"" files = [ {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, @@ -927,6 +1067,22 @@ perf = ["ipython"] test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] +[[package]] +name = "inflect" +version = "5.6.2" +description = "Correctly generate plurals, singular nouns, ordinals, indefinite articles; convert numbers to words" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "inflect-5.6.2-py3-none-any.whl", hash = "sha256:b45d91a4a28a4e617ff1821117439b06eaa86e2a4573154af0149e9be6687238"}, + {file = "inflect-5.6.2.tar.gz", hash = "sha256:aadc7ed73928f5e014129794bbac03058cca35d0a973a5fc4eb45c7fa26005f9"}, +] + +[package.extras] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] +testing = ["pygments", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\""] + [[package]] name = "inflection" version = "0.5.1" @@ -934,7 +1090,6 @@ description = "A port of Ruby on Rails inflector to Python" optional = false python-versions = ">=3.5" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, @@ -947,20 +1102,33 @@ description = "brain-dead simple 
config-ini parsing" optional = false python-versions = ">=3.7" groups = ["dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + [[package]] name = "jinja2" version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" -groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -979,7 +1147,6 @@ description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b"}, {file = "jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393"}, @@ -1066,7 +1233,6 @@ description = "Apply JSON-Patches (RFC 6902)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, @@ -1082,7 +1248,6 @@ description = "Identify specific nodes in a JSON document (RFC 6901)" optional = false python-versions = ">=3.7" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, @@ -1095,7 +1260,6 @@ description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "langchain-0.2.17-py3-none-any.whl", hash = "sha256:a97a33e775f8de074370aecab95db148b879c794695d9e443c95457dce5eb525"}, {file = "langchain-0.2.17.tar.gz", hash = "sha256:5a99ce94aae05925851777dba45cbf2c475565d1e91cbe7d82c5e329d514627e"}, @@ -1124,7 +1288,6 @@ description = "Building applications with LLMs through composability" optional = false python-versions = "<4.0,>=3.8.1" 
groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "langchain_core-0.2.43-py3-none-any.whl", hash = "sha256:619601235113298ebf8252a349754b7c28d3cf7166c7c922da24944b78a9363a"}, {file = "langchain_core-0.2.43.tar.gz", hash = "sha256:42c2ef6adedb911f4254068b6adc9eb4c4075f6c8cb3d83590d3539a815695f5"}, @@ -1149,7 +1312,6 @@ description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = "<4.0,>=3.8.1" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "langchain_openai-0.1.25-py3-none-any.whl", hash = "sha256:f0b34a233d0d9cb8fce6006c903e57085c493c4f0e32862b99063b96eaedb109"}, {file = "langchain_openai-0.1.25.tar.gz", hash = "sha256:eb116f744f820247a72f54313fb7c01524fba0927120d4e899e5e4ab41ad3928"}, @@ -1167,7 +1329,6 @@ description = "LangChain text splitting utilities" optional = false python-versions = "<4.0,>=3.8.1" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "langchain_text_splitters-0.2.4-py3-none-any.whl", hash = "sha256:2702dee5b7cbdd595ccbe43b8d38d01a34aa8583f4d6a5a68ad2305ae3e7b645"}, {file = "langchain_text_splitters-0.2.4.tar.gz", hash = "sha256:f7daa7a3b0aa8309ce248e2e2b6fc8115be01118d336c7f7f7dfacda0e89bf29"}, @@ -1183,7 +1344,6 @@ description = "Client library to connect to the LangSmith LLM Tracing and Evalua optional = false python-versions = "<4.0,>=3.8.1" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "langsmith-0.1.147-py3-none-any.whl", hash = "sha256:7166fc23b965ccf839d64945a78e9f1157757add228b086141eb03a60d699a15"}, {file = "langsmith-0.1.147.tar.gz", hash = "sha256:2e933220318a4e73034657103b3b1a3a6109cc5db3566a7e8e03be8d6d7def7a"}, @@ -1208,8 +1368,7 @@ version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev"] files = [ {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, @@ -1281,7 +1440,6 @@ description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -1294,7 +1452,6 @@ description = "multidict implementation" optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, @@ -1400,7 +1557,6 @@ description = "Optional static typing for Python" optional = false python-versions = ">=3.9" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "mypy-1.18.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eab0cf6294dafe397c261a75f96dc2c31bffe3b944faa24db5def4e2b0f77c"}, {file = "mypy-1.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a780ca61fc239e4865968ebc5240bb3bf610ef59ac398de9a7421b54e4a207e"}, @@ -1462,7 +1618,6 @@ description = "Type system extensions for programs checked with the mypy type ch optional = false python-versions = 
">=3.8" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -1475,7 +1630,6 @@ description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" groups = ["dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -1522,7 +1676,6 @@ description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "openai-1.59.7-py3-none-any.whl", hash = "sha256:cfa806556226fa96df7380ab2e29814181d56fea44738c2b0e581b462c268692"}, {file = "openai-1.59.7.tar.gz", hash = "sha256:043603def78c00befb857df9f0a16ee76a3af5984ba40cb7ee5e2f40db4646bf"}, @@ -1549,7 +1702,6 @@ description = "OpenTelemetry Python API" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582"}, {file = "opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12"}, @@ -1566,7 +1718,6 @@ description = "OpenTelemetry Protobuf encoding" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = 
"opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a"}, {file = "opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c"}, @@ -1582,7 +1733,6 @@ description = "OpenTelemetry Collector Protobuf over gRPC Exporter" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_exporter_otlp_proto_grpc-1.38.0-py3-none-any.whl", hash = "sha256:7c49fd9b4bd0dbe9ba13d91f764c2d20b0025649a6e4ac35792fb8d84d764bc7"}, {file = "opentelemetry_exporter_otlp_proto_grpc-1.38.0.tar.gz", hash = "sha256:2473935e9eac71f401de6101d37d6f3f0f1831db92b953c7dcc912536158ebd6"}, @@ -1607,7 +1757,6 @@ description = "OpenTelemetry Collector Protobuf over HTTP Exporter" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b"}, {file = "opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b"}, @@ -1629,7 +1778,6 @@ description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Py optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation-0.59b0-py3-none-any.whl", hash = "sha256:44082cc8fe56b0186e87ee8f7c17c327c4c2ce93bdbe86496e600985d74368ee"}, {file = "opentelemetry_instrumentation-0.59b0.tar.gz", hash = "sha256:6010f0faaacdaf7c4dff8aac84e226d23437b331dcda7e70367f6d73a7db1adc"}, @@ -1643,12 +1791,11 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-agno" -version = "0.49.2" +version = 
"0.50.1" description = "OpenTelemetry Agno instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1667,12 +1814,11 @@ url = "../opentelemetry-instrumentation-agno" [[package]] name = "opentelemetry-instrumentation-alephalpha" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Aleph Alpha instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1691,12 +1837,11 @@ url = "../opentelemetry-instrumentation-alephalpha" [[package]] name = "opentelemetry-instrumentation-anthropic" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Anthropic instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1715,12 +1860,11 @@ url = "../opentelemetry-instrumentation-anthropic" [[package]] name = "opentelemetry-instrumentation-bedrock" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Bedrock instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1738,12 +1882,11 @@ url = "../opentelemetry-instrumentation-bedrock" [[package]] name = "opentelemetry-instrumentation-chromadb" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Chroma DB instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1762,12 +1905,11 @@ url = "../opentelemetry-instrumentation-chromadb" [[package]] name = "opentelemetry-instrumentation-cohere" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Cohere instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] 
-markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1786,12 +1928,11 @@ url = "../opentelemetry-instrumentation-cohere" [[package]] name = "opentelemetry-instrumentation-crewai" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry crewAI instrumentation" optional = false python-versions = ">=3.10,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1810,12 +1951,11 @@ url = "../opentelemetry-instrumentation-crewai" [[package]] name = "opentelemetry-instrumentation-google-generativeai" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Google Generative AI instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1834,12 +1974,11 @@ url = "../opentelemetry-instrumentation-google-generativeai" [[package]] name = "opentelemetry-instrumentation-groq" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Groq instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1858,12 +1997,11 @@ url = "../opentelemetry-instrumentation-groq" [[package]] name = "opentelemetry-instrumentation-haystack" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Haystack instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1882,12 +2020,11 @@ url = "../opentelemetry-instrumentation-haystack" [[package]] name = "opentelemetry-instrumentation-lancedb" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Lancedb instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1906,12 +2043,11 @@ url = 
"../opentelemetry-instrumentation-lancedb" [[package]] name = "opentelemetry-instrumentation-langchain" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Langchain instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1930,12 +2066,11 @@ url = "../opentelemetry-instrumentation-langchain" [[package]] name = "opentelemetry-instrumentation-llamaindex" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry LlamaIndex instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1961,7 +2096,6 @@ description = "OpenTelemetry Logging instrumentation" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation_logging-0.59b0-py3-none-any.whl", hash = "sha256:fdd4eddbd093fc421df8f7d356ecb15b320a1f3396b56bce5543048a5c457eea"}, {file = "opentelemetry_instrumentation_logging-0.59b0.tar.gz", hash = "sha256:1b51116444edc74f699daf9002ded61529397100c9bc903c8b9aaa75a5218c76"}, @@ -1973,12 +2107,11 @@ opentelemetry-instrumentation = "0.59b0" [[package]] name = "opentelemetry-instrumentation-marqo" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Marqo instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -1997,12 +2130,11 @@ url = "../opentelemetry-instrumentation-marqo" [[package]] name = "opentelemetry-instrumentation-mcp" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry mcp instrumentation" optional = false python-versions = ">=3.10,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2021,12 +2153,11 @@ url = 
"../opentelemetry-instrumentation-mcp" [[package]] name = "opentelemetry-instrumentation-milvus" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Milvus instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2045,12 +2176,11 @@ url = "../opentelemetry-instrumentation-milvus" [[package]] name = "opentelemetry-instrumentation-mistralai" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Mistral AI instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2069,12 +2199,11 @@ url = "../opentelemetry-instrumentation-mistralai" [[package]] name = "opentelemetry-instrumentation-ollama" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Ollama instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2093,12 +2222,11 @@ url = "../opentelemetry-instrumentation-ollama" [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2117,12 +2245,11 @@ url = "../opentelemetry-instrumentation-openai" [[package]] name = "opentelemetry-instrumentation-openai-agents" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry OpenAI Agents instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2141,12 +2268,11 @@ url = "../opentelemetry-instrumentation-openai-agents" [[package]] name = "opentelemetry-instrumentation-pinecone" -version = "0.49.2" 
+version = "0.50.1" description = "OpenTelemetry Pinecone instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2165,12 +2291,11 @@ url = "../opentelemetry-instrumentation-pinecone" [[package]] name = "opentelemetry-instrumentation-qdrant" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Qdrant instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2194,7 +2319,6 @@ description = "OpenTelemetry Redis instrumentation" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation_redis-0.59b0-py3-none-any.whl", hash = "sha256:8f7494dede5a6bfe5d8f20da67b371a502883398081856378380efef27da0bdf"}, {file = "opentelemetry_instrumentation_redis-0.59b0.tar.gz", hash = "sha256:d7f1c7c55ab57e10e0155c4c65d028a7e436aec7ccc7ccbf1d77e8cd12b55abd"}, @@ -2211,12 +2335,11 @@ instruments = ["redis (>=2.6)"] [[package]] name = "opentelemetry-instrumentation-replicate" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Replicate instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2240,7 +2363,6 @@ description = "OpenTelemetry requests instrumentation" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation_requests-0.59b0-py3-none-any.whl", hash = "sha256:d43121532877e31a46c48649279cec2504ee1e0ceb3c87b80fe5ccd7eafc14c1"}, {file = "opentelemetry_instrumentation_requests-0.59b0.tar.gz", hash = "sha256:9af2ffe3317f03074d7f865919139e89170b6763a0251b68c25e8e64e04b3400"}, @@ -2257,12 +2379,11 @@ instruments = 
["requests (>=2.0,<3.0)"] [[package]] name = "opentelemetry-instrumentation-sagemaker" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry SageMaker instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2283,7 +2404,6 @@ description = "OpenTelemetry SQLAlchemy instrumentation" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation_sqlalchemy-0.59b0-py3-none-any.whl", hash = "sha256:4ef150c49b6d1a8a7328f9d23ff40c285a245b88b0875ed2e5d277a40aa921c8"}, {file = "opentelemetry_instrumentation_sqlalchemy-0.59b0.tar.gz", hash = "sha256:7647b1e63497deebd41f9525c414699e0d49f19efcadc8a0642b715897f62d32"}, @@ -2306,7 +2426,6 @@ description = "Thread context propagation support for OpenTelemetry" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation_threading-0.59b0-py3-none-any.whl", hash = "sha256:76da2fc01fe1dccebff6581080cff9e42ac7b27cc61eb563f3c4435c727e8eca"}, {file = "opentelemetry_instrumentation_threading-0.59b0.tar.gz", hash = "sha256:ce5658730b697dcbc0e0d6d13643a69fd8aeb1b32fa8db3bade8ce114c7975f3"}, @@ -2319,12 +2438,11 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-together" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Together AI instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2343,12 +2461,11 @@ url = "../opentelemetry-instrumentation-together" [[package]] name = "opentelemetry-instrumentation-transformers" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry transformers instrumentation" optional = false python-versions = 
">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2369,7 +2486,6 @@ description = "OpenTelemetry urllib3 instrumentation" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_instrumentation_urllib3-0.59b0-py3-none-any.whl", hash = "sha256:a68c363092cf5db8c67c5778dbb2e4a14554e77baf7d276c374ea75ec926e148"}, {file = "opentelemetry_instrumentation_urllib3-0.59b0.tar.gz", hash = "sha256:2de8d53a746bba043be1bc8f3246e1b131ebb6e94fe73601edd8b2bd91fe35b8"}, @@ -2387,12 +2503,11 @@ instruments = ["urllib3 (>=1.0.0,<3.0.0)"] [[package]] name = "opentelemetry-instrumentation-vertexai" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Vertex AI instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2411,12 +2526,11 @@ url = "../opentelemetry-instrumentation-vertexai" [[package]] name = "opentelemetry-instrumentation-watsonx" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry IBM Watsonx Instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2435,12 +2549,11 @@ url = "../opentelemetry-instrumentation-watsonx" [[package]] name = "opentelemetry-instrumentation-weaviate" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Weaviate instrumentation" optional = false python-versions = ">=3.9,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2459,12 +2572,11 @@ url = "../opentelemetry-instrumentation-weaviate" [[package]] name = "opentelemetry-instrumentation-writer" -version = "0.49.2" +version = "0.50.1" description = "OpenTelemetry Writer instrumentation" optional = false python-versions = 
">=3.10,<4" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [] develop = true @@ -2488,7 +2600,6 @@ description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18"}, {file = "opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468"}, @@ -2504,7 +2615,6 @@ description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b"}, {file = "opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe"}, @@ -2522,7 +2632,6 @@ description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed"}, {file = "opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0"}, @@ -2539,7 +2648,6 @@ description = "OpenTelemetry Semantic Conventions Extension for Large Language M optional = false python-versions = "<4,>=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_semantic_conventions_ai-0.4.13-py3-none-any.whl", hash = "sha256:883a30a6bb5deaec0d646912b5f9f6dcbb9f6f72557b73d0f2560bf25d13e2d5"}, {file = "opentelemetry_semantic_conventions_ai-0.4.13.tar.gz", hash = 
"sha256:94efa9fb4ffac18c45f54a3a338ffeb7eedb7e1bb4d147786e77202e159f0036"}, @@ -2552,7 +2660,6 @@ description = "Web util for OpenTelemetry" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "opentelemetry_util_http-0.59b0-py3-none-any.whl", hash = "sha256:6d036a07563bce87bf521839c0671b507a02a0d39d7ea61b88efa14c6e25355d"}, {file = "opentelemetry_util_http-0.59b0.tar.gz", hash = "sha256:ae66ee91be31938d832f3b4bc4eb8a911f6eddd38969c4a871b1230db2a0a560"}, @@ -2651,7 +2758,6 @@ description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" groups = ["main", "dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -2664,7 +2770,6 @@ description = "Powerful data structures for data analysis, time series, and stat optional = false python-versions = ">=3.9" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pandas-2.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22c2e866f7209ebc3a8f08d75766566aae02bcc91d196935a1d9e59c7b990ac9"}, {file = "pandas-2.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3583d348546201aff730c8c47e49bc159833f971c2899d6097bce68b9112a4f1"}, @@ -2752,7 +2857,6 @@ description = "Type annotations for pandas" optional = false python-versions = ">=3.10" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pandas_stubs-2.3.2.250926-py3-none-any.whl", hash = "sha256:81121818453dcfe00f45c852f4dceee043640b813830f6e7bd084a4ef7ff7270"}, {file = "pandas_stubs-2.3.2.250926.tar.gz", hash = "sha256:c64b9932760ceefb96a3222b953e6a251321a9832a28548be6506df473a66406"}, @@ -2769,12 +2873,28 @@ 
description = "Utility library for gitignore style pattern matching of file path optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] +[[package]] +name = "platformdirs" +version = "4.5.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.10" +groups = ["dev"] +files = [ + {file = "platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31"}, + {file = "platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda"}, +] + +[package.extras] +docs = ["furo (>=2025.9.25)", "proselint (>=0.14)", "sphinx (>=8.2.3)", "sphinx-autodoc-typehints (>=3.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.4.2)", "pytest-cov (>=7)", "pytest-mock (>=3.15.1)"] +type = ["mypy (>=1.18.2)"] + [[package]] name = "pluggy" version = "1.5.0" @@ -2782,7 +2902,6 @@ description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" groups = ["dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -2799,7 +2918,6 @@ description = "Accelerated property cache" optional = false python-versions = ">=3.9" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = 
"propcache-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6"}, {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2"}, @@ -2892,7 +3010,6 @@ description = "" optional = false python-versions = ">=3.8" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888"}, {file = "protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a"}, @@ -2914,7 +3031,6 @@ description = "Python style guide checker" optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, @@ -2926,8 +3042,7 @@ version = "2.10.5" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" -groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev", "test"] files = [ {file = "pydantic-2.10.5-py3-none-any.whl", hash = "sha256:4dd4e322dbe55472cb7ca7e73f4b63574eecccf2835ffa2af9021ce113c83c53"}, {file = "pydantic-2.10.5.tar.gz", hash = "sha256:278b38dbbaec562011d659ee05f63346951b3a248a6f3642e1bc68894ea2b4ff"}, @@ -2935,6 +3050,7 @@ files = [ [package.dependencies] annotated-types = ">=0.6.0" +email-validator = {version = ">=2.0.0", optional = true, markers = "extra == \"email\""} pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" @@ -2948,8 +3064,7 @@ version = "2.27.2" description = "Core functionality for Pydantic 
validation and serialization" optional = false python-versions = ">=3.8" -groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev", "test"] files = [ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, @@ -3063,7 +3178,6 @@ description = "passive checker of Python programs" optional = false python-versions = ">=3.8" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, @@ -3076,7 +3190,6 @@ description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" groups = ["dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, @@ -3100,7 +3213,6 @@ description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -3120,7 +3232,6 @@ description = "A pytest plugin that allows you recording of network interactions optional = false python-versions = ">=3.7" groups 
= ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pytest_recording-0.13.2-py3-none-any.whl", hash = "sha256:3820fe5743d1ac46e807989e11d073cb776a60bdc544cf43ebca454051b22d13"}, {file = "pytest_recording-0.13.2.tar.gz", hash = "sha256:000c3babbb466681457fd65b723427c1779a0c6c17d9e381c3142a701e124877"}, @@ -3141,7 +3252,6 @@ description = "pytest-sugar is a plugin for pytest that changes the default look optional = false python-versions = "*" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pytest-sugar-1.0.0.tar.gz", hash = "sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a"}, {file = "pytest_sugar-1.0.0-py3-none-any.whl", hash = "sha256:70ebcd8fc5795dc457ff8b69d266a4e2e8a74ae0c3edc749381c64b5246c8dfd"}, @@ -3162,7 +3272,6 @@ description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -3171,6 +3280,21 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "pytokens" +version = "0.3.0" +description = "A Fast, spec compliant Python 3.14+ tokenizer that runs on older Pythons." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytokens-0.3.0-py3-none-any.whl", hash = "sha256:95b2b5eaf832e469d141a378872480ede3f251a5a5041b8ec6e581d3ac71bbf3"}, + {file = "pytokens-0.3.0.tar.gz", hash = "sha256:2f932b14ed08de5fcf0b391ace2642f858f1394c0857202959000b68ed7a458a"}, +] + +[package.extras] +dev = ["black", "build", "mypy", "pytest", "pytest-cov", "setuptools", "tox", "twine", "wheel"] + [[package]] name = "pytz" version = "2025.2" @@ -3178,7 +3302,6 @@ description = "World timezone definitions, modern and historical" optional = false python-versions = "*" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -3190,8 +3313,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" +groups = ["main", "dev", "test"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -3255,7 +3377,6 @@ description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -3360,7 +3481,6 @@ description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -3383,7 +3503,6 @@ description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -3399,7 +3518,6 @@ description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -3412,7 +3530,6 @@ description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" groups = 
["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -3425,7 +3542,6 @@ description = "Database Abstraction Library" optional = false python-versions = ">=3.7" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da36c3b0e891808a7542c5c89f224520b9a16c7f5e4d6a1156955605e54aef0e"}, {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e7402ff96e2b073a98ef6d6142796426d705addd27b9d26c3b32dbaa06d7d069"}, @@ -3522,7 +3638,6 @@ description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, @@ -3539,7 +3654,6 @@ description = "ANSI color formatting for output in terminal" optional = false python-versions = ">=3.9" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "termcolor-2.5.0-py3-none-any.whl", hash = "sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8"}, {file = "termcolor-2.5.0.tar.gz", hash = "sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f"}, @@ -3555,7 +3669,6 @@ description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.9" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash 
= "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, @@ -3604,7 +3717,6 @@ description = "" optional = false python-versions = ">=3.7" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2"}, {file = "tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e"}, @@ -3631,6 +3743,19 @@ dev = ["tokenizers[testing]"] docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["dev"] +markers = "python_version == \"3.10\"" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.2.1" @@ -3638,7 +3763,7 @@ description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["dev", "test"] -markers = "python_version < \"3.11\" and platform_python_implementation == \"PyPy\" or python_version == \"3.10\"" +markers = "python_version == \"3.10\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, 
@@ -3681,7 +3806,6 @@ description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -3704,7 +3828,6 @@ description = "Typing stubs for colorama" optional = false python-versions = ">=3.9" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "types_colorama-0.4.15.20250801-py3-none-any.whl", hash = "sha256:b6e89bd3b250fdad13a8b6a465c933f4a5afe485ea2e2f104d739be50b13eea9"}, {file = "types_colorama-0.4.15.20250801.tar.gz", hash = "sha256:02565d13d68963d12237d3f330f5ecd622a3179f7b5b14ee7f16146270c357f5"}, @@ -3717,7 +3840,6 @@ description = "Typing stubs for pytz" optional = false python-versions = ">=3.9" groups = ["dev"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "types_pytz-2025.2.0.20251108-py3-none-any.whl", hash = "sha256:0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c"}, {file = "types_pytz-2025.2.0.20251108.tar.gz", hash = "sha256:fca87917836ae843f07129567b74c1929f1870610681b4c92cb86a3df5817bdb"}, @@ -3775,7 +3897,6 @@ description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" groups = ["main", "dev", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -3788,7 +3909,6 @@ description = "Provider of IANA time zone data" optional = false python-versions = ">=2" groups = ["test"] -markers = 
"platform_python_implementation == \"PyPy\"" files = [ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -3838,7 +3958,6 @@ description = "Automatically mock your HTTP interactions to simplify and speed u optional = false python-versions = ">=3.9" groups = ["test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "vcrpy-7.0.0-py2.py3-none-any.whl", hash = "sha256:55791e26c18daa363435054d8b35bd41a4ac441b6676167635d1b37a71dbe124"}, {file = "vcrpy-7.0.0.tar.gz", hash = "sha256:176391ad0425edde1680c5b20738ea3dc7fb942520a48d2993448050986b3a50"}, @@ -3863,7 +3982,6 @@ description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = ">=3.8" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, @@ -3953,7 +4071,6 @@ description = "Yet another URL library" optional = false python-versions = ">=3.9" groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, @@ -4051,7 +4168,6 @@ description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\"" files = [ {file = 
"zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, @@ -4071,4 +4187,4 @@ datasets = [] [metadata] lock-version = "2.1" python-versions = ">=3.10,<4" -content-hash = "16ac125b5109af187aa3e992c2e3b09154a6215bd9e0f202b6e687818ab964d6" +content-hash = "8aca3eb79bb4564f07dcd538dfb49777f714a4b39faaa3de650608ef81838d3f" diff --git a/packages/traceloop-sdk/pyproject.toml b/packages/traceloop-sdk/pyproject.toml index 313f31328d..898cec8b63 100644 --- a/packages/traceloop-sdk/pyproject.toml +++ b/packages/traceloop-sdk/pyproject.toml @@ -83,6 +83,7 @@ mypy = "^1.18.2" types-requests = "^2.31.0" types-colorama = "^0.4.15" pandas-stubs = "*" +datamodel-code-generator = "^0.26.0" [tool.poetry.group.test.dependencies] openai = "^1.31.1" diff --git a/packages/traceloop-sdk/traceloop/sdk/evaluator/__init__.py b/packages/traceloop-sdk/traceloop/sdk/evaluator/__init__.py index b1233374b8..618c701c3c 100644 --- a/packages/traceloop-sdk/traceloop/sdk/evaluator/__init__.py +++ b/packages/traceloop-sdk/traceloop/sdk/evaluator/__init__.py @@ -1,9 +1,10 @@ from .evaluator import Evaluator from .config import EvaluatorDetails -from .evaluators_made_by_traceloop import EvaluatorMadeByTraceloop +from .evaluators_made_by_traceloop import EvaluatorMadeByTraceloop, create_evaluator __all__ = [ "Evaluator", "EvaluatorDetails", "EvaluatorMadeByTraceloop", + "create_evaluator", ] diff --git a/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluator.py b/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluator.py index dd30718cfb..6fa72d0f3d 100644 --- a/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluator.py +++ b/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluator.py @@ -1,5 +1,6 @@ import httpx from typing import Dict, Optional, Any, List +from pydantic import ValidationError from .field_mapping import 
normalize_task_output, get_field_suggestions, format_field_help from .model import ( @@ -11,6 +12,25 @@ ) from .stream_client import SSEClient from .config import EvaluatorDetails +from ..generated.evaluators import get_request_model + + +def _validate_evaluator_input(slug: str, input: Dict[str, str]) -> None: + """Validate input against the evaluator's request model if available. + + Args: + slug: The evaluator slug (e.g., "pii-detector") + input: Dictionary of input field names to values + + Raises: + ValueError: If input fails validation against the request model + """ + request_model = get_request_model(slug) + if request_model: + try: + request_model(**input) + except ValidationError as e: + raise ValueError(f"Invalid input for '{slug}': {e}") from e class Evaluator: @@ -94,6 +114,8 @@ async def run_experiment_evaluator( Returns: ExecutionResponse: The evaluation result from SSE stream """ + _validate_evaluator_input(evaluator_slug, input) + request = self._build_evaluator_request( task_id, experiment_id, experiment_run_id, input, evaluator_version, evaluator_config ) @@ -136,6 +158,8 @@ async def trigger_experiment_evaluator( Returns: str: The execution_id that can be used to check results later """ + _validate_evaluator_input(evaluator_slug, input) + request = self._build_evaluator_request( task_id, experiment_id, experiment_run_id, input, evaluator_version, evaluator_config ) diff --git a/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluators_made_by_traceloop.py b/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluators_made_by_traceloop.py index 8887bf86be..75c2da1931 100644 --- a/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluators_made_by_traceloop.py +++ b/packages/traceloop-sdk/traceloop/sdk/evaluator/evaluators_made_by_traceloop.py @@ -1,729 +1,120 @@ -from typing import Optional, Dict, Any, List -from .config import EvaluatorDetails - - -class EvaluatorMadeByTraceloop: - """ - Factory class for creating made by traceloop evaluators with 
proper configuration. - - This class provides easy-to-use factory methods for all made by traceloop evaluators, - with type hints and documentation for their configuration options. - - Example: - >>> from traceloop.sdk.evaluator import EvaluatorMadeByTraceloop - >>> - >>> evaluators = [ - ... EvaluatorMadeByTraceloop.pii_detector(probability_threshold=0.8), - ... EvaluatorMadeByTraceloop.toxicity_detector(threshold=0.7), - ... ] - """ - - @staticmethod - def pii_detector( - probability_threshold: float = 0.5, - ) -> EvaluatorDetails: - """ - PII (Personally Identifiable Information) detector evaluator. - - Required task output fields: - - text: The text to check for PII - - Args: - probability_threshold: Minimum probability threshold for detecting PII (0.0-1.0) - - Returns: - EvaluatorDetails configured for PII detection - """ - config: Dict[str, Any] = {"probability_threshold": probability_threshold} - return EvaluatorDetails(slug="pii-detector", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def toxicity_detector( - threshold: float = 0.5, - ) -> EvaluatorDetails: - """ - Toxicity detector evaluator. - - Required task output fields: - - text: The text to check for toxicity - - Args: - threshold: Minimum toxicity threshold for flagging content (0.0-1.0) - - Returns: - EvaluatorDetails configured for toxicity detection - """ - config: Dict[str, Any] = {"threshold": threshold} - - return EvaluatorDetails(slug="toxicity-detector", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def prompt_injection( - threshold: float = 0.5, - ) -> EvaluatorDetails: - """ - Prompt injection detector evaluator. 
- - Required task output fields: - - prompt: The prompt to check for prompt injection attempts - - Args: - threshold: Minimum threshold for detecting prompt injection attempts (0.0-1.0) - - Returns: - EvaluatorDetails configured for prompt injection detection - """ - config: Dict[str, Any] = {"threshold": threshold} - return EvaluatorDetails(slug="prompt-injection", version=None, config=config, required_input_fields=["prompt"]) - - @staticmethod - def regex_validator( - regex: str, - should_match: bool = True, - case_sensitive: bool = True, - dot_include_nl: bool = False, - multi_line: bool = False, - ) -> EvaluatorDetails: - """ - Regular expression validator evaluator. - - Required task output fields: - - text: The text to validate against the regex pattern - - Args: - regex: The regular expression pattern to match against - should_match: If True, pass when pattern matches; if False, pass when pattern doesn't match - case_sensitive: Whether the regex matching should be case-sensitive - dot_include_nl: Whether the dot (.) should match newline characters - multi_line: Whether to enable multi-line mode (^ and $ match line boundaries) - - Returns: - EvaluatorDetails configured for regex validation - """ - config: Dict[str, Any] = { - "regex": regex, - "should_match": should_match, - "case_sensitive": case_sensitive, - "dot_include_nl": dot_include_nl, - "multi_line": multi_line, - } - - return EvaluatorDetails(slug="regex-validator", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def json_validator( - enable_schema_validation: bool = False, - schema_string: Optional[str] = None, - ) -> EvaluatorDetails: - """ - JSON validator evaluator. 
- - Required task output fields: - - text: The JSON text to validate - - Args: - enable_schema_validation: Whether to validate against a JSON schema - schema_string: JSON schema string to validate against (required if enable_schema_validation is True) - - Returns: - EvaluatorDetails configured for JSON validation - """ - config: Dict[str, Any] = { - "enable_schema_validation": enable_schema_validation, - } - if schema_string: - config["schema_string"] = schema_string - - return EvaluatorDetails(slug="json-validator", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def placeholder_regex( - regex: str, - placeholder_name: str, - should_match: bool = True, - case_sensitive: bool = True, - dot_include_nl: bool = False, - multi_line: bool = False, - ) -> EvaluatorDetails: - """ - Placeholder regex evaluator - validates that placeholders match a regex pattern. - - Required task output fields: - - text: The text to validate against the regex pattern - - placeholder_value: The value of the placeholder to validate - - Args: - regex: The regular expression pattern to match against - placeholder_name: Name of the placeholder to validate - should_match: If True, pass when pattern matches; if False, pass when pattern doesn't match - case_sensitive: Whether the regex matching should be case-sensitive - dot_include_nl: Whether the dot (.) 
should match newline characters - multi_line: Whether to enable multi-line mode (^ and $ match line boundaries) - - Returns: - EvaluatorDetails configured for placeholder regex validation - """ - config: Dict[str, Any] = { - "regex": regex, - "placeholder_name": placeholder_name, - "should_match": should_match, - "case_sensitive": case_sensitive, - "dot_include_nl": dot_include_nl, - "multi_line": multi_line, - } - - return EvaluatorDetails( - slug="placeholder-regex", - version=None, - config=config, - required_input_fields=["text", "placeholder_value"], - ) - - @staticmethod - def char_count( - ) -> EvaluatorDetails: - """ - Character count evaluator - counts the number of characters in text. - - Required task output fields: - - text: The text to count characters in - - Returns: - EvaluatorDetails configured for character counting - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails(slug="char-count", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def char_count_ratio( - ) -> EvaluatorDetails: - """ - Character count ratio evaluator - measures the ratio of characters between two texts. - - Required task output fields: - - numerator_text: The numerator text for ratio calculation - - denominator_text: The denominator text for ratio calculation - - Returns: - EvaluatorDetails configured for character count ratio calculation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="char-count-ratio", - version=None, - config=config, - required_input_fields=["numerator_text", "denominator_text"], - ) - - @staticmethod - def word_count() -> EvaluatorDetails: - """ - Word count evaluator - counts the number of words in text. 
- - Required task output fields: - - text: The text to count words in - - Returns: - EvaluatorDetails configured for word counting - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails(slug="word-count", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def word_count_ratio( - ) -> EvaluatorDetails: - """ - Word count ratio evaluator - measures the ratio of words between two texts. - - Required task output fields: - - numerator_text: The numerator text for ratio calculation -+ - denominator_text: The denominator text for ratio calculation - - Returns: - EvaluatorDetails configured for word count ratio calculation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="word-count-ratio", - version=None, - config=config, - required_input_fields=["numerator_text", "denominator_text"], - ) - - @staticmethod - def answer_relevancy( - ) -> EvaluatorDetails: - """ - Answer relevancy evaluator - verifies responses address the query. - - Required task output fields: - - question: The input question - - answer: The answer to evaluate - - Returns: - EvaluatorDetails configured for answer relevancy evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="answer-relevancy", - version=None, - config=config, - required_input_fields=["question", "answer"], - ) - - @staticmethod - def faithfulness( - ) -> EvaluatorDetails: - """ - Faithfulness evaluator - detects hallucinations and verifies facts. 
- - Required task output fields: - - question: The input question - - completion: The completion to evaluate for faithfulness - - context: The context to verify against - - Returns: - EvaluatorDetails configured for faithfulness evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="faithfulness", - version=None, - config=config, - required_input_fields=["question", "completion", "context"], - ) - - @staticmethod - def context_relevance( - ) -> EvaluatorDetails: - """ - Context relevance evaluator - validates context relevance. - - Required task output fields: - - query: The user's query or question - - context: The retrieved context to evaluate for relevance - - Returns: - EvaluatorDetails configured for context relevance evaluation - """ - config: Dict[str, Any] = {} +""" +Factory class for creating Traceloop evaluators with proper configuration. - return EvaluatorDetails( - slug="context-relevance", - version=None, - config=config, - required_input_fields=["query", "context"], - ) +This module dynamically generates factory methods from the generated.evaluators registry. +""" - @staticmethod - def profanity_detector() -> EvaluatorDetails: - """ - Profanity detector evaluator - flags inappropriate language. +from typing import Any, List - Required task output fields: - - text: The text to check for profanity - - Returns: - EvaluatorDetails configured for profanity detection - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails(slug="profanity-detector", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def sexism_detector( - threshold: float = 0.5, - ) -> EvaluatorDetails: - """ - Sexism detector evaluator - detects sexist language and bias. 
- - Required task output fields: - - text: The text to check for sexism - - Args: - threshold: Minimum threshold for detecting sexism (0.0-1.0) - - Returns: - EvaluatorDetails configured for sexism detection - """ - config: Dict[str, Any] = {"threshold": threshold} - - return EvaluatorDetails(slug="sexism-detector", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def secrets_detector( - ) -> EvaluatorDetails: - """ - Secrets detector evaluator - monitors for credential and key leaks. - - Required task output fields: - - text: The text to check for secrets - - Returns: - EvaluatorDetails configured for secrets detection - """ - config: Dict[str, Any] = {} - return EvaluatorDetails(slug="secrets-detector", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def sql_validator( - ) -> EvaluatorDetails: - """ - SQL validator evaluator - validates SQL queries. - - Required task output fields: - - text: The SQL query to validate - - Returns: - EvaluatorDetails configured for SQL validation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails(slug="sql-validator", version=None, config=config, required_input_fields=["text"]) - - @staticmethod - def semantic_similarity( - ) -> EvaluatorDetails: - """ - Semantic similarity evaluator - measures semantic similarity between texts. - - Required task output fields: - - completion: The completion text to compare - - reference: The reference text to compare against - - Returns: - EvaluatorDetails configured for semantic similarity evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="semantic-similarity", - version=None, - config=config, - required_input_fields=["completion", "reference"], - ) - - @staticmethod - def agent_goal_accuracy( - ) -> EvaluatorDetails: - """ - Agent goal accuracy evaluator - validates agent goal achievement. 
- - Required task output fields: - - question: The input question or goal - - completion: The agent's completion - - reference: The reference answer or goal - - Returns: - EvaluatorDetails configured for agent goal accuracy evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="agent-goal-accuracy", - version=None, - config=config, - required_input_fields=["question", "completion", "reference"], - ) - - @staticmethod - def topic_adherence( - ) -> EvaluatorDetails: - """ - Topic adherence evaluator - validates topic adherence. - - Required task output fields: - - question: The input question or goal - - completion: The completion text to evaluate - - reference_topics: The expected topic or topics - - Returns: - EvaluatorDetails configured for topic adherence evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="topic-adherence", - version=None, - config=config, - required_input_fields=["question", "completion", "reference_topics"], - ) - - @staticmethod - def perplexity( - ) -> EvaluatorDetails: - """ - Perplexity evaluator - measures text perplexity from prompt. - - Required task output fields: - - prompt: The prompt to measure perplexity for - - Returns: - EvaluatorDetails configured for perplexity measurement - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="perplexity", - version=None, - config=config, - required_input_fields=["prompt"], - ) - - @staticmethod - def answer_completeness( - ) -> EvaluatorDetails: - """ - Answer completeness evaluator - measures how completely responses use relevant context. 
- - Required task output fields: - - question: The input question - - completion: The completion to evaluate - - context: The context to evaluate against - - Returns: - EvaluatorDetails configured for answer completeness evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="answer-completeness", - version=None, - config=config, - required_input_fields=["question", "completion", "context"], - ) - - @staticmethod - def answer_correctness( - ) -> EvaluatorDetails: - """ - Answer correctness evaluator - evaluates factual accuracy by comparing answers against ground truth. - - Required task output fields: - - question: The input question - - completion: The completion to evaluate - - ground_truth: The ground truth answer - - Returns: - EvaluatorDetails configured for answer correctness evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="answer-correctness", - version=None, - config=config, - required_input_fields=["question", "completion", "ground_truth"], - ) - - @staticmethod - def uncertainty_detector( - ) -> EvaluatorDetails: - """ - Uncertainty detector evaluator - generates responses and measures model uncertainty from logprobs. - - Required task output fields: - - prompt: The prompt to evaluate uncertainty for - - Returns: - EvaluatorDetails configured for uncertainty detection - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="uncertainty-detector", - version=None, - config=config, - required_input_fields=["prompt"], - ) - - @staticmethod - def agent_tool_error_detector( - ) -> EvaluatorDetails: - """ - Agent tool error detector evaluator - detects errors or failures during tool execution. 
- - Required task output fields: - - tool_input: The input parameters passed to the tool - - tool_output: The output or response from the tool execution - - Returns: - EvaluatorDetails configured for agent tool error detection - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="agent-tool-error-detector", - version=None, - config=config, - required_input_fields=["tool_input", "tool_output"], - ) - - @staticmethod - def agent_flow_quality( - threshold: float = 0.5, - conditions: List[str] = [], - ) -> EvaluatorDetails: - """ - Agent flow quality evaluator - validates agent trajectories against user-defined natural language tests. - - Required task output fields: - - trajectory_prompts: The prompts extracted from the span attributes (llm.prompts.*) - - trajectory_completions: The completions extracted from the span attributes (llm.completions.*) - Args: - threshold: Minimum threshold for detecting tool errors (0.0-1.0) - conditions: List of conditions in natural language to evaluate the agent flow quality against - - Returns: - EvaluatorDetails configured for agent flow quality evaluation - """ - config: Dict[str, Any] = { - "threshold": threshold, - "conditions": conditions, - } - - return EvaluatorDetails( - slug="agent-flow-quality", - version=None, - config=config, - required_input_fields=["trajectory_prompts", "trajectory_completions"], - ) - - @staticmethod - def agent_efficiency( - ) -> EvaluatorDetails: - """ - Agent efficiency evaluator - evaluates agent efficiency by checking for redundant calls and optimal paths. 
- - Required task output fields: - - trajectory_prompts: The prompts extracted from the span attributes (llm.prompts.*) - - trajectory_completions: The completions extracted from the span attributes (llm.completions.*) - - Returns: - EvaluatorDetails configured for agent efficiency evaluation - """ - config: Dict[str, Any] = {} - - return EvaluatorDetails( - slug="agent-efficiency", - version=None, - config=config, - required_input_fields=["trajectory_prompts", "trajectory_completions"], - ) - - @staticmethod - def agent_goal_completeness( - ) -> EvaluatorDetails: - """ - Agent goal completeness evaluator - measures whether the agent successfully accomplished all user goals. - - Required task output fields: - - trajectory_prompts: The prompts extracted from the span attributes (llm.prompts.*) - - trajectory_completions: The completions extracted from the span attributes (llm.completions.*) - - Returns: - EvaluatorDetails configured for agent goal completeness evaluation - """ - config: Dict[str, Any] = {} +from ..generated.evaluators import REQUEST_MODELS +from .config import EvaluatorDetails - return EvaluatorDetails( - slug="agent-goal-completeness", - version=None, - config=config, - required_input_fields=["trajectory_prompts", "trajectory_completions"], - ) - @staticmethod - def instruction_adherence( - ) -> EvaluatorDetails: - """ - Instruction adherence evaluator - measures how well the LLM response follows given instructions. 
+def _get_required_fields(slug: str) -> List[str]: + """Get required input fields for an evaluator from its request model.""" + model = REQUEST_MODELS.get(slug) + if not model: + return [] + return [name for name, field in model.model_fields.items() if field.is_required()] - Required task output fields: - - instructions: The instructions to evaluate against - - response: The response to evaluate - Returns: - EvaluatorDetails configured for instruction adherence evaluation - """ - config: Dict[str, Any] = {} +def _get_config_fields(slug: str) -> dict: + """Get config fields (non-required) with their defaults from the request model.""" + model = REQUEST_MODELS.get(slug) + if not model: + return {} + config_fields = {} + for name, field in model.model_fields.items(): + if not field.is_required(): + config_fields[name] = field.default + return config_fields - return EvaluatorDetails( - slug="instruction-adherence", - version=None, - config=config, - required_input_fields=["instructions", "response"], - ) - @staticmethod - def conversation_quality( - ) -> EvaluatorDetails: - """ - Conversation quality evaluator - evaluates conversation quality based on tone, - clarity, flow, responsiveness, and transparency. 
+def _slug_to_method_name(slug: str) -> str: + """Convert slug like 'pii-detector' to method name like 'pii_detector'.""" + return slug.replace("-", "_") - Required task output fields: - - prompts: The conversation prompts (flattened dict with llm.prompts.X.content/role) - - completions: The conversation completions (flattened dict with llm.completions.X.content/role) - Returns: - EvaluatorDetails configured for conversation quality evaluation - """ - config: Dict[str, Any] = {} +def _method_name_to_slug(method_name: str) -> str: + """Convert method name like 'pii_detector' to slug like 'pii-detector'.""" + return method_name.replace("_", "-") - return EvaluatorDetails( - slug="conversation-quality", - version=None, - config=config, - required_input_fields=["prompts", "completions"], - ) - @staticmethod - def intent_change( - ) -> EvaluatorDetails: - """ - Intent change evaluator - detects whether the user's primary intent or workflow - changed significantly during a conversation. +def create_evaluator(slug: str, **config: Any) -> EvaluatorDetails: + """Create an EvaluatorDetails for the given slug with optional config. 
- Required task output fields: - - prompts: The conversation prompts (flattened dict with llm.prompts.X.content/role) - - completions: The conversation completions (flattened dict with llm.completions.X.content/role) + Args: + slug: The evaluator slug (e.g., "pii-detector") + **config: Configuration options for the evaluator - Returns: - EvaluatorDetails configured for intent change detection - """ - config: Dict[str, Any] = {} + Returns: + EvaluatorDetails configured for the specified evaluator - return EvaluatorDetails( - slug="intent-change", - version=None, - config=config, - required_input_fields=["prompts", "completions"], - ) + Example: + >>> from traceloop.sdk.evaluator import create_evaluator + >>> evaluator = create_evaluator("pii-detector", probability_threshold=0.8) + """ + if slug not in REQUEST_MODELS: + available = ", ".join(sorted(REQUEST_MODELS.keys())) + raise ValueError(f"Unknown evaluator slug: '{slug}'. Available: {available}") + + # Remove None values from config + config = {k: v for k, v in config.items() if v is not None} + return EvaluatorDetails( + slug=slug, + version=None, + config=config, + required_input_fields=_get_required_fields(slug), + ) + + +class _EvaluatorMadeByTraceloopMeta(type): + """Metaclass that dynamically generates evaluator factory methods.""" + + def __getattr__(cls, name: str) -> Any: + """Dynamically create factory methods for any evaluator slug.""" + slug = _method_name_to_slug(name) + if slug in REQUEST_MODELS: + + def factory(**config: Any) -> EvaluatorDetails: + return create_evaluator(slug, **config) + + factory.__name__ = name + config_fields = list(_get_config_fields(slug).keys()) or "none" + factory.__doc__ = f"Create {slug} evaluator. 
Config fields: {config_fields}" + return factory + raise AttributeError(f"'{cls.__name__}' has no attribute '{name}'") + + def __dir__(cls) -> List[str]: + """List all available evaluator methods.""" + methods = list(super().__dir__()) + for slug in REQUEST_MODELS: + methods.append(_slug_to_method_name(slug)) + return methods + + +class EvaluatorMadeByTraceloop(metaclass=_EvaluatorMadeByTraceloopMeta): + """ + Factory class for creating Traceloop evaluators with proper configuration. - @staticmethod - def tone_detection( - ) -> EvaluatorDetails: - """ - Tone detection evaluator - classifies emotional tone of responses (joy, anger, sadness, etc.). + All evaluator slugs from the registry are available as methods. + Methods are dynamically generated from REQUEST_MODELS. - Required task output fields: - - text: The text to analyze for tone + Example: + >>> from traceloop.sdk.evaluator import EvaluatorMadeByTraceloop + >>> + >>> evaluators = [ + ... EvaluatorMadeByTraceloop.pii_detector(probability_threshold=0.8), + ... EvaluatorMadeByTraceloop.toxicity_detector(threshold=0.7), + ... EvaluatorMadeByTraceloop.faithfulness(), + ... ] - Returns: - EvaluatorDetails configured for tone detection - """ - config: Dict[str, Any] = {} + Available evaluators (auto-generated from registry): + - pii_detector, toxicity_detector, prompt_injection + - regex_validator, json_validator, sql_validator + - faithfulness, answer_relevancy, context_relevance + - agent_goal_accuracy, agent_efficiency, agent_flow_quality + - and more... 
(use dir(EvaluatorMadeByTraceloop) to see all) + """ - return EvaluatorDetails( - slug="tone-detection", - version=None, - config=config, - required_input_fields=["text"], - ) + pass diff --git a/packages/traceloop-sdk/traceloop/sdk/evaluator/model.py b/packages/traceloop-sdk/traceloop/sdk/evaluator/model.py index 1fc4636221..e5e6a8f5b1 100644 --- a/packages/traceloop-sdk/traceloop/sdk/evaluator/model.py +++ b/packages/traceloop-sdk/traceloop/sdk/evaluator/model.py @@ -1,7 +1,9 @@ import datetime -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, TypeVar, Type from pydantic import BaseModel, RootModel +T = TypeVar('T', bound=BaseModel) + class InputExtractor(BaseModel): source: str @@ -42,3 +44,20 @@ class ExecutionResponse(BaseModel): execution_id: str result: Dict[str, Any] + + def typed_result(self, model: Type[T]) -> T: + """Parse result into a typed Pydantic model. + + Args: + model: The Pydantic model class to parse the result into + + Returns: + An instance of the provided model class + + Example: + from traceloop.sdk.generated.evaluators import PIIDetectorResponse + result = await evaluator.run_experiment_evaluator(...) + pii = result.typed_result(PIIDetectorResponse) + print(pii.has_pii) # IDE autocomplete works! 
+ """ + return model(**self.result) diff --git a/packages/traceloop-sdk/traceloop/sdk/generated/__init__.py b/packages/traceloop-sdk/traceloop/sdk/generated/__init__.py new file mode 100644 index 0000000000..59399a0eda --- /dev/null +++ b/packages/traceloop-sdk/traceloop/sdk/generated/__init__.py @@ -0,0 +1,16 @@ +# Generated code modules +# DO NOT EDIT MANUALLY - Regenerate with scripts in /scripts/ + +from .evaluators import ( + REQUEST_MODELS, + RESPONSE_MODELS, + get_request_model, + get_response_model, +) + +__all__ = [ + "REQUEST_MODELS", + "RESPONSE_MODELS", + "get_request_model", + "get_response_model", +] diff --git a/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/__init__.py b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/__init__.py new file mode 100644 index 0000000000..5241cf6102 --- /dev/null +++ b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/__init__.py @@ -0,0 +1,162 @@ +# generated by datamodel-codegen +# Models for v2/evaluators/execute endpoints from OpenAPI spec +# +# DO NOT EDIT MANUALLY - Regenerate with: +# ./scripts/generate-models.sh /path/to/swagger.json + +from .request import ( + AgentEfficiencyRequest, + AgentFlowQualityRequest, + AgentGoalAccuracyRequest, + AgentGoalCompletenessRequest, + AgentToolErrorDetectorRequest, + AnswerCompletenessRequest, + AnswerCorrectnessRequest, + AnswerRelevancyRequest, + CharCountRatioRequest, + CharCountRequest, + ContextRelevanceRequest, + ConversationQualityRequest, + FaithfulnessRequest, + InstructionAdherenceRequest, + IntentChangeRequest, + JSONValidatorRequest, + PIIDetectorRequest, + PerplexityRequest, + PlaceholderRegexRequest, + ProfanityDetectorRequest, + PromptInjectionRequest, + PromptPerplexityRequest, + RegexValidatorRequest, + SQLValidatorRequest, + SecretsDetectorRequest, + SemanticSimilarityRequest, + SexismDetectorRequest, + ToneDetectionRequest, + TopicAdherenceRequest, + ToxicityDetectorRequest, + UncertaintyDetectorRequest, + 
WordCountRatioRequest, + WordCountRequest, +) + +from .registry import ( + REQUEST_MODELS, + RESPONSE_MODELS, + get_request_model, + get_response_model, +) + +from .response import ( + AgentEfficiencyResponse, + AgentFlowQualityResponse, + AgentGoalAccuracyResponse, + AgentGoalCompletenessResponse, + AgentToolErrorDetectorResponse, + AnswerCompletenessResponse, + AnswerCorrectnessResponse, + AnswerRelevancyResponse, + CharCountRatioResponse, + CharCountResponse, + ContextRelevanceResponse, + ConversationQualityResponse, + ErrorResponse, + FaithfulnessResponse, + InstructionAdherenceResponse, + IntentChangeResponse, + JSONValidatorResponse, + PIIDetectorResponse, + PerplexityResponse, + PlaceholderRegexResponse, + ProfanityDetectorResponse, + PromptInjectionResponse, + PromptPerplexityResponse, + RegexValidatorResponse, + SQLValidatorResponse, + SecretsDetectorResponse, + SemanticSimilarityResponse, + SexismDetectorResponse, + ToneDetectionResponse, + TopicAdherenceResponse, + ToxicityDetectorResponse, + UncertaintyDetectorResponse, + WordCountRatioResponse, + WordCountResponse, +) + +__all__ = [ + # Registry functions + "REQUEST_MODELS", + "RESPONSE_MODELS", + "get_request_model", + "get_response_model", + # Evaluator request models + "AgentEfficiencyRequest", + "AgentFlowQualityRequest", + "AgentGoalAccuracyRequest", + "AgentGoalCompletenessRequest", + "AgentToolErrorDetectorRequest", + "AnswerCompletenessRequest", + "AnswerCorrectnessRequest", + "AnswerRelevancyRequest", + "CharCountRatioRequest", + "CharCountRequest", + "ContextRelevanceRequest", + "ConversationQualityRequest", + "FaithfulnessRequest", + "InstructionAdherenceRequest", + "IntentChangeRequest", + "JSONValidatorRequest", + "PIIDetectorRequest", + "PerplexityRequest", + "PlaceholderRegexRequest", + "ProfanityDetectorRequest", + "PromptInjectionRequest", + "PromptPerplexityRequest", + "RegexValidatorRequest", + "SQLValidatorRequest", + "SecretsDetectorRequest", + "SemanticSimilarityRequest", + 
"SexismDetectorRequest", + "ToneDetectionRequest", + "TopicAdherenceRequest", + "ToxicityDetectorRequest", + "UncertaintyDetectorRequest", + "WordCountRatioRequest", + "WordCountRequest", + # Evaluator response models + "AgentEfficiencyResponse", + "AgentFlowQualityResponse", + "AgentGoalAccuracyResponse", + "AgentGoalCompletenessResponse", + "AgentToolErrorDetectorResponse", + "AnswerCompletenessResponse", + "AnswerCorrectnessResponse", + "AnswerRelevancyResponse", + "CharCountRatioResponse", + "CharCountResponse", + "ContextRelevanceResponse", + "ConversationQualityResponse", + "ErrorResponse", + "FaithfulnessResponse", + "InstructionAdherenceResponse", + "IntentChangeResponse", + "JSONValidatorResponse", + "PIIDetectorResponse", + "PerplexityResponse", + "PlaceholderRegexResponse", + "ProfanityDetectorResponse", + "PromptInjectionResponse", + "PromptPerplexityResponse", + "RegexValidatorResponse", + "SQLValidatorResponse", + "SecretsDetectorResponse", + "SemanticSimilarityResponse", + "SexismDetectorResponse", + "ToneDetectionResponse", + "TopicAdherenceResponse", + "ToxicityDetectorResponse", + "UncertaintyDetectorResponse", + "WordCountRatioResponse", + "WordCountResponse", +] diff --git a/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/registry.py b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/registry.py new file mode 100644 index 0000000000..56a02362e9 --- /dev/null +++ b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/registry.py @@ -0,0 +1,168 @@ +""" +Registry mapping evaluator slugs to their request/response Pydantic models. + +This enables type-safe validation of inputs and parsing of outputs. 
+ +DO NOT EDIT MANUALLY - Regenerate with: + ./scripts/generate-models.sh /path/to/swagger.json +""" + +from typing import Dict, Type, Optional +from pydantic import BaseModel + +from .request import ( + AgentEfficiencyRequest, + AgentFlowQualityRequest, + AgentGoalAccuracyRequest, + AgentGoalCompletenessRequest, + AgentToolErrorDetectorRequest, + AnswerCompletenessRequest, + AnswerCorrectnessRequest, + AnswerRelevancyRequest, + CharCountRatioRequest, + CharCountRequest, + ContextRelevanceRequest, + ConversationQualityRequest, + FaithfulnessRequest, + InstructionAdherenceRequest, + IntentChangeRequest, + JSONValidatorRequest, + PIIDetectorRequest, + PerplexityRequest, + PlaceholderRegexRequest, + ProfanityDetectorRequest, + PromptInjectionRequest, + PromptPerplexityRequest, + RegexValidatorRequest, + SQLValidatorRequest, + SecretsDetectorRequest, + SemanticSimilarityRequest, + SexismDetectorRequest, + ToneDetectionRequest, + TopicAdherenceRequest, + ToxicityDetectorRequest, + UncertaintyDetectorRequest, + WordCountRatioRequest, + WordCountRequest, +) + +from .response import ( + AgentEfficiencyResponse, + AgentFlowQualityResponse, + AgentGoalAccuracyResponse, + AgentGoalCompletenessResponse, + AgentToolErrorDetectorResponse, + AnswerCompletenessResponse, + AnswerCorrectnessResponse, + AnswerRelevancyResponse, + CharCountRatioResponse, + CharCountResponse, + ContextRelevanceResponse, + ConversationQualityResponse, + FaithfulnessResponse, + InstructionAdherenceResponse, + IntentChangeResponse, + JSONValidatorResponse, + PIIDetectorResponse, + PerplexityResponse, + PlaceholderRegexResponse, + ProfanityDetectorResponse, + PromptInjectionResponse, + PromptPerplexityResponse, + RegexValidatorResponse, + SQLValidatorResponse, + SecretsDetectorResponse, + SemanticSimilarityResponse, + SexismDetectorResponse, + ToneDetectionResponse, + TopicAdherenceResponse, + ToxicityDetectorResponse, + UncertaintyDetectorResponse, + WordCountRatioResponse, + WordCountResponse, +) + + +# 
Mapping from evaluator slug to request model +REQUEST_MODELS: Dict[str, Type[BaseModel]] = { + "agent-efficiency": AgentEfficiencyRequest, + "agent-flow-quality": AgentFlowQualityRequest, + "agent-goal-accuracy": AgentGoalAccuracyRequest, + "agent-goal-completeness": AgentGoalCompletenessRequest, + "agent-tool-error-detector": AgentToolErrorDetectorRequest, + "answer-completeness": AnswerCompletenessRequest, + "answer-correctness": AnswerCorrectnessRequest, + "answer-relevancy": AnswerRelevancyRequest, + "char-count": CharCountRequest, + "char-count-ratio": CharCountRatioRequest, + "context-relevance": ContextRelevanceRequest, + "conversation-quality": ConversationQualityRequest, + "faithfulness": FaithfulnessRequest, + "instruction-adherence": InstructionAdherenceRequest, + "intent-change": IntentChangeRequest, + "json-validator": JSONValidatorRequest, + "perplexity": PerplexityRequest, + "pii-detector": PIIDetectorRequest, + "placeholder-regex": PlaceholderRegexRequest, + "profanity-detector": ProfanityDetectorRequest, + "prompt-injection": PromptInjectionRequest, + "prompt-perplexity": PromptPerplexityRequest, + "regex-validator": RegexValidatorRequest, + "secrets-detector": SecretsDetectorRequest, + "semantic-similarity": SemanticSimilarityRequest, + "sexism-detector": SexismDetectorRequest, + "sql-validator": SQLValidatorRequest, + "tone-detection": ToneDetectionRequest, + "topic-adherence": TopicAdherenceRequest, + "toxicity-detector": ToxicityDetectorRequest, + "uncertainty-detector": UncertaintyDetectorRequest, + "word-count": WordCountRequest, + "word-count-ratio": WordCountRatioRequest, +} + +# Mapping from evaluator slug to response model +RESPONSE_MODELS: Dict[str, Type[BaseModel]] = { + "agent-efficiency": AgentEfficiencyResponse, + "agent-flow-quality": AgentFlowQualityResponse, + "agent-goal-accuracy": AgentGoalAccuracyResponse, + "agent-goal-completeness": AgentGoalCompletenessResponse, + "agent-tool-error-detector": AgentToolErrorDetectorResponse, 
+ "answer-completeness": AnswerCompletenessResponse, + "answer-correctness": AnswerCorrectnessResponse, + "answer-relevancy": AnswerRelevancyResponse, + "char-count": CharCountResponse, + "char-count-ratio": CharCountRatioResponse, + "context-relevance": ContextRelevanceResponse, + "conversation-quality": ConversationQualityResponse, + "faithfulness": FaithfulnessResponse, + "instruction-adherence": InstructionAdherenceResponse, + "intent-change": IntentChangeResponse, + "json-validator": JSONValidatorResponse, + "perplexity": PerplexityResponse, + "pii-detector": PIIDetectorResponse, + "placeholder-regex": PlaceholderRegexResponse, + "profanity-detector": ProfanityDetectorResponse, + "prompt-injection": PromptInjectionResponse, + "prompt-perplexity": PromptPerplexityResponse, + "regex-validator": RegexValidatorResponse, + "secrets-detector": SecretsDetectorResponse, + "semantic-similarity": SemanticSimilarityResponse, + "sexism-detector": SexismDetectorResponse, + "sql-validator": SQLValidatorResponse, + "tone-detection": ToneDetectionResponse, + "topic-adherence": TopicAdherenceResponse, + "toxicity-detector": ToxicityDetectorResponse, + "uncertainty-detector": UncertaintyDetectorResponse, + "word-count": WordCountResponse, + "word-count-ratio": WordCountRatioResponse, +} + + +def get_request_model(slug: str) -> Optional[Type[BaseModel]]: + """Get the request model for an evaluator by slug.""" + return REQUEST_MODELS.get(slug) + + +def get_response_model(slug: str) -> Optional[Type[BaseModel]]: + """Get the response model for an evaluator by slug.""" + return RESPONSE_MODELS.get(slug) diff --git a/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/request.py b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/request.py new file mode 100644 index 0000000000..b869224ce6 --- /dev/null +++ b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/request.py @@ -0,0 +1,277 @@ +# generated by datamodel-codegen: +# filename: tmpvqz8m01b.json + +from 
__future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, Field + + +class AgentEfficiencyRequest(BaseModel): + trajectory_completions: str = Field( + ..., examples=['["User found", "Email updated", "Changes saved"]'] + ) + trajectory_prompts: str = Field( + ..., examples=['["Find user info", "Update email", "Save changes"]'] + ) + + +class AgentFlowQualityRequest(BaseModel): + conditions: list[str] = Field( + ..., examples=[['no tools called', 'agent completed task']] + ) + threshold: float = Field(..., examples=[0.5]) + trajectory_completions: str = Field( + ..., + examples=['["Found 5 flights", "Selected $299 flight", "Booking confirmed"]'], + ) + trajectory_prompts: str = Field( + ..., + examples=[ + '["Search for flights", "Select the cheapest option", "Confirm booking"]' + ], + ) + + +class AgentGoalAccuracyRequest(BaseModel): + completion: str = Field( + ..., + examples=[ + 'I have booked your flight from New York to Los Angeles departing Monday at 9am.' 
+ ], + ) + question: str = Field( + ..., examples=['Book a flight from NYC to LA for next Monday'] + ) + reference: str = Field(..., examples=['Flight booked: NYC to LA, Monday departure']) + + +class AgentGoalCompletenessRequest(BaseModel): + threshold: float = Field(..., examples=[0.5]) + trajectory_completions: str = Field( + ..., + examples=['["Account created", "Preferences saved", "Notifications enabled"]'], + ) + trajectory_prompts: str = Field( + ..., + examples=['["Create new account", "Set preferences", "Enable notifications"]'], + ) + + +class AgentToolErrorDetectorRequest(BaseModel): + tool_input: str = Field( + ..., examples=['{"action": "search", "query": "flights to Paris"}'] + ) + tool_output: str = Field( + ..., + examples=[ + '{"status": "success", "results": [{"flight": "AF123", "price": 450}]}' + ], + ) + + +class AnswerCompletenessRequest(BaseModel): + completion: str = Field(..., examples=['Paris.']) + context: str = Field(..., examples=['The capital of France is Paris.']) + question: str = Field(..., examples=['What is the capital of France?']) + + +class AnswerCorrectnessRequest(BaseModel): + completion: str = Field(..., examples=['World War II ended in 1945.']) + ground_truth: str = Field(..., examples=['1945']) + question: str = Field(..., examples=['What year did World War II end?']) + + +class AnswerRelevancyRequest(BaseModel): + answer: str = Field(..., examples=['The capital of France is Paris.']) + question: str = Field(..., examples=['What is the capital of France?']) + + +class CharCountRatioRequest(BaseModel): + denominator_text: str = Field( + ..., examples=['This is a longer text for comparison'] + ) + numerator_text: str = Field(..., examples=['Short text']) + + +class CharCountRequest(BaseModel): + text: str = Field(..., examples=['Hello, world! 
This is a sample text.']) + + +class ContextRelevanceRequest(BaseModel): + context: str = Field( + ..., + examples=[ + 'Our store is open Monday to Friday from 9am to 6pm, and Saturday from 10am to 4pm. We are closed on Sundays.' + ], + ) + model: Optional[str] = Field(None, examples=['gpt-4o']) + query: str = Field(..., examples=['What are the business hours?']) + + +class ConversationQualityRequest(BaseModel): + completions: str = Field( + ..., + examples=[ + '["Hi! I\'d be happy to assist you today.", "We offer consulting, development, and support services."]' + ], + ) + model: Optional[str] = Field(None, examples=['gpt-4o']) + prompts: str = Field( + ..., examples=['["Hello, how can I help?", "What services do you offer?"]'] + ) + + +class FaithfulnessRequest(BaseModel): + completion: str = Field( + ..., examples=['The Eiffel Tower is located in Paris and was built in 1889.'] + ) + context: str = Field( + ..., + examples=[ + 'The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France. It was constructed from 1887 to 1889.' 
+ ], + ) + question: str = Field(..., examples=['When was the Eiffel Tower built?']) + + +class InstructionAdherenceRequest(BaseModel): + instructions: str = Field( + ..., examples=['Respond in exactly 3 bullet points and use formal language.'] + ) + response: str = Field( + ..., + examples=[ + '- First point about the topic\n- Second relevant consideration\n- Final concluding thought' + ], + ) + + +class IntentChangeRequest(BaseModel): + completions: str = Field( + ..., + examples=[ + '["Sure, I can help with hotel booking", "No problem, let me search for flights"]' + ], + ) + model: Optional[str] = Field(None, examples=['gpt-4o']) + prompts: str = Field( + ..., + examples=['["I want to book a hotel", "Actually, I need a flight instead"]'], + ) + + +class JSONValidatorRequest(BaseModel): + enable_schema_validation: Optional[bool] = Field(None, examples=[True]) + schema_string: Optional[str] = Field(None, examples=['{}']) + text: str = Field(..., examples=['{"name": "John", "age": 30}']) + + +class PIIDetectorRequest(BaseModel): + probability_threshold: Optional[float] = Field(None, examples=[0.8]) + text: str = Field( + ..., + examples=[ + 'Please contact John Smith at john.smith@email.com or call 555-123-4567.' 
+ ], + ) + + +class PerplexityRequest(BaseModel): + logprobs: str = Field(..., examples=['[-2.3, -1.5, -0.8, -1.2, -0.5]']) + + +class PlaceholderRegexRequest(BaseModel): + case_sensitive: Optional[bool] = Field(None, examples=[True]) + dot_include_nl: Optional[bool] = Field(None, examples=[True]) + multi_line: Optional[bool] = Field(None, examples=[True]) + placeholder_value: str = Field( + ..., examples=['[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}'] + ) + should_match: Optional[bool] = Field(None, examples=[True]) + text: str = Field(..., examples=['user@example.com']) + + +class ProfanityDetectorRequest(BaseModel): + text: str = Field(..., examples=['This is a clean and professional message.']) + + +class PromptInjectionRequest(BaseModel): + prompt: str = Field(..., examples=['What is the weather like today?']) + threshold: Optional[float] = Field(None, examples=[0.5]) + + +class PromptPerplexityRequest(BaseModel): + prompt: str = Field(..., examples=['What is the capital of France?']) + + +class RegexValidatorRequest(BaseModel): + case_sensitive: Optional[bool] = Field(None, examples=[True]) + dot_include_nl: Optional[bool] = Field(None, examples=[True]) + multi_line: Optional[bool] = Field(None, examples=[True]) + regex: Optional[str] = Field(None, examples=['.*']) + should_match: Optional[bool] = Field(None, examples=[True]) + text: str = Field(..., examples=['user@example.com']) + + +class SQLValidatorRequest(BaseModel): + text: str = Field(..., examples=['SELECT * FROM users WHERE id = 1;']) + + +class SecretsDetectorRequest(BaseModel): + text: str = Field( + ..., examples=['Here is some text without any API keys or passwords.'] + ) + + +class SemanticSimilarityRequest(BaseModel): + completion: str = Field(..., examples=['The cat sat on the mat.']) + reference: str = Field(..., examples=['A feline was resting on the rug.']) + + +class SexismDetectorRequest(BaseModel): + text: str = Field( + ..., + examples=['All team members should be treated equally 
regardless of gender.'], + ) + threshold: Optional[float] = Field(None, examples=[0.5]) + + +class ToneDetectionRequest(BaseModel): + text: str = Field(..., examples=['The capital of France is Paris.']) + + +class TopicAdherenceRequest(BaseModel): + completion: str = Field( + ..., + examples=[ + 'Machine learning is a subset of AI that enables systems to learn from data.' + ], + ) + question: str = Field(..., examples=['Tell me about machine learning']) + reference_topics: str = Field( + ..., examples=['artificial intelligence, data science, algorithms'] + ) + + +class ToxicityDetectorRequest(BaseModel): + text: str = Field(..., examples=['Thank you for your help with this project.']) + threshold: Optional[float] = Field(None, examples=[0.5]) + + +class UncertaintyDetectorRequest(BaseModel): + prompt: str = Field( + ..., examples=['I am not sure, I think the capital of France is Paris.'] + ) + + +class WordCountRatioRequest(BaseModel): + denominator_text: str = Field( + ..., examples=['This is a longer input text for comparison'] + ) + numerator_text: str = Field(..., examples=['Short response']) + + +class WordCountRequest(BaseModel): + text: str = Field(..., examples=['This is a sample text with several words.']) diff --git a/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/response.py b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/response.py new file mode 100644 index 0000000000..0b75dd715d --- /dev/null +++ b/packages/traceloop-sdk/traceloop/sdk/generated/evaluators/response.py @@ -0,0 +1,166 @@ +# generated by datamodel-codegen: +# filename: tmpvqz8m01b.json + +from __future__ import annotations + +from typing import Optional + +from pydantic import BaseModel, Field + + +class AgentEfficiencyResponse(BaseModel): + step_efficiency_reason: Optional[str] = Field( + None, examples=['Agent completed task with minimal redundant steps'] + ) + step_efficiency_score: Optional[float] = Field(None, examples=[0.85]) + task_completion_reason: 
Optional[str] = Field( + None, examples=['All required tasks were completed successfully'] + ) + task_completion_score: Optional[float] = Field(None, examples=[0.92]) + + +class AgentFlowQualityResponse(BaseModel): + reason: Optional[str] = Field( + None, examples=['Agent followed the expected flow correctly'] + ) + result: Optional[str] = Field(None, examples=['pass']) + score: Optional[float] = Field(None, examples=[0.89]) + + +class AgentGoalAccuracyResponse(BaseModel): + accuracy_score: Optional[float] = Field(None, examples=[0.88]) + + +class AgentGoalCompletenessResponse(BaseModel): + reason: Optional[str] = Field(None, examples=['All user goals were accomplished']) + result: Optional[str] = Field(None, examples=['complete']) + score: Optional[float] = Field(None, examples=[0.95]) + + +class AgentToolErrorDetectorResponse(BaseModel): + reason: Optional[str] = Field( + None, examples=['Tool executed successfully without errors'] + ) + result: Optional[str] = Field(None, examples=['success']) + + +class AnswerCompletenessResponse(BaseModel): + answer_completeness_score: Optional[float] = Field(None, examples=[0.95]) + + +class AnswerCorrectnessResponse(BaseModel): + correctness_score: Optional[float] = Field(None, examples=[0.91]) + + +class AnswerRelevancyResponse(BaseModel): + is_relevant: Optional[bool] = Field(None, examples=[True]) + + +class CharCountRatioResponse(BaseModel): + char_ratio: Optional[float] = Field(None, examples=[0.75]) + + +class CharCountResponse(BaseModel): + char_count: Optional[int] = Field(None, examples=[42]) + + +class ContextRelevanceResponse(BaseModel): + relevance_score: Optional[float] = Field(None, examples=[0.88]) + + +class ConversationQualityResponse(BaseModel): + conversation_quality_score: Optional[float] = Field(None, examples=[0.82]) + + +class ErrorResponse(BaseModel): + error: Optional[str] = Field(None, examples=['error message']) + + +class FaithfulnessResponse(BaseModel): + is_faithful: Optional[bool] = Field(None, 
examples=[True]) + + +class InstructionAdherenceResponse(BaseModel): + instruction_adherence_score: Optional[float] = Field(None, examples=[0.87]) + + +class IntentChangeResponse(BaseModel): + pass_: Optional[bool] = Field(None, alias='pass', examples=[True]) + reason: Optional[str] = Field( + None, examples=['User intent remained consistent throughout the conversation'] + ) + score: Optional[int] = Field(None, examples=[1]) + + +class JSONValidatorResponse(BaseModel): + is_valid_json: Optional[bool] = Field(None, examples=[True]) + + +class PIIDetectorResponse(BaseModel): + has_pii: Optional[bool] = Field(None, examples=[False]) + + +class PerplexityResponse(BaseModel): + perplexity_score: Optional[float] = Field(None, examples=[12.5]) + + +class PlaceholderRegexResponse(BaseModel): + is_valid_regex: Optional[bool] = Field(None, examples=[True]) + + +class ProfanityDetectorResponse(BaseModel): + has_profanity: Optional[bool] = Field(None, examples=[False]) + + +class PromptInjectionResponse(BaseModel): + has_injection: Optional[str] = Field(None, examples=['safe']) + + +class PromptPerplexityResponse(BaseModel): + perplexity_score: Optional[float] = Field(None, examples=[8.3]) + + +class RegexValidatorResponse(PlaceholderRegexResponse): + pass + + +class SQLValidatorResponse(BaseModel): + is_valid_sql: Optional[bool] = Field(None, examples=[True]) + + +class SecretsDetectorResponse(BaseModel): + has_secret: Optional[bool] = Field(None, examples=[False]) + + +class SemanticSimilarityResponse(BaseModel): + similarity_score: Optional[float] = Field(None, examples=[0.92]) + + +class SexismDetectorResponse(BaseModel): + is_safe: Optional[str] = Field(None, examples=['safe']) + + +class ToneDetectionResponse(BaseModel): + score: Optional[float] = Field(None, examples=[0.95]) + tone: Optional[str] = Field(None, examples=['neutral']) + + +class TopicAdherenceResponse(BaseModel): + adherence_score: Optional[float] = Field(None, examples=[0.95]) + + +class 
ToxicityDetectorResponse(SexismDetectorResponse): + pass + + +class UncertaintyDetectorResponse(BaseModel): + answer: Optional[str] = Field(None, examples=['Paris']) + uncertainty: Optional[float] = Field(None, examples=[0.95]) + + +class WordCountRatioResponse(BaseModel): + word_ratio: Optional[float] = Field(None, examples=[0.85]) + + +class WordCountResponse(BaseModel): + word_count: Optional[int] = Field(None, examples=[10]) diff --git a/scripts/codegen/generate_evaluator_models.py b/scripts/codegen/generate_evaluator_models.py new file mode 100644 index 0000000000..96f27483f9 --- /dev/null +++ b/scripts/codegen/generate_evaluator_models.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python3 +""" +Generate Pydantic models from OpenAPI/Swagger spec. +Extracts models used by v2/evaluators/execute/* endpoints. + +Usage: + python generate_evaluator_models.py +""" + +import json +import re +import subprocess +import sys +import tempfile +from pathlib import Path + + +def extract_definitions_and_mappings(swagger_path: str) -> tuple[dict, dict]: + """ + Extract definitions used by v2/evaluators/execute/* endpoints. + Also extracts slug-to-model mappings. 
+ + Returns: + tuple: (filtered_definitions, slug_mappings) + slug_mappings: {slug: {"request": "ModelName", "response": "ModelName"}} + """ + with open(swagger_path) as f: + data = json.load(f) + + all_definitions = data["definitions"] + needed_refs = set() + slug_mappings = {} + + # Collect all definitions referenced by target endpoints + for path, methods in data["paths"].items(): + if "/v2/evaluators/execute/" in path: + # Extract slug from path like /v2/evaluators/execute/pii-detector + slug = path.split("/v2/evaluators/execute/")[-1] + if slug: + slug_mappings[slug] = {"request": None, "response": None} + + for method, details in methods.items(): + # Get request body refs + for param in details.get("parameters", []): + if "schema" in param and "$ref" in param["schema"]: + ref = param["schema"]["$ref"].replace("#/definitions/", "") + needed_refs.add(ref) + if slug and ref.startswith("request."): + # Convert request.PIIDetectorRequest to PIIDetectorRequest + model_name = ref.split(".")[-1] + slug_mappings[slug]["request"] = model_name + + # Get response refs + for code, resp in details.get("responses", {}).items(): + if "schema" in resp and "$ref" in resp["schema"]: + ref = resp["schema"]["$ref"].replace("#/definitions/", "") + needed_refs.add(ref) + # Only use 200 response for the success model mapping + if slug and code == "200" and ref.startswith("response."): + model_name = ref.split(".")[-1] + slug_mappings[slug]["response"] = model_name + + # Recursively find all referenced definitions + def find_refs(obj, refs): + if isinstance(obj, dict): + if "$ref" in obj: + ref = obj["$ref"].replace("#/definitions/", "") + if ref not in refs: + refs.add(ref) + if ref in all_definitions: + find_refs(all_definitions[ref], refs) + else: + for v in obj.values(): + find_refs(v, refs) + elif isinstance(obj, list): + for item in obj: + find_refs(item, refs) + + # Find all nested references + all_needed = set(needed_refs) + for ref in list(needed_refs): + if ref in 
all_definitions: + find_refs(all_definitions[ref], all_needed) + + # Filter definitions to only include needed ones + filtered_definitions = { + k: v for k, v in all_definitions.items() if k in all_needed + } + + # Clean up slug_mappings - remove entries without both request and response + slug_mappings = { + slug: models + for slug, models in slug_mappings.items() + if models["request"] and models["response"] + } + + return filtered_definitions, slug_mappings + + +def generate_registry_py(output_dir: Path, slug_mappings: dict) -> int: + """Generate registry.py with slug-to-model mappings.""" + + # Collect all unique request and response model names + request_models = sorted(set( + m["request"] for m in slug_mappings.values() if m["request"] + )) + response_models = sorted(set( + m["response"] for m in slug_mappings.values() if m["response"] + )) + + content = '''""" +Registry mapping evaluator slugs to their request/response Pydantic models. + +This enables type-safe validation of inputs and parsing of outputs. 
+ +DO NOT EDIT MANUALLY - Regenerate with: + ./scripts/generate-models.sh /path/to/swagger.json +""" + +from typing import Dict, Type, Optional +from pydantic import BaseModel + +''' + + # Import request models + if request_models: + content += "from .request import (\n" + for model in request_models: + content += f" {model},\n" + content += ")\n\n" + + # Import response models + if response_models: + content += "from .response import (\n" + for model in response_models: + content += f" {model},\n" + content += ")\n\n" + + # Generate REQUEST_MODELS dict + content += "\n# Mapping from evaluator slug to request model\n" + content += "REQUEST_MODELS: Dict[str, Type[BaseModel]] = {\n" + for slug in sorted(slug_mappings.keys()): + model = slug_mappings[slug]["request"] + if model: + content += f' "{slug}": {model},\n' + content += "}\n\n" + + # Generate RESPONSE_MODELS dict + content += "# Mapping from evaluator slug to response model\n" + content += "RESPONSE_MODELS: Dict[str, Type[BaseModel]] = {\n" + for slug in sorted(slug_mappings.keys()): + model = slug_mappings[slug]["response"] + if model: + content += f' "{slug}": {model},\n' + content += "}\n\n" + + # Add helper functions + content += ''' +def get_request_model(slug: str) -> Optional[Type[BaseModel]]: + """Get the request model for an evaluator by slug.""" + return REQUEST_MODELS.get(slug) + + +def get_response_model(slug: str) -> Optional[Type[BaseModel]]: + """Get the response model for an evaluator by slug.""" + return RESPONSE_MODELS.get(slug) +''' + + (output_dir / "registry.py").write_text(content) + + return len(slug_mappings) + + +def generate_init_py(output_dir: Path) -> tuple[int, int]: + """Generate __init__.py with proper exports.""" + # Extract class names from request.py + request_classes = [] + request_file = output_dir / "request.py" + if request_file.exists(): + content = request_file.read_text() + request_classes = re.findall(r"^class (\w+)\(", content, re.MULTILINE) + + # Extract class names 
def main():
    """CLI entry point.

    Reads a swagger spec, extracts the definitions reachable from the
    v2/evaluators/execute/* endpoints, runs datamodel-codegen to produce
    Pydantic models, then generates registry.py and __init__.py.
    """
    if len(sys.argv) != 3:
        # FIX: the usage string had lost its argument placeholders
        # (they were stripped as if they were markup); restore them.
        print(f"Usage: {sys.argv[0]} <swagger_path> <output_dir>")
        print(f"Example: {sys.argv[0]} /path/to/swagger.json ./generated")
        sys.exit(1)

    swagger_path = sys.argv[1]
    output_dir = Path(sys.argv[2])

    if not Path(swagger_path).exists():
        print(f"Error: Swagger file not found at {swagger_path}")
        sys.exit(1)

    print("=== Extracting definitions for evaluator execute endpoints ===")

    # Extract definitions and slug mappings
    filtered_definitions, slug_mappings = extract_definitions_and_mappings(
        swagger_path
    )

    # Count with a generator instead of materialising throwaway lists.
    request_count = sum(
        1 for k in filtered_definitions if k.startswith("request.")
    )
    response_count = sum(
        1 for k in filtered_definitions if k.startswith("response.")
    )

    print(f"Extracted {len(filtered_definitions)} definitions")
    print(f"Request types: {request_count}")
    print(f"Response types: {response_count}")
    print(f"Evaluator slugs: {len(slug_mappings)}")

    # Wrap the filtered definitions in a standalone JSON Schema document so
    # datamodel-codegen can consume them without the rest of the swagger file.
    schema = {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "definitions": filtered_definitions,
        "type": "object",
    }

    # delete=False so the codegen subprocess can open the file by name;
    # the finally-block below guarantees cleanup.
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".json", delete=False
    ) as f:
        json.dump(schema, f, indent=2)
        temp_schema = f.name

    print("=== Generating Pydantic models ===")

    # Create output directory
    output_dir.mkdir(parents=True, exist_ok=True)

    # Run datamodel-codegen; cleanup happens even if the subprocess fails.
    try:
        subprocess.run(
            [
                "datamodel-codegen",
                "--input", temp_schema,
                "--input-file-type", "jsonschema",
                "--output", str(output_dir),
                "--output-model-type", "pydantic_v2.BaseModel",
                "--target-python-version", "3.10",
                "--use-standard-collections",
                "--reuse-model",
                "--disable-timestamp",
            ],
            check=True,
        )
    finally:
        # Cleanup temp file
        Path(temp_schema).unlink(missing_ok=True)

    print("=== Generating registry.py with slug mappings ===")
    registry_count = generate_registry_py(output_dir, slug_mappings)
    print(f"Generated registry.py with {registry_count} evaluator mappings")

    print("=== Generating __init__.py with exports ===")
    req_count, resp_count = generate_init_py(output_dir)
    print(
        f"Generated __init__.py with {req_count} request "
        f"and {resp_count} response exports"
    )

    print("=== Model generation complete ===")
    print(f"Output written to: {output_dir}")
print("=== Model generation complete ===") + print(f"Output written to: {output_dir}") + + +if __name__ == "__main__": + main() diff --git a/scripts/generate-models.sh b/scripts/generate-models.sh new file mode 100755 index 0000000000..aeac6c1514 --- /dev/null +++ b/scripts/generate-models.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -euo pipefail + +# Generate Pydantic models from OpenAPI/Swagger spec +# Extracts models used by v2/evaluators/execute/* endpoints +# Usage: ./scripts/generate-models.sh /path/to/swagger.json + +if [ $# -eq 0 ]; then + echo "Usage: $0 " + echo "Example: $0 /path/to/api-service/docs/swagger.json" + exit 1 +fi + +SWAGGER_PATH="$1" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" +OUTPUT_DIR="${ROOT_DIR}/packages/traceloop-sdk/traceloop/sdk/generated/evaluators" +CODEGEN_SCRIPT="${SCRIPT_DIR}/codegen/generate_evaluator_models.py" + +if [ ! -f "${SWAGGER_PATH}" ]; then + echo "Error: Swagger file not found at ${SWAGGER_PATH}" + exit 1 +fi + +echo "=== Generating models from ${SWAGGER_PATH} ===" + +# Change to traceloop-sdk directory for poetry +cd "${ROOT_DIR}/packages/traceloop-sdk" + +# Run the Python generation script +poetry run python "${CODEGEN_SCRIPT}" "${SWAGGER_PATH}" "${OUTPUT_DIR}" + +echo "" +echo "Generated files:" +ls -la "${OUTPUT_DIR}"