
Commit 7075aca

feat(api): api update
1 parent 2c0bd75 · commit 7075aca

14 files changed: +9 −1324 lines changed


.stats.yml

Lines changed: 2 additions & 2 deletions
@@ -1,3 +1,3 @@
-configured_endpoints: 56
-openapi_spec_hash: ef5807baa380babd037f9b6271761eae
+configured_endpoints: 54
+openapi_spec_hash: f263c6c6d8d75a8f7c1e9c65188e7ef2
 config_hash: 6c3ad84d97bf1d0989ad2ec0cae64078

api.md

Lines changed: 0 additions & 13 deletions
@@ -252,16 +252,3 @@ Methods:
 - <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/pause">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">pause</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_pause_response.py">RemediationPauseResponse</a></code>
 - <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/publish">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">publish</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_publish_response.py">RemediationPublishResponse</a></code>
 - <code title="patch /api/projects/{project_id}/remediations/{remediation_id}/unpause">client.projects.remediations.<a href="./src/codex/resources/projects/remediations.py">unpause</a>(remediation_id, \*, project_id) -> <a href="./src/codex/types/projects/remediation_unpause_response.py">RemediationUnpauseResponse</a></code>
-
-# Tlm
-
-Types:
-
-```python
-from codex.types import TlmPromptResponse, TlmScoreResponse
-```
-
-Methods:
-
-- <code title="post /api/tlm/prompt">client.tlm.<a href="./src/codex/resources/tlm.py">prompt</a>(\*\*<a href="src/codex/types/tlm_prompt_params.py">params</a>) -> <a href="./src/codex/types/tlm_prompt_response.py">TlmPromptResponse</a></code>
-- <code title="post /api/tlm/score">client.tlm.<a href="./src/codex/resources/tlm.py">score</a>(\*\*<a href="src/codex/types/tlm_score_params.py">params</a>) -> <a href="./src/codex/types/tlm_score_response.py">TlmScoreResponse</a></code>
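For reference, the deleted section documented the standalone TLM endpoints that this commit drops from the SDK surface. A minimal sketch of what calls against that now-removed surface looked like; only the method paths and return types come from the deleted docs, while the keyword arguments are illustrative assumptions (the exact fields of `tlm_prompt_params` / `tlm_score_params` are not shown in this diff):

```python
# Sketch of the removed TLM surface, per the api.md entries deleted above.
# Keyword arguments are assumptions for illustration.
from codex import Codex

client = Codex()  # credentials/config omitted

# POST /api/tlm/prompt -> TlmPromptResponse  (removed in this commit)
prompt_response = client.tlm.prompt(prompt="What is the capital of France?")

# POST /api/tlm/score -> TlmScoreResponse  (removed in this commit)
score_response = client.tlm.score(
    prompt="What is the capital of France?",
    response="Paris",
)
```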

src/codex/_client.py

Lines changed: 1 addition & 9 deletions
@@ -22,7 +22,7 @@
 )
 from ._utils import is_given, get_async_library
 from ._version import __version__
-from .resources import tlm, health
+from .resources import health
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import APIStatusError
 from ._base_client import (
@@ -58,7 +58,6 @@ class Codex(SyncAPIClient):
     organizations: organizations.OrganizationsResource
     users: users.UsersResource
     projects: projects.ProjectsResource
-    tlm: tlm.TlmResource
     with_raw_response: CodexWithRawResponse
     with_streaming_response: CodexWithStreamedResponse

@@ -143,7 +142,6 @@ def __init__(
         self.organizations = organizations.OrganizationsResource(self)
         self.users = users.UsersResource(self)
         self.projects = projects.ProjectsResource(self)
-        self.tlm = tlm.TlmResource(self)
         self.with_raw_response = CodexWithRawResponse(self)
         self.with_streaming_response = CodexWithStreamedResponse(self)

@@ -304,7 +302,6 @@ class AsyncCodex(AsyncAPIClient):
     organizations: organizations.AsyncOrganizationsResource
     users: users.AsyncUsersResource
     projects: projects.AsyncProjectsResource
-    tlm: tlm.AsyncTlmResource
     with_raw_response: AsyncCodexWithRawResponse
     with_streaming_response: AsyncCodexWithStreamedResponse

@@ -389,7 +386,6 @@ def __init__(
         self.organizations = organizations.AsyncOrganizationsResource(self)
         self.users = users.AsyncUsersResource(self)
         self.projects = projects.AsyncProjectsResource(self)
-        self.tlm = tlm.AsyncTlmResource(self)
         self.with_raw_response = AsyncCodexWithRawResponse(self)
         self.with_streaming_response = AsyncCodexWithStreamedResponse(self)

@@ -551,7 +547,6 @@ def __init__(self, client: Codex) -> None:
         self.organizations = organizations.OrganizationsResourceWithRawResponse(client.organizations)
         self.users = users.UsersResourceWithRawResponse(client.users)
         self.projects = projects.ProjectsResourceWithRawResponse(client.projects)
-        self.tlm = tlm.TlmResourceWithRawResponse(client.tlm)


 class AsyncCodexWithRawResponse:
@@ -560,7 +555,6 @@ def __init__(self, client: AsyncCodex) -> None:
         self.organizations = organizations.AsyncOrganizationsResourceWithRawResponse(client.organizations)
         self.users = users.UsersResourceWithRawResponse(client.users)
         self.projects = projects.AsyncProjectsResourceWithRawResponse(client.projects)
-        self.tlm = tlm.AsyncTlmResourceWithRawResponse(client.tlm)


 class CodexWithStreamedResponse:
@@ -569,7 +563,6 @@ def __init__(self, client: Codex) -> None:
         self.organizations = organizations.OrganizationsResourceWithStreamingResponse(client.organizations)
         self.users = users.UsersResourceWithStreamingResponse(client.users)
         self.projects = projects.ProjectsResourceWithStreamingResponse(client.projects)
-        self.tlm = tlm.TlmResourceWithStreamingResponse(client.tlm)


 class AsyncCodexWithStreamedResponse:
@@ -578,7 +571,6 @@ def __init__(self, client: AsyncCodex) -> None:
         self.organizations = organizations.AsyncOrganizationsResourceWithStreamingResponse(client.organizations)
         self.users = users.AsyncUsersResourceWithStreamingResponse(client.users)
         self.projects = projects.AsyncProjectsResourceWithStreamingResponse(client.projects)
-        self.tlm = tlm.AsyncTlmResourceWithStreamingResponse(client.tlm)


 Client = Codex
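The net effect on the client surface: `Codex` and `AsyncCodex` no longer declare, construct, or wrap a `tlm` resource. A quick sanity check against an installed build might look like the sketch below (client construction details are elided and depend on your auth setup):

```python
# Before this commit the client wired tlm alongside the other resources;
# after it, only organizations, users, and projects remain.
from codex import Codex

client = Codex()  # auth arguments omitted

assert hasattr(client, "projects")                    # still set in __init__
assert hasattr(client.with_raw_response, "projects")  # wrappers unchanged for projects
assert not hasattr(client, "tlm")                     # removed by this commit
```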

src/codex/resources/__init__.py

Lines changed: 0 additions & 14 deletions
@@ -1,13 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

-from .tlm import (
-    TlmResource,
-    AsyncTlmResource,
-    TlmResourceWithRawResponse,
-    AsyncTlmResourceWithRawResponse,
-    TlmResourceWithStreamingResponse,
-    AsyncTlmResourceWithStreamingResponse,
-)
 from .users import (
     UsersResource,
     AsyncUsersResource,
@@ -66,10 +58,4 @@
     "AsyncProjectsResourceWithRawResponse",
     "ProjectsResourceWithStreamingResponse",
     "AsyncProjectsResourceWithStreamingResponse",
-    "TlmResource",
-    "AsyncTlmResource",
-    "TlmResourceWithRawResponse",
-    "AsyncTlmResourceWithRawResponse",
-    "TlmResourceWithStreamingResponse",
-    "AsyncTlmResourceWithStreamingResponse",
 ]
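Because the `Tlm*` re-exports are gone from `codex.resources`, code that imported those names directly now fails at import time. An illustrative check (assuming the package is installed at this commit):

```python
# UsersResource is still re-exported (it appears in the surviving import block
# above); the Tlm* names are not, so importing them now raises ImportError.
from codex.resources import UsersResource  # still works

try:
    from codex.resources import TlmResource  # worked before this commit
except ImportError:
    TlmResource = None  # expected after this commit
```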

src/codex/resources/projects/projects.py

Lines changed: 4 additions & 16 deletions
@@ -456,7 +456,6 @@ def validate(
         eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
         messages: Iterable[project_validate_params.Message] | NotGiven = NOT_GIVEN,
         options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN,
-        prompt: Optional[str] | NotGiven = NOT_GIVEN,
         quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
         rewritten_question: Optional[str] | NotGiven = NOT_GIVEN,
         task: Optional[str] | NotGiven = NOT_GIVEN,
@@ -576,12 +575,8 @@ def validate(
           When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
           This parameter has no effect when `disable_trustworthiness` is True.

-          disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
-          This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
-          The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
-          prompt: The prompt to use for the TLM call. If not provided, the prompt will be
-          generated from the messages.
+          disable_trustworthiness (bool, default = False): if True, TLM will not compute trust scores,
+          useful if you only want to compute custom evaluation criteria.

           quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.

@@ -625,7 +620,6 @@ def validate(
                     "eval_scores": eval_scores,
                     "messages": messages,
                     "options": options,
-                    "prompt": prompt,
                     "quality_preset": quality_preset,
                     "rewritten_question": rewritten_question,
                     "task": task,
@@ -1034,7 +1028,6 @@ async def validate(
         eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
         messages: Iterable[project_validate_params.Message] | NotGiven = NOT_GIVEN,
         options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN,
-        prompt: Optional[str] | NotGiven = NOT_GIVEN,
         quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
         rewritten_question: Optional[str] | NotGiven = NOT_GIVEN,
         task: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1154,12 +1147,8 @@ async def validate(
           When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
           This parameter has no effect when `disable_trustworthiness` is True.

-          disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
-          This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
-          The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
-          prompt: The prompt to use for the TLM call. If not provided, the prompt will be
-          generated from the messages.
+          disable_trustworthiness (bool, default = False): if True, TLM will not compute trust scores,
+          useful if you only want to compute custom evaluation criteria.

           quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.

@@ -1203,7 +1192,6 @@ async def validate(
                     "eval_scores": eval_scores,
                     "messages": messages,
                     "options": options,
-                    "prompt": prompt,
                     "quality_preset": quality_preset,
                     "rewritten_question": rewritten_question,
                     "task": task,

0 commit comments
