Skip to content

Commit 4151cf2

Browse files
feat(api): api update
1 parent cfc87fb commit 4151cf2

16 files changed

+57
-310
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
1-
configured_endpoints: 55
2-
openapi_spec_hash: d69252c7252423bc98ecc17807eb33ec
1+
configured_endpoints: 54
2+
openapi_spec_hash: 04e1b7aefbeff10daab249b153de147f
33
config_hash: bed87752f4056d0c4bf2ddf856307800

api.md

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -202,7 +202,6 @@ Types:
202202
from codex.types.projects import (
203203
QueryLogRetrieveResponse,
204204
QueryLogListResponse,
205-
QueryLogAddUserFeedbackResponse,
206205
QueryLogListByGroupResponse,
207206
QueryLogListGroupsResponse,
208207
QueryLogStartRemediationResponse,
@@ -213,7 +212,6 @@ Methods:
213212

214213
- <code title="get /api/projects/{project_id}/query_logs/{query_log_id}">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">retrieve</a>(query_log_id, \*, project_id) -> <a href="./src/codex/types/projects/query_log_retrieve_response.py">QueryLogRetrieveResponse</a></code>
215214
- <code title="get /api/projects/{project_id}/query_logs/">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">list</a>(project_id, \*\*<a href="src/codex/types/projects/query_log_list_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_list_response.py">SyncOffsetPageQueryLogs[QueryLogListResponse]</a></code>
216-
- <code title="post /api/projects/{project_id}/query_logs/{query_log_id}/user_feedback">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">add_user_feedback</a>(query_log_id, \*, project_id, \*\*<a href="src/codex/types/projects/query_log_add_user_feedback_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_add_user_feedback_response.py">QueryLogAddUserFeedbackResponse</a></code>
217215
- <code title="get /api/projects/{project_id}/query_logs/logs_by_group">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">list_by_group</a>(project_id, \*\*<a href="src/codex/types/projects/query_log_list_by_group_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_list_by_group_response.py">QueryLogListByGroupResponse</a></code>
218216
- <code title="get /api/projects/{project_id}/query_logs/groups">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">list_groups</a>(project_id, \*\*<a href="src/codex/types/projects/query_log_list_groups_params.py">params</a>) -> <a href="./src/codex/types/projects/query_log_list_groups_response.py">SyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse]</a></code>
219217
- <code title="post /api/projects/{project_id}/query_logs/{query_log_id}/start_remediation">client.projects.query_logs.<a href="./src/codex/resources/projects/query_logs.py">start_remediation</a>(query_log_id, \*, project_id) -> <a href="./src/codex/types/projects/query_log_start_remediation_response.py">QueryLogStartRemediationResponse</a></code>

src/codex/resources/projects/projects.py

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -527,11 +527,12 @@ def validate(
527527
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
528528
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
529529
(better models yield better results, faster models yield faster results). -
530-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
531-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
532-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
533-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
534-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
530+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
531+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
532+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
533+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
534+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
535+
"nova-micro".
535536
536537
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
537538
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -1105,11 +1106,12 @@ async def validate(
11051106
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
11061107
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
11071108
(better models yield better results, faster models yield faster results). -
1108-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
1109-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
1110-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
1111-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
1112-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
1109+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
1110+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
1111+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
1112+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
1113+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
1114+
"nova-micro".
11131115
11141116
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
11151117
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.

src/codex/resources/projects/query_logs.py

Lines changed: 1 addition & 101 deletions
Original file line numberDiff line numberDiff line change
@@ -25,17 +25,11 @@
2525
AsyncOffsetPageQueryLogGroups,
2626
)
2727
from ..._base_client import AsyncPaginator, make_request_options
28-
from ...types.projects import (
29-
query_log_list_params,
30-
query_log_list_groups_params,
31-
query_log_list_by_group_params,
32-
query_log_add_user_feedback_params,
33-
)
28+
from ...types.projects import query_log_list_params, query_log_list_groups_params, query_log_list_by_group_params
3429
from ...types.projects.query_log_list_response import QueryLogListResponse
3530
from ...types.projects.query_log_retrieve_response import QueryLogRetrieveResponse
3631
from ...types.projects.query_log_list_groups_response import QueryLogListGroupsResponse
3732
from ...types.projects.query_log_list_by_group_response import QueryLogListByGroupResponse
38-
from ...types.projects.query_log_add_user_feedback_response import QueryLogAddUserFeedbackResponse
3933
from ...types.projects.query_log_start_remediation_response import QueryLogStartRemediationResponse
4034

4135
__all__ = ["QueryLogsResource", "AsyncQueryLogsResource"]
@@ -190,46 +184,6 @@ def list(
190184
model=QueryLogListResponse,
191185
)
192186

193-
def add_user_feedback(
194-
self,
195-
query_log_id: str,
196-
*,
197-
project_id: str,
198-
key: str,
199-
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
200-
# The extra values given here take precedence over values defined on the client or passed to this method.
201-
extra_headers: Headers | None = None,
202-
extra_query: Query | None = None,
203-
extra_body: Body | None = None,
204-
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
205-
) -> QueryLogAddUserFeedbackResponse:
206-
"""
207-
Add User Feedback Route
208-
209-
Args:
210-
key: A key describing the criteria of the feedback, eg 'rating'
211-
212-
extra_headers: Send extra headers
213-
214-
extra_query: Add additional query parameters to the request
215-
216-
extra_body: Add additional JSON properties to the request
217-
218-
timeout: Override the client-level default timeout for this request, in seconds
219-
"""
220-
if not project_id:
221-
raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
222-
if not query_log_id:
223-
raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
224-
return self._post(
225-
f"/api/projects/{project_id}/query_logs/{query_log_id}/user_feedback",
226-
body=maybe_transform({"key": key}, query_log_add_user_feedback_params.QueryLogAddUserFeedbackParams),
227-
options=make_request_options(
228-
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
229-
),
230-
cast_to=QueryLogAddUserFeedbackResponse,
231-
)
232-
233187
def list_by_group(
234188
self,
235189
project_id: str,
@@ -614,48 +568,6 @@ def list(
614568
model=QueryLogListResponse,
615569
)
616570

617-
async def add_user_feedback(
618-
self,
619-
query_log_id: str,
620-
*,
621-
project_id: str,
622-
key: str,
623-
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
624-
# The extra values given here take precedence over values defined on the client or passed to this method.
625-
extra_headers: Headers | None = None,
626-
extra_query: Query | None = None,
627-
extra_body: Body | None = None,
628-
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
629-
) -> QueryLogAddUserFeedbackResponse:
630-
"""
631-
Add User Feedback Route
632-
633-
Args:
634-
key: A key describing the criteria of the feedback, eg 'rating'
635-
636-
extra_headers: Send extra headers
637-
638-
extra_query: Add additional query parameters to the request
639-
640-
extra_body: Add additional JSON properties to the request
641-
642-
timeout: Override the client-level default timeout for this request, in seconds
643-
"""
644-
if not project_id:
645-
raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
646-
if not query_log_id:
647-
raise ValueError(f"Expected a non-empty value for `query_log_id` but received {query_log_id!r}")
648-
return await self._post(
649-
f"/api/projects/{project_id}/query_logs/{query_log_id}/user_feedback",
650-
body=await async_maybe_transform(
651-
{"key": key}, query_log_add_user_feedback_params.QueryLogAddUserFeedbackParams
652-
),
653-
options=make_request_options(
654-
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
655-
),
656-
cast_to=QueryLogAddUserFeedbackResponse,
657-
)
658-
659571
async def list_by_group(
660572
self,
661573
project_id: str,
@@ -901,9 +813,6 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
901813
self.list = to_raw_response_wrapper(
902814
query_logs.list,
903815
)
904-
self.add_user_feedback = to_raw_response_wrapper(
905-
query_logs.add_user_feedback,
906-
)
907816
self.list_by_group = to_raw_response_wrapper(
908817
query_logs.list_by_group,
909818
)
@@ -925,9 +834,6 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
925834
self.list = async_to_raw_response_wrapper(
926835
query_logs.list,
927836
)
928-
self.add_user_feedback = async_to_raw_response_wrapper(
929-
query_logs.add_user_feedback,
930-
)
931837
self.list_by_group = async_to_raw_response_wrapper(
932838
query_logs.list_by_group,
933839
)
@@ -949,9 +855,6 @@ def __init__(self, query_logs: QueryLogsResource) -> None:
949855
self.list = to_streamed_response_wrapper(
950856
query_logs.list,
951857
)
952-
self.add_user_feedback = to_streamed_response_wrapper(
953-
query_logs.add_user_feedback,
954-
)
955858
self.list_by_group = to_streamed_response_wrapper(
956859
query_logs.list_by_group,
957860
)
@@ -973,9 +876,6 @@ def __init__(self, query_logs: AsyncQueryLogsResource) -> None:
973876
self.list = async_to_streamed_response_wrapper(
974877
query_logs.list,
975878
)
976-
self.add_user_feedback = async_to_streamed_response_wrapper(
977-
query_logs.add_user_feedback,
978-
)
979879
self.list_by_group = async_to_streamed_response_wrapper(
980880
query_logs.list_by_group,
981881
)

src/codex/resources/tlm.py

Lines changed: 24 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -101,11 +101,12 @@ def prompt(
101101
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
102102
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
103103
(better models yield better results, faster models yield faster results). -
104-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
105-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
106-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
107-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
108-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
104+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
105+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
106+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
107+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
108+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
109+
"nova-micro".
109110
110111
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
111112
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -244,11 +245,12 @@ def score(
244245
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
245246
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
246247
(better models yield better results, faster models yield faster results). -
247-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
248-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
249-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
250-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
251-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
248+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
249+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
250+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
251+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
252+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
253+
"nova-micro".
252254
253255
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
254256
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -403,11 +405,12 @@ async def prompt(
403405
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
404406
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
405407
(better models yield better results, faster models yield faster results). -
406-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
407-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
408-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
409-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
410-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
408+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
409+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
410+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
411+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
412+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
413+
"nova-micro".
411414
412415
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
413416
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -546,11 +549,12 @@ async def score(
546549
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
547550
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
548551
(better models yield better results, faster models yield faster results). -
549-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
550-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
551-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
552-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
553-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
552+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
553+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
554+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
555+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
556+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
557+
"nova-micro".
554558
555559
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
556560
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.

src/codex/types/project_validate_params.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -130,11 +130,12 @@ class ProjectValidateParams(TypedDict, total=False):
130130
"claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
131131
"nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
132132
(better models yield better results, faster models yield faster results). -
133-
Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
134-
"gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
135-
"claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
136-
"o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
137-
for low latency/costs: "gpt-4.1-nano", "nova-micro".
133+
Models still in beta: "gpt-5", "gpt-5-mini", "gpt-5-nano", "o3", "o1",
134+
"o4-mini", "o3-mini", "o1-mini", "gpt-4.5-preview", "claude-opus-4-0",
135+
"claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-haiku". - Recommended
136+
models for accuracy: "gpt-5", "gpt-4.1", "o4-mini", "o3", "claude-opus-4-0",
137+
"claude-sonnet-4-0". - Recommended models for low latency/costs: "gpt-4.1-nano",
138+
"nova-micro".
138139
139140
log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
140141
For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
@@ -652,8 +653,6 @@ class MessageChatCompletionDeveloperMessageParam(TypedDict, total=False):
652653
class Options(TypedDict, total=False):
653654
custom_eval_criteria: Iterable[object]
654655

655-
disable_persistence: bool
656-
657656
disable_trustworthiness: bool
658657

659658
log: List[str]

src/codex/types/project_validate_response.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,9 +59,6 @@ class ProjectValidateResponse(BaseModel):
5959
to answer, if it does not already exist.
6060
"""
6161

62-
log_id: str
63-
"""The UUID of the query log entry created for this validation request."""
64-
6562
should_guardrail: bool
6663
"""
6764
True if the response should be guardrailed by the AI system, False if the

0 commit comments

Comments (0)