
Commit 41b210d

feat(api): api update
1 parent b87206f commit 41b210d

File tree: 6 files changed, +180 -10 lines changed


.stats.yml

Lines changed: 1 addition & 1 deletion

@@ -1,3 +1,3 @@
 configured_endpoints: 65
-openapi_spec_hash: 80696dc202de8bacc0e43506d7c210b0
+openapi_spec_hash: 8bc7a64933cd540d1d2499d055430832
 config_hash: 14b2643a0ec60cf326dfed00939644ff

src/codex/types/projects/query_log_list_by_group_response.py

Lines changed: 32 additions & 1 deletion

@@ -10,10 +10,31 @@
     "QueryLogListByGroupResponse",
     "QueryLogsByGroup",
     "QueryLogsByGroupQueryLog",
+    "QueryLogsByGroupQueryLogFormattedEscalationEvalScores",
+    "QueryLogsByGroupQueryLogFormattedEvalScores",
+    "QueryLogsByGroupQueryLogFormattedGuardrailEvalScores",
     "QueryLogsByGroupQueryLogContext",
 ]
 
 
+class QueryLogsByGroupQueryLogFormattedEscalationEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogsByGroupQueryLogFormattedEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogsByGroupQueryLogFormattedGuardrailEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
 class QueryLogsByGroupQueryLogContext(BaseModel):
     content: str
     """The actual content/text of the document."""
@@ -36,14 +57,18 @@ class QueryLogsByGroupQueryLog(BaseModel):
 
     created_at: datetime
 
-    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
+    formatted_escalation_eval_scores: Optional[Dict[str, QueryLogsByGroupQueryLogFormattedEscalationEvalScores]] = None
+
+    formatted_eval_scores: Optional[Dict[str, QueryLogsByGroupQueryLogFormattedEvalScores]] = None
     """Format evaluation scores for frontend display with pass/fail status.
 
     Returns: Dictionary mapping eval keys to their formatted representation: {
     "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if
     eval_scores is None.
     """
 
+    formatted_guardrail_eval_scores: Optional[Dict[str, QueryLogsByGroupQueryLogFormattedGuardrailEvalScores]] = None
+
     is_bad_response: bool
 
     project_id: str
@@ -67,6 +92,9 @@ class QueryLogsByGroupQueryLog(BaseModel):
     escalated: Optional[bool] = None
     """If true, the question was escalated to Codex for an SME to review"""
 
+    escalation_evals: Optional[List[str]] = None
+    """Evals that should trigger escalation to SME"""
+
     eval_issue_labels: Optional[List[str]] = None
     """Labels derived from evaluation scores"""
 
@@ -79,6 +107,9 @@ class QueryLogsByGroupQueryLog(BaseModel):
     evaluated_response: Optional[str] = None
     """The response being evaluated from the RAG system (before any remediation)"""
 
+    guardrail_evals: Optional[List[str]] = None
+    """Evals that should trigger guardrail"""
+
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""
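The remaining response models below receive the same change as this file: the loosely typed formatted_eval_scores (previously Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]) becomes a mapping to a dedicated per-eval model with a score and a pass/fail status, and parallel formatted_escalation_eval_scores and formatted_guardrail_eval_scores fields are added alongside new escalation_evals and guardrail_evals lists. A minimal sketch of what this means for consuming code, using plain pydantic v2 in place of the SDK's internal BaseModel and a made-up payload; the eval key "trustworthiness" is invented for illustration, and the SDK actually generates a separate score class per response model rather than the single class shown here:

# Illustrative sketch only: plain pydantic v2 stands in for codex's internal BaseModel,
# and the payload below is invented to mirror the shapes shown in this diff.
from typing import Dict, Literal, Optional

from pydantic import BaseModel


class FormattedEvalScore(BaseModel):
    score: float
    status: Literal["pass", "fail"]


class QueryLogSketch(BaseModel):
    # Before this commit: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]]
    formatted_eval_scores: Optional[Dict[str, FormattedEvalScore]] = None
    formatted_escalation_eval_scores: Optional[Dict[str, FormattedEvalScore]] = None
    formatted_guardrail_eval_scores: Optional[Dict[str, FormattedEvalScore]] = None


log = QueryLogSketch.model_validate(
    {
        "formatted_eval_scores": {"trustworthiness": {"score": 0.42, "status": "fail"}},
        "formatted_guardrail_eval_scores": {"pii": {"score": 0.98, "status": "pass"}},
    }
)

# Old access pattern (dict-of-dicts): log.formatted_eval_scores["trustworthiness"]["score"]
# New access pattern: typed attribute access instead of a second untyped key lookup.
if log.formatted_eval_scores is not None:
    for eval_key, entry in log.formatted_eval_scores.items():
        print(eval_key, entry.score, entry.status)

The practical difference for callers is attribute access with validated types (entry.score, entry.status) in place of a second untyped dict lookup.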

src/codex/types/projects/query_log_list_groups_response.py

Lines changed: 37 additions & 2 deletions

@@ -6,7 +6,32 @@
 
 from ..._models import BaseModel
 
-__all__ = ["QueryLogListGroupsResponse", "QueryLogGroup", "QueryLogGroupContext"]
+__all__ = [
+    "QueryLogListGroupsResponse",
+    "QueryLogGroup",
+    "QueryLogGroupFormattedEscalationEvalScores",
+    "QueryLogGroupFormattedEvalScores",
+    "QueryLogGroupFormattedGuardrailEvalScores",
+    "QueryLogGroupContext",
+]
+
+
+class QueryLogGroupFormattedEscalationEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogGroupFormattedEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogGroupFormattedGuardrailEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
 
 
 class QueryLogGroupContext(BaseModel):
@@ -31,14 +56,18 @@ class QueryLogGroup(BaseModel):
 
     created_at: datetime
 
-    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
+    formatted_escalation_eval_scores: Optional[Dict[str, QueryLogGroupFormattedEscalationEvalScores]] = None
+
+    formatted_eval_scores: Optional[Dict[str, QueryLogGroupFormattedEvalScores]] = None
     """Format evaluation scores for frontend display with pass/fail status.
 
     Returns: Dictionary mapping eval keys to their formatted representation: {
     "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if
     eval_scores is None.
     """
 
+    formatted_guardrail_eval_scores: Optional[Dict[str, QueryLogGroupFormattedGuardrailEvalScores]] = None
+
     is_bad_response: bool
 
     needs_review: bool
@@ -68,6 +97,9 @@ class QueryLogGroup(BaseModel):
     escalated: Optional[bool] = None
     """If true, the question was escalated to Codex for an SME to review"""
 
+    escalation_evals: Optional[List[str]] = None
+    """Evals that should trigger escalation to SME"""
+
     eval_issue_labels: Optional[List[str]] = None
     """Labels derived from evaluation scores"""
 
@@ -80,6 +112,9 @@ class QueryLogGroup(BaseModel):
     evaluated_response: Optional[str] = None
     """The response being evaluated from the RAG system (before any remediation)"""
 
+    guardrail_evals: Optional[List[str]] = None
+    """Evals that should trigger guardrail"""
+
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""

src/codex/types/projects/query_log_list_response.py

Lines changed: 37 additions & 2 deletions

@@ -6,7 +6,32 @@
 
 from ..._models import BaseModel
 
-__all__ = ["QueryLogListResponse", "QueryLog", "QueryLogContext"]
+__all__ = [
+    "QueryLogListResponse",
+    "QueryLog",
+    "QueryLogFormattedEscalationEvalScores",
+    "QueryLogFormattedEvalScores",
+    "QueryLogFormattedGuardrailEvalScores",
+    "QueryLogContext",
+]
+
+
+class QueryLogFormattedEscalationEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogFormattedEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogFormattedGuardrailEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
 
 
 class QueryLogContext(BaseModel):
@@ -31,14 +56,18 @@ class QueryLog(BaseModel):
 
     created_at: datetime
 
-    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
+    formatted_escalation_eval_scores: Optional[Dict[str, QueryLogFormattedEscalationEvalScores]] = None
+
+    formatted_eval_scores: Optional[Dict[str, QueryLogFormattedEvalScores]] = None
     """Format evaluation scores for frontend display with pass/fail status.
 
    Returns: Dictionary mapping eval keys to their formatted representation: {
    "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if
    eval_scores is None.
    """
 
+    formatted_guardrail_eval_scores: Optional[Dict[str, QueryLogFormattedGuardrailEvalScores]] = None
+
     is_bad_response: bool
 
     project_id: str
@@ -62,6 +91,9 @@ class QueryLog(BaseModel):
     escalated: Optional[bool] = None
     """If true, the question was escalated to Codex for an SME to review"""
 
+    escalation_evals: Optional[List[str]] = None
+    """Evals that should trigger escalation to SME"""
+
     eval_issue_labels: Optional[List[str]] = None
     """Labels derived from evaluation scores"""
 
@@ -74,6 +106,9 @@ class QueryLog(BaseModel):
     evaluated_response: Optional[str] = None
     """The response being evaluated from the RAG system (before any remediation)"""
 
+    guardrail_evals: Optional[List[str]] = None
+    """Evals that should trigger guardrail"""
+
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""

src/codex/types/projects/query_log_retrieve_response.py

Lines changed: 36 additions & 2 deletions

@@ -6,7 +6,31 @@
 
 from ..._models import BaseModel
 
-__all__ = ["QueryLogRetrieveResponse", "Context"]
+__all__ = [
+    "QueryLogRetrieveResponse",
+    "FormattedEscalationEvalScores",
+    "FormattedEvalScores",
+    "FormattedGuardrailEvalScores",
+    "Context",
+]
+
+
+class FormattedEscalationEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class FormattedEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class FormattedGuardrailEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
 
 
 class Context(BaseModel):
@@ -31,14 +55,18 @@ class QueryLogRetrieveResponse(BaseModel):
 
     created_at: datetime
 
-    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
+    formatted_escalation_eval_scores: Optional[Dict[str, FormattedEscalationEvalScores]] = None
+
+    formatted_eval_scores: Optional[Dict[str, FormattedEvalScores]] = None
     """Format evaluation scores for frontend display with pass/fail status.
 
     Returns: Dictionary mapping eval keys to their formatted representation: {
     "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if
     eval_scores is None.
     """
 
+    formatted_guardrail_eval_scores: Optional[Dict[str, FormattedGuardrailEvalScores]] = None
+
     is_bad_response: bool
 
     project_id: str
@@ -62,6 +90,9 @@ class QueryLogRetrieveResponse(BaseModel):
     escalated: Optional[bool] = None
     """If true, the question was escalated to Codex for an SME to review"""
 
+    escalation_evals: Optional[List[str]] = None
+    """Evals that should trigger escalation to SME"""
+
     eval_issue_labels: Optional[List[str]] = None
     """Labels derived from evaluation scores"""
 
@@ -74,6 +105,9 @@ class QueryLogRetrieveResponse(BaseModel):
     evaluated_response: Optional[str] = None
     """The response being evaluated from the RAG system (before any remediation)"""
 
+    guardrail_evals: Optional[List[str]] = None
+    """Evals that should trigger guardrail"""
+
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""

src/codex/types/projects/remediation_list_resolved_logs_response.py

Lines changed: 37 additions & 2 deletions

@@ -6,7 +6,32 @@
 
 from ..._models import BaseModel
 
-__all__ = ["RemediationListResolvedLogsResponse", "QueryLog", "QueryLogContext"]
+__all__ = [
+    "RemediationListResolvedLogsResponse",
+    "QueryLog",
+    "QueryLogFormattedEscalationEvalScores",
+    "QueryLogFormattedEvalScores",
+    "QueryLogFormattedGuardrailEvalScores",
+    "QueryLogContext",
+]
+
+
+class QueryLogFormattedEscalationEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogFormattedEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
+
+
+class QueryLogFormattedGuardrailEvalScores(BaseModel):
+    score: float
+
+    status: Literal["pass", "fail"]
 
 
 class QueryLogContext(BaseModel):
@@ -31,14 +56,18 @@ class QueryLog(BaseModel):
 
     created_at: datetime
 
-    formatted_eval_scores: Optional[Dict[str, Dict[str, Union[float, Literal["pass", "fail"]]]]] = None
+    formatted_escalation_eval_scores: Optional[Dict[str, QueryLogFormattedEscalationEvalScores]] = None
+
+    formatted_eval_scores: Optional[Dict[str, QueryLogFormattedEvalScores]] = None
     """Format evaluation scores for frontend display with pass/fail status.
 
     Returns: Dictionary mapping eval keys to their formatted representation: {
     "eval_key": { "score": float, "status": "pass" | "fail" } } Returns None if
     eval_scores is None.
     """
 
+    formatted_guardrail_eval_scores: Optional[Dict[str, QueryLogFormattedGuardrailEvalScores]] = None
+
     is_bad_response: bool
 
     project_id: str
@@ -62,6 +91,9 @@ class QueryLog(BaseModel):
     escalated: Optional[bool] = None
     """If true, the question was escalated to Codex for an SME to review"""
 
+    escalation_evals: Optional[List[str]] = None
+    """Evals that should trigger escalation to SME"""
+
     eval_issue_labels: Optional[List[str]] = None
     """Labels derived from evaluation scores"""
 
@@ -74,6 +106,9 @@ class QueryLog(BaseModel):
     evaluated_response: Optional[str] = None
     """The response being evaluated from the RAG system (before any remediation)"""
 
+    guardrail_evals: Optional[List[str]] = None
+    """Evals that should trigger guardrail"""
+
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""
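Across all of these models, the field docstrings suggest that escalation_evals and guardrail_evals name the evals that should trigger escalation or a guardrail, while the corresponding formatted_*_eval_scores dicts carry each eval's score and pass/fail status. A hedged sketch of how a consumer might cross-reference the two, assuming the list entries match the dict keys (this correspondence is an assumption, not something the diff states) and using plain dicts with invented eval names rather than the SDK models:

from typing import Dict, List, Optional

# Hypothetical values standing in for fields read off a query log response;
# the eval names and the list/dict correspondence are assumptions for illustration.
guardrail_evals: Optional[List[str]] = ["context_sufficiency", "response_groundedness"]
formatted_guardrail_eval_scores: Optional[Dict[str, dict]] = {
    "context_sufficiency": {"score": 0.31, "status": "fail"},
    "response_groundedness": {"score": 0.88, "status": "pass"},
}


def failed_guardrail_evals(
    evals: Optional[List[str]], scores: Optional[Dict[str, dict]]
) -> List[str]:
    """Return the guardrail-triggering evals whose formatted status is 'fail'."""
    if not evals or not scores:
        return []
    return [name for name in evals if scores.get(name, {}).get("status") == "fail"]


print(failed_guardrail_evals(guardrail_evals, formatted_guardrail_eval_scores))
# ['context_sufficiency']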
