-use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
-use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use crate::error::Result;
 use crate::model_family::ModelFamily;
 use crate::models::ContentItem;
 use crate::models::ResponseItem;
 use crate::openai_tools::OpenAiTool;
 use crate::protocol::TokenUsage;
 use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS;
+use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig;
+use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig;
 use futures::Stream;
 use serde::Serialize;
 use std::borrow::Cow;
@@ -85,55 +85,8 @@ pub enum ResponseEvent {
 
 #[derive(Debug, Serialize)]
 pub(crate) struct Reasoning {
-    pub(crate) effort: OpenAiReasoningEffort,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub(crate) summary: Option<OpenAiReasoningSummary>,
-}
-
-/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
-#[derive(Debug, Serialize, Default, Clone, Copy)]
-#[serde(rename_all = "lowercase")]
-pub(crate) enum OpenAiReasoningEffort {
-    Minimal,
-    Low,
-    #[default]
-    Medium,
-    High,
-}
-
-impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
-    fn from(effort: ReasoningEffortConfig) -> Self {
-        match effort {
-            ReasoningEffortConfig::Minimal => Some(OpenAiReasoningEffort::Minimal),
-            ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
-            ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
-            ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
-            ReasoningEffortConfig::None => None,
-        }
-    }
-}
-
-/// A summary of the reasoning performed by the model. This can be useful for
-/// debugging and understanding the model's reasoning process.
-/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
-#[derive(Debug, Serialize, Default, Clone, Copy)]
-#[serde(rename_all = "lowercase")]
-pub(crate) enum OpenAiReasoningSummary {
-    #[default]
-    Auto,
-    Concise,
-    Detailed,
-}
-
-impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
-    fn from(summary: ReasoningSummaryConfig) -> Self {
-        match summary {
-            ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
-            ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
-            ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
-            ReasoningSummaryConfig::None => None,
-        }
-    }
+    pub(crate) effort: ReasoningEffortConfig,
+    pub(crate) summary: ReasoningSummaryConfig,
 }
 
 /// Request object that is serialized as JSON and POST'ed when using the
@@ -164,12 +117,7 @@ pub(crate) fn create_reasoning_param_for_request(
     summary: ReasoningSummaryConfig,
 ) -> Option<Reasoning> {
     if model_family.supports_reasoning_summaries {
-        let effort: Option<OpenAiReasoningEffort> = effort.into();
-        let effort = effort?;
-        Some(Reasoning {
-            effort,
-            summary: summary.into(),
-        })
+        Some(Reasoning { effort, summary })
     } else {
         None
    }
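For context on what the simplified struct puts on the wire, here is a minimal standalone sketch, not the actual codex-rs code. The `ReasoningEffort` and `ReasoningSummary` stand-ins are assumptions: they only mirror the lowercase serde renaming that the removed `OpenAi*` enums used, on the premise that the `codex_protocol::config_types` enums serialize the same way. The sketch needs the `serde` (with the `derive` feature) and `serde_json` crates.

// Standalone sketch, not the codex-rs source: stand-in enums that are assumed
// to serialize like codex_protocol::config_types::ReasoningEffort/ReasoningSummary,
// used to show what the post-change `Reasoning` struct looks like as JSON.
use serde::Serialize;

#[allow(dead_code)]
#[derive(Debug, Serialize, Clone, Copy)]
#[serde(rename_all = "lowercase")]
enum ReasoningEffort {
    Minimal,
    Low,
    Medium,
    High,
}

#[allow(dead_code)]
#[derive(Debug, Serialize, Clone, Copy)]
#[serde(rename_all = "lowercase")]
enum ReasoningSummary {
    Auto,
    Concise,
    Detailed,
}

// Mirrors the struct after this change: the protocol enums are embedded
// directly, with no intermediate OpenAi* conversion layer.
#[derive(Debug, Serialize)]
struct Reasoning {
    effort: ReasoningEffort,
    summary: ReasoningSummary,
}

fn main() {
    let reasoning = Reasoning {
        effort: ReasoningEffort::Medium,
        summary: ReasoningSummary::Auto,
    };
    // Prints: {"effort":"medium","summary":"auto"}
    println!("{}", serde_json::to_string(&reasoning).unwrap());
}

If that serialization assumption holds, dropping the intermediate `OpenAiReasoningEffort`/`OpenAiReasoningSummary` layer leaves the request payload unchanged for the effort and summary variants shown above. The one behavioral difference visible in this hunk is that `create_reasoning_param_for_request` no longer early-returns via the old `ReasoningEffortConfig::None => None` mapping, so any "no reasoning" handling presumably lives in the protocol types or upstream of this function.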