@@ -92,6 +92,7 @@ type RunRequest struct {
92
92
// Sampling temperature between 0 and 2. Higher values like 0.8 are more random.
93
93
// lower values are more focused and deterministic.
94
94
Temperature * float32 `json:"temperature,omitempty"`
95
+ TopP * float32 `json:"top_p,omitempty"`
95
96
96
97
// The maximum number of prompt tokens that may be used over the course of the run.
97
98
// If the run exceeds the number of prompt tokens specified, the run will end with status 'complete'.
@@ -103,6 +104,11 @@ type RunRequest struct {
103
104
104
105
// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
105
106
TruncationStrategy * ThreadTruncationStrategy `json:"truncation_strategy,omitempty"`
107
+
108
+ // This can be either a string or a ToolChoice object.
109
+ ToolChoice any `json:"tool_choice,omitempty"`
110
+ // This can be either a string or a ResponseFormat object.
111
+ ResponseFormat any `json:"response_format,omitempty"`
106
112
}
107
113
108
114
// ThreadTruncationStrategy defines the truncation strategy to use for the thread.
@@ -124,6 +130,13 @@ const (
124
130
TruncationStrategyLastMessages = TruncationStrategy ("last_messages" )
125
131
)
126
132
133
// ReponseFormat specifies the format the model must output.
// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-response_format.
// Type can either be text or json_object.
//
// NOTE(review): the exported name is missing an "s" ("Reponse" instead of
// "Response"). Renaming it now would break the public API; consider adding
// a correctly spelled type alias and marking this name as deprecated.
type ReponseFormat struct {
	// Type selects the output format; per the linked docs this is
	// "text" or "json_object".
	Type string `json:"type"`
}
139
+
127
140
// RunModifyRequest is the request body used when modifying a run.
// Only the metadata key/value pairs can be updated; omitted metadata
// leaves the run unchanged (the field is tagged omitempty).
type RunModifyRequest struct {
	// Metadata holds arbitrary key/value pairs attached to the run.
	Metadata map[string]any `json:"metadata,omitempty"`
}
0 commit comments