@@ -101,18 +101,32 @@ public Model(
101101 /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat.
102102 /// Will be updated with our latest model iteration.
103103 /// </summary>
104+ /// <remarks>
105+ /// Context Window: 8,192 tokens
106+ /// </remarks>
104107 public static Model GPT4 { get ; } = new ( "gpt-4" , "openai" ) ;
105108
109+ /// <summary>
110+ /// The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling.
111+ /// </summary>
112+ /// <remarks>
113+ /// Context Window: 128,000 tokens
114+ /// </remarks>
115+ public static Model GPT4_Turbo { get ; } = new ( "gpt-4-turbo" , "openai" ) ;
116+
106117 /// <summary>
107118 /// Same capabilities as the base gpt-4 mode but with 4x the context length.
108119 /// Will be updated with our latest model iteration. Tokens are 2x the price of gpt-4.
109120 /// </summary>
110121 public static Model GPT4_32K { get ; } = new ( "gpt-4-32k" , "openai" ) ;
111122
112123 /// <summary>
113- /// Because gpt-3.5-turbo performs at a similar capability to text-davinci-003 but at 10%
114- /// the price per token, we recommend gpt-3.5-turbo for most use cases.
124+ /// GPT-3.5 Turbo models can understand and generate natural language or code and have been optimized for chat
125+ /// using the Chat Completions API but work well for non-chat tasks as well.
115126 /// </summary>
127+ /// <remarks>
128+ /// Context Window: 4,096 tokens
129+ /// </remarks>
116130 public static Model GPT3_5_Turbo { get ; } = new ( "gpt-3.5-turbo" , "openai" ) ;
117131
118132 /// <summary>
@@ -122,71 +136,84 @@ public Model(
122136 public static Model GPT3_5_Turbo_16K { get ; } = new ( "gpt-3.5-turbo-16k" , "openai" ) ;
123137
124138 /// <summary>
125- /// The most powerful, largest engine available, although the speed is quite slow.<para/>
126- /// Good at: Complex intent, cause and effect, summarization for audience
139+ /// Replacement for the GPT-3 curie and davinci base models.
127140 /// </summary>
128- public static Model Davinci { get ; } = new ( "text-davinci-003" , "openai" ) ;
141+ public static Model Davinci { get ; } = new ( "davinci-002" , "openai" ) ;
129142
130143 /// <summary>
131- /// For edit requests.
144+ /// Replacement for the GPT-3 ada and babbage base models.
132145 /// </summary>
133- public static Model DavinciEdit { get ; } = new ( "text-davinci-edit-001" , "openai" ) ;
146+ public static Model Babbage { get ; } = new ( "babbage-002" , "openai" ) ;
134147
135148 /// <summary>
136- /// The 2nd most powerful engine, a bit faster than <see cref="Davinci"/>, and a bit faster.<para/>
137- /// Good at: Language translation, complex classification, text sentiment, summarization.
149+ /// The default model for <see cref="Embeddings.EmbeddingsEndpoint"/>.
138150 /// </summary>
139- public static Model Curie { get ; } = new ( "text-curie-001" , "openai" ) ;
151+ public static Model Embedding_Ada_002 { get ; } = new ( "text-embedding-ada-002" , "openai" ) ;
140152
141153 /// <summary>
142- /// The 2nd fastest engine, a bit more powerful than <see cref="Ada"/>, and a bit slower.<para/>
143- /// Good at: Moderate classification, semantic search classification
154+ /// A highly efficient model which provides a significant upgrade over its predecessor, the text-embedding-ada-002 model.
144155 /// </summary>
145- public static Model Babbage { get ; } = new ( "text-babbage-001" , "openai" ) ;
156+ public static Model Embedding_3_Small { get ; } = new ( "text-embedding-3-small" , "openai" ) ;
146157
147158 /// <summary>
148- /// The smallest, fastest engine available, although the quality of results may be poor.<para/>
149- /// Good at: Parsing text, simple classification, address correction, keywords
159+ /// Most capable embedding model for both english and non-english tasks with embeddings of up to 3072 dimensions.
150160 /// </summary>
151- public static Model Ada { get ; } = new ( "text-ada-001" , "openai" ) ;
161+ public static Model Embedding_3_Large { get ; } = new ( "text-embedding-3-large" , "openai" ) ;
152162
153163 /// <summary>
154- /// The default model for <see cref="Embeddings.EmbeddingsEndpoint"/>.
164+ /// The default model for <see cref="Moderations.ModerationsEndpoint"/>.
155165 /// </summary>
156- public static Model Embedding_Ada_002 { get ; } = new ( "text-embedding-ada-002" , "openai" ) ;
166+ public static Model Moderation_Latest { get ; } = new ( "text-moderation-latest" , "openai" ) ;
167+
168+ public static Model Moderation_Stable { get ; } = new ( "text-moderation-stable" , "openai" ) ;
157169
158170 /// <summary>
159- /// A highly efficient model which provides a significant upgrade over its predecessor, the text-embedding-ada-002 model.
171+ /// The latest text to speech model, optimized for speed.
160172 /// </summary>
161- public static Model Embedding_3_Small { get ; } = new ( "text-embedding-3-small" , "openai" ) ;
173+ /// <remarks>
174+ /// The default model for <see cref="Audio.SpeechRequest"/>s.
175+ /// </remarks>
176+ public static Model TTS_1 { get ; } = new ( "tts-1" , "openai" ) ;
162177
163178 /// <summary>
164- /// A next generation larger model with embeddings of up to 3072 dimensions.
179+ /// The latest text to speech model, optimized for quality.
165180 /// </summary>
166- public static Model Embedding_3_Large { get ; } = new ( "text-embedding-3-large" , "openai" ) ;
181+ public static Model TTS_1HD { get ; } = new ( "tts-1-hd" , "openai" ) ;
167182
168183 /// <summary>
169184 /// The default model for <see cref="Audio.AudioEndpoint"/>.
170185 /// </summary>
171186 public static Model Whisper1 { get ; } = new ( "whisper-1" , "openai" ) ;
172187
173188 /// <summary>
174- /// The default model for <see cref="Moderations.ModerationsEndpoint"/>.
189+ /// The default model for <see cref="Images.ImagesEndpoint"/>.
175190 /// </summary>
176- public static Model Moderation_Latest { get ; } = new ( "text-moderation-latest" , "openai" ) ;
191+ public static Model DallE_2 { get ; } = new ( "dall-e-2" , "openai" ) ;
192+
193+ public static Model DallE_3 { get ; } = new ( "dall-e-3" , "openai" ) ;
194+
195+ #region Obsolete
177196
178197 /// <summary>
179- /// The default model for <see cref="Audio.SpeechRequest"/>s.
198+ /// For edit requests.
180199 /// </summary>
181- public static Model TTS_1 { get ; } = new ( "tts-1" , "openai" ) ;
200+ [ Obsolete ( "Removed" ) ]
201+ public static Model DavinciEdit { get ; } = new ( "text-davinci-edit-001" , "openai" ) ;
182202
183- public static Model TTS_1HD { get ; } = new ( "tts-1-hd" , "openai" ) ;
203+ /// <summary>
204+ /// The 2nd most powerful engine, a bit faster than <see cref="Davinci"/>, and a bit faster.<para/>
205+ /// Good at: Language translation, complex classification, text sentiment, summarization.
206+ /// </summary>
207+ [ Obsolete ( "Removed" ) ]
208+ public static Model Curie { get ; } = new ( "text-curie-001" , "openai" ) ;
184209
185210 /// <summary>
186- /// The default model for <see cref="Images.ImagesEndpoint"/>.
211+ /// The smallest, fastest engine available, although the quality of results may be poor.<para/>
212+ /// Good at: Parsing text, simple classification, address correction, keywords
187213 /// </summary>
188- public static Model DallE_2 { get ; } = new ( "dall-e-2" , "openai" ) ;
214+ [ Obsolete ( "Removed" ) ]
215+ public static Model Ada { get ; } = new ( "text-ada-001" , "openai" ) ;
189216
190- public static Model DallE_3 { get ; } = new ( "dall-e-3" , "openai" ) ;
217+ #endregion Obsolete
191218 }
192219}
0 commit comments