@@ -17,6 +17,9 @@ namespace OpenAI {
1717 * Provides type safety and IDE auto-completion for model selection
1818 */
1919enum class Model {
20+ // GPT-5 series (Latest - 2025)
21+ GPT_5, // gpt-5 - Next-generation model
22+
2023 // O3 series (Latest - 2025)
2124 O3, // o3 - Latest reasoning model
2225 O3_Mini, // o3-mini - Cost-effective reasoning model
@@ -56,6 +59,8 @@ enum class Model {
5659 */
5760inline std::string toString (Model model) {
5861 switch (model) {
62+ case Model::GPT_5:
63+ return " gpt-5" ;
5964 case Model::O3:
6065 return " o3" ;
6166 case Model::O3_Mini:
@@ -96,6 +101,7 @@ inline std::string toString(Model model) {
96101 * Convert API string to OpenAI Model enum
97102 */
98103inline Model modelFromString (const std::string& modelStr) {
104+ if (modelStr == " gpt-5" ) return Model::GPT_5;
99105 if (modelStr == " o3" ) return Model::O3;
100106 if (modelStr == " o3-mini" ) return Model::O3_Mini;
101107 if (modelStr == " o1" ) return Model::O1;
@@ -119,6 +125,7 @@ inline Model modelFromString(const std::string& modelStr) {
119125 */
120126inline bool supportsStructuredOutputs (Model model) {
121127 switch (model) {
128+ case Model::GPT_5:
122129 case Model::O3:
123130 case Model::O3_Mini:
124131 case Model::O1:
@@ -592,7 +599,7 @@ struct McpApprovalResponse {
592599};
593600
594601// Response status enumeration
595- enum class ResponseStatus { Queued, InProgress, Completed, Failed, Cancelled };
602+ enum class ResponseStatus { Queued, InProgress, Completed, Failed, Cancelled, Incomplete };
596603
597604inline std::string toString (ResponseStatus status) {
598605 switch (status) {
@@ -606,6 +613,8 @@ inline std::string toString(ResponseStatus status) {
606613 return " failed" ;
607614 case ResponseStatus::Cancelled:
608615 return " cancelled" ;
616+ case ResponseStatus::Incomplete:
617+ return " incomplete" ;
609618 }
610619 return " " ;
611620}
@@ -616,6 +625,7 @@ inline ResponseStatus responseStatusFromString(const std::string& str) {
616625 if (str == " completed" ) return ResponseStatus::Completed;
617626 if (str == " failed" ) return ResponseStatus::Failed;
618627 if (str == " cancelled" ) return ResponseStatus::Cancelled;
628+ if (str == " incomplete" ) return ResponseStatus::Incomplete;
619629 throw std::invalid_argument (" Invalid response status: " + str);
620630}
621631
@@ -656,11 +666,11 @@ struct ResponsesRequest {
656666 // Convert model string to enum for easier checking
657667 auto modelEnum = modelFromString (model);
658668
659- // Reasoning models (O-series) have different parameter support
660- if (modelEnum == Model::O3 || modelEnum == Model::O3_Mini || modelEnum == Model::O1 ||
661- modelEnum == Model::O1_Mini || modelEnum == Model::O1_Preview ||
662- modelEnum == Model::O1_Pro || modelEnum == Model::O4_Mini ||
663- modelEnum == Model::O4_Mini_Deep_Research) {
669+ // Reasoning models (O-series + GPT-5) have different parameter support
670+ if (modelEnum == Model::GPT_5 || modelEnum == Model::O3 || modelEnum == Model::O3_Mini ||
671+ modelEnum == Model::O1 || modelEnum == Model::O1_Mini ||
672+ modelEnum == Model::O1_Preview || modelEnum == Model::O1_Pro ||
673+ modelEnum == Model::O4_Mini || modelEnum == Model::O4_Mini_Deep_Research) {
664674 // Parameters NOT supported by reasoning models
665675 if (paramName == " temperature" || paramName == " top_p" || paramName == " top_logprobs" ||
666676 paramName == " truncation" ) {
@@ -1004,8 +1014,8 @@ std::string getRecommendedApiForModel(const std::string& model);
10041014
10051015// Model lists for different APIs
10061016const std::vector<std::string> RESPONSES_MODELS = {
1007- " gpt-4o" , " gpt-4o-mini" , " gpt-4.1" , " gpt-4.1-nano" , " gpt-4.1-mini" , " gpt-image-1" ,
1008- " o1" , " o3-mini" , " o3" , " o4-mini" , " computer-use-preview" };
1017+ " gpt-5" , " gpt-4o" , " gpt-4o-mini" , " gpt-4.1" , " gpt-4.1-nano" , " gpt-4.1-mini" ,
1018+ " gpt-image-1" , " o1" , " o3-mini" , " o3" , " o4-mini" , " computer-use-preview" };
10091019
10101020const std::vector<std::string> CHAT_COMPLETION_MODELS = {" gpt-4" , " gpt-4-turbo" , " gpt-4o" ,
10111021 " gpt-4o-mini" , " gpt-3.5-turbo" };
@@ -1029,21 +1039,38 @@ inline ResponsesRequest ResponsesRequest::fromLLMRequest(const LLMRequest& reque
10291039 if (!request.context .empty ()) {
10301040 // Convert context (vector of json) to InputMessages
10311041 std::vector<InputMessage> messages;
1042+
10321043 for (const auto & contextItem : request.context ) {
1044+ // Case 1: Single JSON object with role/content
10331045 if (contextItem.is_object () && contextItem.contains (" role" ) &&
10341046 contextItem.contains (" content" )) {
10351047 InputMessage msg;
10361048 msg.role = InputMessage::stringToRole (contextItem[" role" ].get <std::string>());
10371049 msg.content = contextItem[" content" ].get <std::string>();
10381050 messages.push_back (msg);
1039- } else {
1040- // If it's not a proper message format, treat as user message
1041- InputMessage msg;
1042- msg.role = InputMessage::Role::User;
1043- msg.content = contextItem.dump ();
1044- messages.push_back (msg);
1051+ continue ;
10451052 }
1053+
1054+ // Case 2: Array of message-like objects [{role, content}, ...]
1055+ if (contextItem.is_array ()) {
1056+ for (const auto & item : contextItem) {
1057+ if (item.is_object () && item.contains (" role" ) && item.contains (" content" )) {
1058+ InputMessage msg;
1059+ msg.role = InputMessage::stringToRole (item[" role" ].get <std::string>());
1060+ msg.content = item[" content" ].get <std::string>();
1061+ messages.push_back (msg);
1062+ }
1063+ }
1064+ continue ;
1065+ }
1066+
1067+ // Fallback: stringify unknown item as a user message
1068+ InputMessage msg;
1069+ msg.role = InputMessage::Role::User;
1070+ msg.content = contextItem.dump ();
1071+ messages.push_back (msg);
10461072 }
1073+
10471074 responsesReq.input = ResponsesInput::fromContentList (messages);
10481075 } else if (!request.prompt .empty ()) {
10491076 // If context is empty but prompt is present, use prompt as input
0 commit comments