Skip to content

Commit a5e3681

Browse files
committed
Fix compilation errors and implement missing OpenAI types methods
- Add OpenAITypes.cpp to CMakeLists.txt
- Implement missing methods and struct definitions
- Fix test failures
- All tests passing (108 passed, 8 skipped)
1 parent d373369 commit a5e3681

File tree

12 files changed

+682
-808
lines changed

12 files changed

+682
-808
lines changed

CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,7 @@ set(LLMCPP_SOURCES
7272
src/openai/OpenAIResponsesApi.cpp
7373
src/openai/OpenAISchemaBuilder.cpp
7474
src/openai/OpenAIModels.cpp
75+
src/openai/OpenAITypes.cpp
7576
src/openai/OpenAIUtils.cpp
7677
src/anthropic/AnthropicClient.cpp
7778
src/anthropic/AnthropicHttpClient.cpp

include/core/LLMTypes.h

Lines changed: 3 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -12,98 +12,11 @@
1212

1313
using json = nlohmann::json;
1414

15-
// Context type using standard C++ vectors of generic objects
16-
using LLMContext = std::vector<json>;
15+
// LLMRequestConfig and LLMContext are now defined in OpenAITypes.h to avoid circular dependency
1716

18-
/// Represents the configuration for the LLM
19-
struct LLMRequestConfig {
20-
std::string client;
21-
std::string model; // String model name (works with any provider)
22-
std::string functionName = "llm_function"; // Default function name for LLM calls
23-
std::string jsonSchema;
24-
std::optional<json> schemaObject; // Structured schema data
17+
// LLMRequest is now defined in OpenAITypes.h to avoid circular dependency
2518

26-
std::optional<float> temperature; // Optional temperature (filtered by model support)
27-
std::optional<int> maxTokens; // Optional max tokens
28-
std::optional<std::vector<OpenAI::ToolVariant>> tools; // Optional tools for function calling
29-
30-
// Convenience method for any model name
31-
void setModel(const std::string& modelName) { model = modelName; }
32-
33-
std::string getModelString() const { return model; }
34-
35-
// Add more configuration options as needed (e.g., top_p, stop sequences, etc.)
36-
37-
std::string toString() const {
38-
std::string schemaStr = schemaObject.has_value() ? schemaObject->dump() : jsonSchema;
39-
std::string tempStr = temperature.has_value() ? std::to_string(*temperature) : "not set";
40-
std::string toolsStr = tools.has_value() ? std::to_string(tools->size()) + " tools" : "no tools";
41-
return "LLMRequestConfig { client: " + client + ", model: " + getModelString() +
42-
", functionName: " + functionName + ", schema: " + schemaStr +
43-
", temperature: " + tempStr +
44-
", maxTokens: " + std::to_string(maxTokens.has_value() ? *maxTokens : 0) +
45-
", tools: " + toolsStr + " }";
46-
}
47-
};
48-
49-
struct LLMRequest {
50-
LLMRequest() = delete;
51-
52-
// Constructor with prompt only
53-
LLMRequest(LLMRequestConfig config, std::string prompt, LLMContext context = {},
54-
std::string previousResponseId = "")
55-
: config(std::move(config)),
56-
prompt(std::move(prompt)),
57-
context(std::move(context)),
58-
previousResponseId(std::move(previousResponseId)) {}
59-
60-
// Constructor with single context object (convenience)
61-
LLMRequest(LLMRequestConfig config, std::string prompt, json contextObject,
62-
std::string previousResponseId = "")
63-
: config(std::move(config)),
64-
prompt(std::move(prompt)),
65-
context({std::move(contextObject)}),
66-
previousResponseId(std::move(previousResponseId)) {}
67-
68-
LLMRequestConfig config;
69-
std::string prompt; // The main task/prompt (what to do) - maps to instructions
70-
LLMContext context; // Context data (vector of generic objects) - maps to inputValues
71-
std::string previousResponseId; // For conversation continuity
72-
73-
// Utility methods
74-
std::string instructions() const { return prompt; } // For OpenAI mapping
75-
76-
std::string toString() const {
77-
std::string contextString = "[";
78-
for (size_t i = 0; i < context.size(); ++i) {
79-
if (i > 0) contextString += ", ";
80-
contextString += context[i].dump();
81-
}
82-
contextString += "]";
83-
84-
return "LLMRequest {\n config: " + config.toString() + ",\n prompt: " + prompt +
85-
",\n context: " + contextString + ",\n previousResponseId: " + previousResponseId +
86-
"\n}";
87-
}
88-
};
89-
90-
// LLMUsage is now defined in OpenAITypes.h to avoid circular dependency
91-
92-
struct LLMResponse {
93-
json result = json::object();
94-
bool success = false;
95-
std::string errorMessage;
96-
std::string responseId; // For conversation continuity with OpenAI
97-
LLMUsage usage; // Token usage information
98-
99-
std::string toString() const {
100-
std::string resultString = result.dump(2);
101-
return "LLMResponse {\n result: " + resultString +
102-
",\n success: " + (success ? "true" : "false") +
103-
",\n errorMessage: " + errorMessage + ",\n responseId: " + responseId +
104-
",\n usage: " + usage.toString() + "\n}";
105-
}
106-
};
19+
// LLMUsage and LLMResponse are now defined in OpenAITypes.h to avoid circular dependency
10720

10821
// Error codes for LLM operations
10922
enum class LLMErrorCode {

0 commit comments

Comments (0)