Skip to content

Commit 82ec8d9

Browse files
Merge pull request #32 from lucaromagnoli/fix/optional-params
Fix/optional params
2 parents 31e4692 + 1959836 commit 82ec8d9

File tree

15 files changed

+292
-177
lines changed

15 files changed

+292
-177
lines changed

.github/workflows/ci.yml

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,6 @@ on:
1818

1919
jobs:
2020
build-and-test:
21-
if: github.event_name != 'push' || !startsWith(github.ref, 'refs/tags/')
2221
runs-on: ${{ matrix.os }}
2322
strategy:
2423
fail-fast: false

.github/workflows/release.yml

Lines changed: 4 additions & 145 deletions
Original file line number | Diff line number | Diff line change
@@ -33,38 +33,10 @@ jobs:
3333
env:
3434
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
3535

36-
check-ci-status:
37-
needs: check-open-prs
38-
if: needs.check-open-prs.outputs.has_open_prs == 'false'
39-
runs-on: ubuntu-latest
40-
outputs:
41-
ci_passed: ${{ steps.check.outputs.ci_passed }}
42-
steps:
43-
- name: Checkout code
44-
uses: actions/checkout@v4
4536

46-
- name: Check if CI has already passed
47-
id: check
48-
run: |
49-
# Get the commit SHA
50-
COMMIT_SHA="${{ github.sha }}"
51-
echo "Checking CI status for commit: $COMMIT_SHA"
52-
53-
# Check if there's a successful CI run for this commit
54-
CI_STATUS=$(gh api repos/${{ github.repository }}/commits/$COMMIT_SHA/status --jq '.statuses[] | select(.context == "CI") | .state' 2>/dev/null || echo "unknown")
55-
56-
if [ "$CI_STATUS" = "success" ]; then
57-
echo "ci_passed=true" >> $GITHUB_OUTPUT
58-
echo "✅ CI has already passed for this commit. Skipping build-and-test."
59-
else
60-
echo "ci_passed=false" >> $GITHUB_OUTPUT
61-
echo "⚠️ CI has not passed yet or status unknown. Running build-and-test."
62-
fi
63-
env:
64-
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
6537

6638
validate-version:
67-
needs: [check-open-prs, check-ci-status]
39+
needs: [check-open-prs]
6840
if: needs.check-open-prs.outputs.has_open_prs == 'false'
6941
runs-on: ubuntu-latest
7042
outputs:
@@ -135,113 +107,8 @@ jobs:
135107
fi
136108
echo "✅ Changelog validation passed: version $VERSION found in CHANGELOG.md"
137109
138-
build-and-test:
139-
needs: [check-open-prs, check-ci-status, validate-version]
140-
if: needs.check-open-prs.outputs.has_open_prs == 'false' && needs.check-ci-status.outputs.ci_passed == 'false'
141-
strategy:
142-
matrix:
143-
os: [ubuntu-latest, windows-latest]
144-
build_type: [Release]
145-
include:
146-
- os: ubuntu-latest
147-
cmake_args: "-DLLMCPP_BUILD_TESTS=ON -DLLMCPP_BUILD_EXAMPLES=ON"
148-
- os: windows-latest
149-
cmake_args: "-DLLMCPP_BUILD_TESTS=ON -DLLMCPP_BUILD_EXAMPLES=ON"
150-
151-
runs-on: ${{ matrix.os }}
152-
timeout-minutes: 30 # Add timeout to prevent runaway builds
153-
154-
steps:
155-
- name: Checkout code
156-
uses: actions/checkout@v4
157-
158-
# Improved caching for release builds
159-
- name: Cache CMake build
160-
uses: actions/cache@v4
161-
with:
162-
path: |
163-
build
164-
!build/tests/llmcpp_tests
165-
!build/examples
166-
key: release-cmake-${{ matrix.os }}-${{ matrix.build_type }}-${{ hashFiles('CMakeLists.txt', '**/CMakeLists.txt', 'src/**/*', 'include/**/*') }}
167-
restore-keys: |
168-
release-cmake-${{ matrix.os }}-${{ matrix.build_type }}-
169-
170-
# Cache vcpkg packages (Windows)
171-
- name: Cache vcpkg packages
172-
if: matrix.os == 'windows-latest'
173-
uses: actions/cache@v4
174-
with:
175-
path: |
176-
${{ github.workspace }}/vcpkg
177-
${{ github.workspace }}/vcpkg_installed
178-
key: release-vcpkg-${{ matrix.os }}-${{ hashFiles('vcpkg.json') }}
179-
restore-keys: |
180-
release-vcpkg-${{ matrix.os }}-
181-
182-
- name: Setup CMake
183-
uses: jwlawson/actions-setup-cmake@v1.14
184-
with:
185-
cmake-version: ${{ env.CMAKE_VERSION }}
186-
187-
- name: Setup vcpkg (Windows)
188-
if: runner.os == 'Windows'
189-
uses: lukka/run-vcpkg@v11
190-
with:
191-
vcpkgJsonGlob: '**/vcpkg.json'
192-
193-
- name: Install dependencies (Ubuntu)
194-
if: runner.os == 'Linux'
195-
run: |
196-
sudo apt-get update
197-
sudo apt-get install -y libssl-dev nlohmann-json3-dev
198-
199-
- name: Configure CMake (Windows)
200-
if: runner.os == 'Windows'
201-
run: |
202-
cmake -B build ${{ matrix.cmake_args }} -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_ROOT\scripts\buildsystems\vcpkg.cmake"
203-
204-
- name: Configure CMake (Non-Windows)
205-
if: runner.os != 'Windows'
206-
run: |
207-
cmake -B build ${{ matrix.cmake_args }} -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
208-
209-
- name: Build
210-
timeout-minutes: 20 # Reduced timeout for faster failure detection
211-
run: |
212-
cmake --build build --config ${{ matrix.build_type }} --parallel
213-
214-
- name: Test
215-
timeout-minutes: 10 # Add timeout to tests
216-
run: |
217-
cd build
218-
ctest --output-on-failure -C ${{ matrix.build_type }} --exclude-regex "integration"
219-
220-
- name: Package (Linux/macOS)
221-
if: runner.os != 'Windows'
222-
run: |
223-
cd build
224-
cpack -G TGZ
225-
ls -la *.tar.gz
226-
227-
- name: Package (Windows)
228-
if: runner.os == 'Windows'
229-
run: |
230-
cd build
231-
cpack -G ZIP
232-
dir *.zip
233-
234-
- name: Upload artifacts
235-
uses: actions/upload-artifact@v4
236-
with:
237-
name: llmcpp-${{ needs.validate-version.outputs.version }}-${{ runner.os }}
238-
path: |
239-
build/*.tar.gz
240-
build/*.zip
241-
retention-days: 30
242-
243110
create-release:
244-
needs: [check-open-prs, check-ci-status, validate-version, build-and-test]
111+
needs: [check-open-prs, validate-version]
245112
if: needs.check-open-prs.outputs.has_open_prs == 'false'
246113
runs-on: ubuntu-latest
247114
permissions:
@@ -253,11 +120,6 @@ jobs:
253120
with:
254121
fetch-depth: 0 # Fetch all history for changelog generation
255122

256-
- name: Download all artifacts
257-
uses: actions/download-artifact@v4
258-
with:
259-
path: release-assets
260-
261123
- name: Setup Python
262124
uses: actions/setup-python@v4
263125
with:
@@ -279,18 +141,15 @@ jobs:
279141
body: ${{ steps.changelog.outputs.changelog }}
280142
prerelease: ${{ needs.validate-version.outputs.is_prerelease }}
281143
draft: false
282-
files: |
283-
release-assets/**/*
284144
env:
285145
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
286146

287147
- name: Create release summary
288148
run: |
289149
echo "## 🎉 Release ${{ needs.validate-version.outputs.version }} Created!" >> $GITHUB_STEP_SUMMARY
290150
echo "" >> $GITHUB_STEP_SUMMARY
291-
echo "### 📦 Assets:" >> $GITHUB_STEP_SUMMARY
292-
echo "- Linux (Ubuntu) build artifacts" >> $GITHUB_STEP_SUMMARY
293-
echo "- Windows build artifacts" >> $GITHUB_STEP_SUMMARY
151+
echo "### 📝 Release Notes:" >> $GITHUB_STEP_SUMMARY
152+
echo "Release notes generated from CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
294153
echo "" >> $GITHUB_STEP_SUMMARY
295154
echo "### 🔗 Links:" >> $GITHUB_STEP_SUMMARY
296155
echo "- [Release Page](https://github.com/${{ github.repository }}/releases/tag/${{ github.ref_name }})" >> $GITHUB_STEP_SUMMARY

CHANGELOG.md

Lines changed: 15 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -4,6 +4,21 @@
44

55

66

7+
## [1.0.19] - 2025-07-13
8+
9+
### Added
10+
- skip release build-and-test if CI has already passed for the commit
11+
12+
### Fixed
13+
- simplify CI condition to prevent tag builds
14+
15+
### Other
16+
- Merge pull request #30 from lucaromagnoli/feat/add-models
17+
- Remove o3 model integration test; only test o3-mini (no temperature or maxTokens)
18+
- Commit remaining changes: core, openai, and unit test updates for optional params and o3-mini logic
19+
- Fix maxTokens test: now optional instead of defaulting to 200
20+
- Bump version to 1.0.18 for patch release
21+
722
## [1.0.17] - 2025-07-13
823

924
### Fixed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
cmake_minimum_required(VERSION 3.22)
22

3-
project(llmcpp VERSION 1.0.17 LANGUAGES CXX)
3+
project(llmcpp VERSION 1.0.19 LANGUAGES CXX)
44

55
# Set version variables for easier access
66
set(LLMCPP_VERSION_MAJOR ${PROJECT_VERSION_MAJOR})

include/core/LLMTypes.h

Lines changed: 5 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,8 @@ struct LLMRequestConfig {
1818
std::string jsonSchema;
1919
std::optional<json> schemaObject; // Structured schema data
2020

21-
float temperature = 0.8f;
22-
int maxTokens = 200;
21+
std::optional<float> temperature; // Optional temperature (filtered by model support)
22+
std::optional<int> maxTokens; // Optional max tokens
2323

2424
// Convenience method for any model name
2525
void setModel(const std::string& modelName) { model = modelName; }
@@ -30,10 +30,11 @@ struct LLMRequestConfig {
3030

3131
std::string toString() const {
3232
std::string schemaStr = schemaObject.has_value() ? schemaObject->dump() : jsonSchema;
33+
std::string tempStr = temperature.has_value() ? std::to_string(*temperature) : "not set";
3334
return "LLMRequestConfig { client: " + client + ", model: " + getModelString() +
3435
", functionName: " + functionName + ", schema: " + schemaStr +
35-
", temperature: " + std::to_string(temperature) +
36-
", maxTokens: " + std::to_string(maxTokens) + " }";
36+
", temperature: " + tempStr +
37+
", maxTokens: " + std::to_string(maxTokens.has_value() ? *maxTokens : 0) + " }";
3738
}
3839
};
3940

include/openai/OpenAIClient.h

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -63,11 +63,13 @@ class OpenAIClient : public LLMClient {
6363
* Convenience methods with Model enum
6464
*/
6565
LLMResponse sendRequest(OpenAI::Model model, const std::string& prompt, LLMContext context = {},
66-
int maxTokens = 200, float temperature = 0.7f);
66+
std::optional<int> maxTokens = std::nullopt,
67+
std::optional<float> temperature = std::nullopt);
6768
std::future<LLMResponse> sendRequestAsync(OpenAI::Model model, const std::string& prompt,
6869
LLMResponseCallback callback = nullptr,
69-
LLMContext context = {}, int maxTokens = 200,
70-
float temperature = 0.7f);
70+
LLMContext context = {},
71+
std::optional<int> maxTokens = std::nullopt,
72+
std::optional<float> temperature = std::nullopt);
7173

7274
/**
7375
* Configuration methods

include/openai/OpenAITypes.h

Lines changed: 10 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1054,11 +1054,12 @@ inline ResponsesRequest ResponsesRequest::fromLLMRequest(const LLMRequest& reque
10541054
}
10551055
responsesReq.toolChoice =
10561056
ToolChoiceMode::Auto; // Explicitly initialize to fix cppcheck warning
1057-
if (request.config.maxTokens > 0) {
1058-
responsesReq.maxOutputTokens = request.config.maxTokens;
1057+
if (request.config.maxTokens.has_value() && *request.config.maxTokens > 0) {
1058+
responsesReq.maxOutputTokens = *request.config.maxTokens;
10591059
}
1060-
if (request.config.temperature >= 0.0f) {
1061-
responsesReq.temperature = static_cast<double>(request.config.temperature);
1060+
// Only set temperature if it's provided and valid
1061+
if (request.config.temperature.has_value() && *request.config.temperature >= 0.0f) {
1062+
responsesReq.temperature = static_cast<double>(*request.config.temperature);
10621063
}
10631064
if (!request.previousResponseId.empty()) {
10641065
responsesReq.previousResponseID = request.previousResponseId;
@@ -1303,11 +1304,12 @@ inline ChatCompletionRequest ChatCompletionRequest::fromLLMRequest(const LLMRequ
13031304
chatReq.messages.push_back(userMsg);
13041305
}
13051306

1306-
if (request.config.maxTokens > 0) {
1307-
chatReq.maxTokens = request.config.maxTokens;
1307+
if (request.config.maxTokens.has_value() && *request.config.maxTokens > 0) {
1308+
chatReq.maxTokens = *request.config.maxTokens;
13081309
}
1309-
if (request.config.temperature >= 0.0f) {
1310-
chatReq.temperature = static_cast<double>(request.config.temperature);
1310+
// Only set temperature if it's provided and valid
1311+
if (request.config.temperature.has_value() && *request.config.temperature >= 0.0f) {
1312+
chatReq.temperature = static_cast<double>(*request.config.temperature);
13111313
}
13121314

13131315
return chatReq;

src/openai/OpenAIClient.cpp

Lines changed: 17 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -83,27 +83,34 @@ std::future<LLMResponse> OpenAIClient::sendStreamingRequestAsync(
8383

8484
// Convenience methods with Model enum
8585
LLMResponse OpenAIClient::sendRequest(OpenAI::Model model, const std::string& prompt,
86-
LLMContext context, int maxTokens, float temperature) {
86+
LLMContext context, std::optional<int> maxTokens,
87+
std::optional<float> temperature) {
8788
LLMRequestConfig config;
8889
config.client = "openai";
8990
config.model = modelToString(model);
90-
config.maxTokens = maxTokens;
91-
config.temperature = temperature;
91+
if (maxTokens.has_value()) {
92+
config.maxTokens = *maxTokens;
93+
}
94+
if (temperature.has_value()) {
95+
config.temperature = *temperature;
96+
}
9297

9398
LLMRequest request(config, prompt, context);
9499
return sendRequest(request);
95100
}
96101

97-
std::future<LLMResponse> OpenAIClient::sendRequestAsync(OpenAI::Model model,
98-
const std::string& prompt,
99-
LLMResponseCallback callback,
100-
LLMContext context, int maxTokens,
101-
float temperature) {
102+
std::future<LLMResponse> OpenAIClient::sendRequestAsync(
103+
OpenAI::Model model, const std::string& prompt, LLMResponseCallback callback,
104+
LLMContext context, std::optional<int> maxTokens, std::optional<float> temperature) {
102105
LLMRequestConfig config;
103106
config.client = "openai";
104107
config.model = modelToString(model);
105-
config.maxTokens = maxTokens;
106-
config.temperature = temperature;
108+
if (maxTokens.has_value()) {
109+
config.maxTokens = *maxTokens;
110+
}
111+
if (temperature.has_value()) {
112+
config.temperature = *temperature;
113+
}
107114

108115
LLMRequest request(config, prompt, context);
109116
return sendRequestAsync(request, callback);

src/openai/OpenAIResponsesApi.cpp

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -26,6 +26,8 @@ OpenAI::ResponsesResponse OpenAIResponsesApi::create(const OpenAI::ResponsesRequ
2626
auto httpResponse = httpClient_->post(url, requestJson);
2727

2828
if (!httpResponse.success) {
29+
std::cerr << "❌ HTTP request failed! Status: " << httpResponse.statusCode << std::endl;
30+
std::cerr << "❌ HTTP response body: " << httpResponse.body << std::endl;
2931
throw std::runtime_error("HTTP request failed: " + httpResponse.errorMessage);
3032
}
3133

@@ -47,8 +49,10 @@ OpenAI::ResponsesResponse OpenAIResponsesApi::create(const OpenAI::ResponsesRequ
4749
return response;
4850

4951
} catch (const json::exception& e) {
52+
std::cerr << "❌ JSON parsing error: " << e.what() << std::endl;
5053
throw std::runtime_error("JSON parsing error: " + std::string(e.what()));
5154
} catch (const std::exception& e) {
55+
std::cerr << "❌ Exception: " << e.what() << std::endl;
5256
throw std::runtime_error("API call failed: " + std::string(e.what()));
5357
}
5458
}

0 commit comments

Comments (0)