4 changes: 2 additions & 2 deletions .github/workflows/lint.yml
@@ -26,7 +26,7 @@ jobs:
- name: Install clang tools
run: |
sudo apt-get update
sudo apt-get install -y clang-format-14 clang-tidy-14
sudo apt-get install -y clang-format-14 clang-tidy-14 libssl-dev
sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-14 100
sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-14 100

@@ -38,7 +38,7 @@ jobs:
clang-tidy --version

- name: Configure CMake
run: cmake -B build -S .
run: cmake -B build -S . -DAGENT_CPP_BUILD_MCP=ON

- name: Run pre-commit
run: pre-commit run --all-files --show-diff-on-failure --verbose
8 changes: 4 additions & 4 deletions .github/workflows/update-llama-cpp.yml
@@ -28,7 +28,7 @@ jobs:
- name: Check for submodule updates
id: check
run: |
cd llama.cpp
cd deps/llama.cpp

CURRENT_COMMIT=$(git rev-parse HEAD)
echo "Current commit: $CURRENT_COMMIT"
@@ -69,11 +69,11 @@ jobs:

git checkout -b "$BRANCH_NAME"

cd llama.cpp
cd deps/llama.cpp
git checkout origin/master
cd ..
cd ../..

git add llama.cpp
git add deps/llama.cpp
git commit -m "Update llama.cpp submodule to ${{ steps.check.outputs.latest_short }}"

git push origin "$BRANCH_NAME"
5 changes: 4 additions & 1 deletion .gitmodules
@@ -1,3 +1,6 @@
[submodule "llama.cpp"]
path = llama.cpp
path = deps/llama.cpp
url = https://github.com/ggerganov/llama.cpp.git
[submodule "vendor/cpp-httplib"]
path = deps/cpp-httplib
url = https://github.com/yhirose/cpp-httplib.git
70 changes: 67 additions & 3 deletions CMakeLists.txt
@@ -20,7 +20,7 @@ if(AGENT_CPP_BUNDLED_LLAMA)
set(LLAMA_SOURCE_DIR "${LLAMA_CPP_DIR}")
message(STATUS "Using custom llama.cpp from: ${LLAMA_SOURCE_DIR}")
else()
set(LLAMA_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/llama.cpp")
set(LLAMA_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/deps/llama.cpp")
if(NOT EXISTS "${LLAMA_SOURCE_DIR}/CMakeLists.txt")
message(FATAL_ERROR
"llama.cpp submodule not found at ${LLAMA_SOURCE_DIR}\n"
@@ -75,6 +75,30 @@ target_include_directories(agent
target_link_libraries(agent PUBLIC model common llama)
target_compile_features(agent PUBLIC cxx_std_17)

# MCP Client library for connecting to MCP servers via HTTP
option(AGENT_CPP_BUILD_MCP "Build MCP client (requires OpenSSL for HTTPS)" OFF)

if(AGENT_CPP_BUILD_MCP)
find_package(OpenSSL REQUIRED)

add_library(mcp_client STATIC
src/mcp/mcp_client.cpp
src/mcp/mcp_tool.cpp
)
add_library(agent-cpp::mcp_client ALIAS mcp_client)
target_include_directories(mcp_client
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/deps/cpp-httplib>
$<BUILD_INTERFACE:${LLAMA_SOURCE_DIR}/common>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/agent-cpp>
)
target_link_libraries(mcp_client PUBLIC common OpenSSL::SSL OpenSSL::Crypto)
target_compile_features(mcp_client PUBLIC cxx_std_17)

message(STATUS "MCP client enabled (using cpp-httplib)")
endif()

if(AGENT_CPP_BUILD_TESTS)
enable_testing()

@@ -98,6 +122,19 @@ if(AGENT_CPP_BUILD_TESTS)
add_test(NAME ToolTests COMMAND test_tool)
add_test(NAME CallbacksTests COMMAND test_callbacks)

if(AGENT_CPP_BUILD_MCP)
add_executable(test_mcp_client tests/test_mcp_client.cpp)
target_include_directories(test_mcp_client PRIVATE
src
tests
${LLAMA_SOURCE_DIR}/common
)
target_link_libraries(test_mcp_client PRIVATE mcp_client common)
target_compile_features(test_mcp_client PRIVATE cxx_std_17)

add_test(NAME MCPClientTests COMMAND test_mcp_client)
endif()

# On Windows, DLLs are placed in the bin/ directory by llama.cpp
# We need to add this directory to PATH so tests can find the DLLs
if(WIN32)
@@ -162,6 +199,21 @@ if(AGENT_CPP_BUILD_EXAMPLES)
target_link_libraries(context-engineering-example PRIVATE agent model common llama)
target_compile_features(context-engineering-example PRIVATE cxx_std_17)

# MCP client example (requires MCP support)
if(AGENT_CPP_BUILD_MCP)
add_executable(mcp-example examples/mcp/mcp.cpp)
target_include_directories(mcp-example PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/src
${CMAKE_CURRENT_SOURCE_DIR}/examples/shared
${LLAMA_SOURCE_DIR}/common
${LLAMA_SOURCE_DIR}/ggml/include
${LLAMA_SOURCE_DIR}/include
${LLAMA_SOURCE_DIR}/vendor
)
target_link_libraries(mcp-example PRIVATE agent model mcp_client common llama)
target_compile_features(mcp-example PRIVATE cxx_std_17)
endif()

# Note: tracing-example is not included here as it requires additional
# dependencies (OpenTelemetry, protobuf, curl). Build it separately from
# examples/tracing/
@@ -176,17 +228,29 @@ if(AGENT_CPP_INSTALL)
include(CMakePackageConfigHelpers)

# Install public headers
install(FILES
set(INSTALL_HEADERS
src/agent.h
src/callbacks.h
src/error.h
src/model.h
src/tool.h
)

if(AGENT_CPP_BUILD_MCP)
list(APPEND INSTALL_HEADERS src/mcp/mcp_client.h src/mcp/mcp_tool.h)
endif()

install(FILES ${INSTALL_HEADERS}
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/agent-cpp
)

# Install libraries with export set
install(TARGETS agent model
set(INSTALL_TARGETS agent model)
if(AGENT_CPP_BUILD_MCP)
list(APPEND INSTALL_TARGETS mcp_client)
endif()

install(TARGETS ${INSTALL_TARGETS}
EXPORT agent-cpp-targets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
1 change: 1 addition & 0 deletions deps/cpp-httplib
Submodule cpp-httplib added at 59905c
1 change: 1 addition & 0 deletions deps/llama.cpp
Submodule llama.cpp added at e443fb
26 changes: 26 additions & 0 deletions examples/mcp/CMakeLists.txt
@@ -0,0 +1,26 @@
cmake_minimum_required(VERSION 3.14)
project(mcp-example VERSION 0.1.0)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

set(AGENT_CPP_BUILD_MCP ON CACHE BOOL "Build MCP client" FORCE)

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../.. ${CMAKE_CURRENT_BINARY_DIR}/agent-cpp)

add_executable(mcp-example mcp.cpp)

target_include_directories(mcp-example PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/../../src
${CMAKE_CURRENT_SOURCE_DIR}/../shared
${LLAMA_SOURCE_DIR}/common
${LLAMA_SOURCE_DIR}/ggml/include
${LLAMA_SOURCE_DIR}/include
${LLAMA_SOURCE_DIR}/vendor
)

target_link_libraries(mcp-example PRIVATE agent mcp_client common llama)
target_compile_features(mcp-example PRIVATE cxx_std_17)

message(STATUS "MCP example configured.")
105 changes: 105 additions & 0 deletions examples/mcp/README.md
@@ -0,0 +1,105 @@
# MCP Client Example

[MCP (Model Context Protocol)](https://modelcontextprotocol.io/) is an open protocol that allows AI applications to connect to external tools and data sources.

This example demonstrates how to connect to an MCP server via HTTP and use its tools with an agent.cpp agent.

## Building Blocks

### Tools

Tools are dynamically discovered from the MCP server at runtime. The client connects to the server, performs a handshake, and retrieves the available tool definitions.
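
Roughly, that flow looks like the sketch below. This is illustrative only: `MCPClient`, `initialize()`, and `list_tools()` are assumed names, not the actual interface declared in `src/mcp/mcp_client.h`.

```cpp
// Sketch only: class and method names are assumptions, not the real mcp_client API.
#include "mcp/mcp_client.h"

#include <cstdio>

int main() {
    // Point the client at a Streamable HTTP MCP endpoint.
    MCPClient client("http://localhost:8000/mcp");

    // Handshake: initialize the MCP session before any other request.
    client.initialize();

    // Retrieve the tool definitions the server advertises.
    for (const auto &tool : client.list_tools()) {
        std::printf("%s: %s\n", tool.name.c_str(), tool.description.c_str());
    }
    return 0;
}
```

Each discovered definition can then be wrapped as an agent-side tool (presumably the job of `mcp_tool.cpp`), so the agent calls remote MCP tools the same way it calls locally registered ones.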

### Callbacks

This example uses two shared callbacks from `examples/shared/` (see the wiring sketch after this list):

- **LoggingCallback**: Displays tool execution information with colored output showing which tools are called and their results.

- **ErrorRecoveryCallback**: Converts tool execution errors into JSON results, allowing the agent to see errors and potentially retry or adjust.
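
A minimal wiring sketch, under the caveat that the `Agent` type, the `add_callback()` method, and the shared-callback header path are all assumptions rather than code from this PR:

```cpp
// Illustrative only: the registration API and header paths are assumed,
// not taken from agent.h or examples/shared/.
#include "agent.h"             // agent.cpp public header
#include "shared/callbacks.h"  // hypothetical header for the shared example callbacks

#include <memory>

void attach_shared_callbacks(Agent &agent) {
    // Show which tool is being called and what it returned, with colored output.
    agent.add_callback(std::make_shared<LoggingCallback>());

    // Turn tool execution errors into JSON results so the model can see the
    // failure and retry or adjust instead of aborting the turn.
    agent.add_callback(std::make_shared<ErrorRecoveryCallback>());
}
```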

## Building

> [!IMPORTANT]
> Check the [llama.cpp build documentation](https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md) to find
> CMake flags you might want to pass depending on your available hardware.

```bash
cd examples/mcp

git -C ../.. submodule update --init --recursive

# MCP requires OpenSSL for HTTPS support
cmake -B build -DAGENT_CPP_BUILD_MCP=ON
cmake --build build -j$(nproc)
```

### Using a custom llama.cpp

If you have llama.cpp already downloaded:

```bash
cmake -B build -DLLAMA_CPP_DIR=/path/to/your/llama.cpp -DAGENT_CPP_BUILD_MCP=ON
cmake --build build -j$(nproc)
```

## Usage

```bash
./build/mcp-example -m <path-to-model.gguf> -u <mcp-server-url>
```

Options:
- `-m <path>` - Path to the GGUF model file (required)
- `-u <url>` - MCP server URL (Streamable HTTP transport) (required)

## Example

This example includes a simple MCP server (`server.py`) with a `calculator` tool that performs basic math operations (similar to the calculator in `examples/shared`).

### 1. Start the MCP Server

The server uses [uv](https://docs.astral.sh/uv/) inline script metadata, so no installation is needed:

```bash
uv run server.py
```

This starts the MCP server on `http://localhost:8000/mcp`.

### 2. Run the Agent

```bash
./build/mcp-example -m ../../granite-4.0-micro-Q8_0.gguf -u "http://localhost:8000/mcp"
```

### 3. Example Conversation

```console
$ ./build/mcp-example -m ../../granite-4.0-micro-Q8_0.gguf -u "http://localhost:8000/mcp"
Connecting to MCP server: http://localhost:8000/mcp
Initializing MCP session...
MCP session initialized.

Available tools (1):
- calculator: Perform basic mathematical operations.

Loading model...
Model loaded successfully

MCP Agent ready!
Connected to: http://localhost:8000/mcp
Type an empty line to quit.

> What is 42 multiplied by 17?

<tool_call>
{"name": "calculator", "arguments": "{\n \"operation\": \"multiply\",\n \"a\": 42,\n \"b\": 17\n}"}
</tool_call>

[TOOL EXECUTION] Calling calculator
[TOOL RESULT]
{"result": 714}

42 multiplied by 17 equals **714**.
```