
Commit 502c5b0

lukehinds authored and yrobla committed
Cline Support
This should be considered experimental until tested more widely by the community. I have it working with Anthropic and Ollama so far.
1 parent 781de22 commit 502c5b0

File tree

5 files changed: +127 -14 lines changed


src/codegate/pipeline/secrets/signatures.py

Lines changed: 14 additions & 4 deletions
@@ -2,7 +2,7 @@
 import re
 from pathlib import Path
 from threading import Lock
-from typing import ClassVar, Dict, List, NamedTuple, Optional
+from typing import ClassVar, Dict, List, NamedTuple, Optional, Union

 import structlog
 import yaml
@@ -215,16 +215,26 @@ def _load_signatures(cls) -> None:
             raise

     @classmethod
-    def find_in_string(cls, text: str) -> List[Match]:
-        """Search for secrets in the provided string."""
+    def find_in_string(cls, text: Union[str, List[str]]) -> List[Match]:
+        """Search for secrets in the provided string or list of strings."""
         if not text:
             return []

         if not cls._yaml_path:
             raise RuntimeError("SecretFinder not initialized.")

+        # Convert list to string if necessary (needed for Cline, which sends a list of strings)
+        if isinstance(text, list):
+            text = "\n".join(str(line) for line in text)
+
         matches = []
-        lines = text.splitlines()
+
+        # Split text into lines for processing
+        try:
+            lines = text.splitlines()
+        except Exception as e:
+            logger.warning(f"Error splitting text into lines: {e}")
+            return []

         for line_num, line in enumerate(lines, start=1):
             for group in cls._signature_groups:
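
For illustration, the widened signature accepts either form. A minimal usage sketch, assuming the finder has already been initialized with its signatures YAML (the import path follows the file path above; the printed Match fields are hypothetical):

from codegate.pipeline.secrets.signatures import SecretFinder

# A plain string scans exactly as before.
matches = SecretFinder.find_in_string("aws_key = AKIAXXXXXXXXXXXXXXXX")

# Cline sends content as a list of strings; the list is joined with
# newlines before the line-by-line scan, so line numbers still work.
matches = SecretFinder.find_in_string(["some code", "aws_key = AKIAXXXXXXXXXXXXXXXX"])

for match in matches:
    print(match)  # each Match is a NamedTuple describing one hit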

src/codegate/pipeline/systemmsg.py

Lines changed: 12 additions & 1 deletion
@@ -16,6 +16,7 @@ def get_existing_system_message(request: ChatCompletionRequest) -> Optional[dict
     Returns:
         The existing system message if found, otherwise None.
     """
+
     for message in request.get("messages", []):
         if message["role"] == "system":
             return message
@@ -50,8 +51,18 @@ def add_or_update_system_message(
         context.add_alert("add-system-message", trigger_string=json.dumps(system_message))
         new_request["messages"].insert(0, system_message)
     else:
+        # Handle both string and list content types (needed for Cline, which sends a list)
+        existing_content = request_system_message["content"]
+        new_content = system_message["content"]
+
+        # Convert list to string if necessary
+        if isinstance(existing_content, list):
+            existing_content = "\n".join(str(item) for item in existing_content)
+        if isinstance(new_content, list):
+            new_content = "\n".join(str(item) for item in new_content)
+
         # Update existing system message
-        updated_content = request_system_message["content"] + "\n\n" + system_message["content"]
+        updated_content = existing_content + "\n\n" + new_content
         context.add_alert("update-system-message", trigger_string=updated_content)
         request_system_message["content"] = updated_content

src/codegate/providers/anthropic/provider.py

Lines changed: 5 additions & 0 deletions
@@ -32,9 +32,14 @@ def _setup_routes(self):
         Sets up the /messages route for the provider as expected by the Anthropic
         API. Extracts the API key from the "x-api-key" header and passes it to the
         completion handler.
+
+        There are two routes:
+        - /messages: the route used by the Anthropic API with Continue.dev
+        - /v1/messages: the route used by the Anthropic API with Cline
         """

         @self.router.post(f"/{self.provider_route_name}/messages")
+        @self.router.post(f"/{self.provider_route_name}/v1/messages")
         async def create_message(
             request: Request,
             x_api_key: str = Header(None),
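
For illustration, a Cline-style client reaches the provider through the new /v1 path. A hedged sketch, assuming codegate listens on localhost:8989 and the provider route name is "anthropic" (host, port, and key below are placeholders):

import httpx

# Hypothetical request against the new Cline-facing route.
response = httpx.post(
    "http://localhost:8989/anthropic/v1/messages",
    headers={"x-api-key": "<your-anthropic-key>"},
    json={
        "model": "claude-3-5-sonnet-20241022",
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(response.json())

Continue.dev keeps using /anthropic/messages; both decorators dispatch to the same create_message handler.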

src/codegate/providers/ollama/completion_handler.py

Lines changed: 65 additions & 8 deletions
@@ -1,3 +1,4 @@
+import json
 from typing import AsyncIterator, Optional, Union

 import structlog
@@ -10,35 +11,91 @@
 logger = structlog.get_logger("codegate")


-async def ollama_stream_generator(stream: AsyncIterator[ChatResponse]) -> AsyncIterator[str]:
+async def ollama_stream_generator(
+    stream: AsyncIterator[ChatResponse], is_cline_client: bool
+) -> AsyncIterator[str]:
     """OpenAI-style SSE format"""
     try:
         async for chunk in stream:
             try:
-                content = chunk.model_dump_json()
-                if content:
+                # TODO: We should wire in the client info so we can respond with
+                # the correct format and start to handle multiple clients
+                # in a more robust way.
+                if not is_cline_client:
                     yield f"{chunk.model_dump_json()}\n"
+                else:
+                    # First get the raw dict from the chunk
+                    chunk_dict = chunk.model_dump()
+                    # Create a response dictionary in OpenAI-like format
+                    response = {
+                        "id": f"chatcmpl-{chunk_dict.get('created_at', '')}",
+                        "object": "chat.completion.chunk",
+                        "created": chunk_dict.get("created_at"),
+                        "model": chunk_dict.get("model"),
+                        "choices": [
+                            {
+                                "index": 0,
+                                "delta": {
+                                    "content": chunk_dict.get("message", {}).get("content", ""),
+                                    "role": chunk_dict.get("message", {}).get("role", "assistant"),
+                                },
+                                "finish_reason": (
+                                    chunk_dict.get("done_reason")
+                                    if chunk_dict.get("done", False)
+                                    else None
+                                ),
+                            }
+                        ],
+                    }
+                    # Preserve existing type or add default if missing
+                    response["type"] = chunk_dict.get("type", "stream")
+
+                    # Add optional fields that might be present in the final message
+                    optional_fields = [
+                        "total_duration",
+                        "load_duration",
+                        "prompt_eval_count",
+                        "prompt_eval_duration",
+                        "eval_count",
+                        "eval_duration",
+                    ]
+                    for field in optional_fields:
+                        if field in chunk_dict:
+                            response[field] = chunk_dict[field]
+
+                    yield f"data: {json.dumps(response)}\n"
             except Exception as e:
-                if str(e):
-                    yield f"{str(e)}\n"
+                logger.error(f"Error in stream generator: {str(e)}")
+                yield f"data: {json.dumps({'error': str(e), 'type': 'error', 'choices': []})}\n"
     except Exception as e:
-        if str(e):
-            yield f"{str(e)}\n"
+        logger.error(f"Stream error: {str(e)}")
+        yield f"data: {json.dumps({'error': str(e), 'type': 'error', 'choices': []})}\n"


 class OllamaShim(BaseCompletionHandler):

     def __init__(self, base_url):
         self.client = AsyncClient(host=base_url, timeout=300)
+        self.is_cline_client = False

     async def execute_completion(
         self,
         request: ChatCompletionRequest,
         api_key: Optional[str],
         stream: bool = False,
         is_fim_request: bool = False,
+        is_cline_client: bool = False,
     ) -> Union[ChatResponse, GenerateResponse]:
         """Stream response directly from Ollama API."""
+
+        # TODO: I don't like this, but it's a quick fix for now until we start
+        # passing through the client info so we can respond with the correct
+        # format.
+        # Determine if the client is a Cline client
+        self.is_cline_client = any(
+            "Cline" in str(message.get("content", "")) for message in request.get("messages", [])
+        )
+
         if is_fim_request:
             prompt = request["messages"][0].get("content", "")
             response = await self.client.generate(
@@ -59,7 +116,7 @@ def _create_streaming_response(self, stream: AsyncIterator[ChatResponse]) -> Str
         is the format that FastAPI expects for streaming responses.
         """
         return StreamingResponse(
-            ollama_stream_generator(stream),
+            ollama_stream_generator(stream, self.is_cline_client),
             media_type="application/x-ndjson; charset=utf-8",
             headers={
                 "Cache-Control": "no-cache",

src/codegate/providers/ollama/provider.py

Lines changed: 31 additions & 1 deletion
@@ -69,12 +69,42 @@ async def show_model(request: Request):
             )
             return response.json()

+        @self.router.get(f"/{self.provider_route_name}/api/tags")
+        async def get_tags(request: Request):
+            """
+            Special route for /api/tags that responds outside of the pipeline.
+            Tags are used to get the list of models:
+            https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models
+            """
+            async with httpx.AsyncClient() as client:
+                response = await client.get(f"{self.base_url}/api/tags")
+            return response.json()
+
+        @self.router.post(f"/{self.provider_route_name}/api/show")
+        async def show_model(request: Request):
+            """
+            Route for /api/show that responds outside of the pipeline.
+            /api/show is used to get the model information:
+            https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information
+            """
+            body = await request.body()
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    f"{self.base_url}/api/show",
+                    content=body,
+                    headers={"Content-Type": "application/json"},
+                )
+            return response.json()
+
         # Native Ollama API routes
         @self.router.post(f"/{self.provider_route_name}/api/chat")
         @self.router.post(f"/{self.provider_route_name}/api/generate")
         # OpenAI-compatible routes for backward compatibility
         @self.router.post(f"/{self.provider_route_name}/chat/completions")
         @self.router.post(f"/{self.provider_route_name}/completions")
+        # Cline API routes
+        @self.router.post(f"/{self.provider_route_name}/v1/chat/completions")
+        @self.router.post(f"/{self.provider_route_name}/v1/generate")
         async def create_completion(request: Request):
             body = await request.body()
             data = json.loads(body)
@@ -90,7 +120,7 @@ async def create_completion(request: Request):
                 logger.error("Error in OllamaProvider completion", error=str(e))
                 raise HTTPException(status_code=503, detail="Ollama service is unavailable")
             except Exception as e:
-                #  check if we have an status code there
+                # check if we have a status code there
                 if hasattr(e, "status_code"):
                     # log the exception
                     logger = structlog.get_logger("codegate")
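
As a quick check, the new passthrough routes can be exercised directly. A hedged sketch, assuming codegate on localhost:8989, "ollama" as the provider route name, and an invented model name:

import httpx

# List local models via the new /api/tags passthrough (hypothetical host/port).
tags = httpx.get("http://localhost:8989/ollama/api/tags").json()
print([m.get("name") for m in tags.get("models", [])])

# Fetch model information via the new /api/show passthrough;
# the request body is forwarded to Ollama unchanged.
info = httpx.post(
    "http://localhost:8989/ollama/api/show",
    json={"model": "qwen2.5-coder"},
).json()
print(info.get("details"))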
