Skip to content
This repository was archived by the owner on Jun 5, 2025. It is now read-only.

Commit 11515a5

Browse files
ptelang authored and jhrozek committed
Add copilot headers/auth for extracting package/ecosystem
1 parent 831617b commit 11515a5

File tree

6 files changed

+36
-4
lines changed

6 files changed

+36
-4
lines changed

src/codegate/llm_utils/extractor.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import List, Optional
1+
from typing import Dict, List, Optional
22

33
import structlog
44

@@ -24,6 +24,7 @@ async def extract_packages(
2424
model: str = None,
2525
base_url: Optional[str] = None,
2626
api_key: Optional[str] = None,
27+
extra_headers: Optional[Dict[str, str]] = None
2728
) -> List[str]:
2829
"""Extract package names from the given content."""
2930
system_prompt = Config.get_config().prompts.lookup_packages
@@ -35,6 +36,7 @@ async def extract_packages(
3536
model=model,
3637
api_key=api_key,
3738
base_url=base_url,
39+
extra_headers=extra_headers,
3840
)
3941

4042
# Handle both formats: {"packages": [...]} and direct list [...]
@@ -49,6 +51,7 @@ async def extract_ecosystem(
4951
model: str = None,
5052
base_url: Optional[str] = None,
5153
api_key: Optional[str] = None,
54+
extra_headers: Optional[Dict[str, str]] = None
5255
) -> List[str]:
5356
"""Extract ecosystem from the given content."""
5457
system_prompt = Config.get_config().prompts.lookup_ecosystem
@@ -60,6 +63,7 @@ async def extract_ecosystem(
6063
model=model,
6164
api_key=api_key,
6265
base_url=base_url,
66+
extra_headers=extra_headers,
6367
)
6468

6569
ecosystem = result if isinstance(result, str) else result.get("ecosystem")

src/codegate/llm_utils/llmclient.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ async def complete(
2727
model: str = None,
2828
api_key: Optional[str] = None,
2929
base_url: Optional[str] = None,
30+
extra_headers: Optional[Dict[str, str]] = None,
3031
**kwargs,
3132
) -> Dict[str, Any]:
3233
"""
@@ -53,6 +54,7 @@ async def complete(
5354
model,
5455
api_key,
5556
base_url,
57+
extra_headers,
5658
**kwargs,
5759
)
5860

@@ -102,6 +104,7 @@ async def _complete_litellm(
102104
model: str,
103105
api_key: str,
104106
base_url: Optional[str] = None,
107+
extra_headers: Optional[Dict[str, str]] = None,
105108
**kwargs,
106109
) -> Dict[str, Any]:
107110
# Use the private method to create the request
@@ -134,6 +137,7 @@ async def _complete_litellm(
134137
temperature=request["temperature"],
135138
base_url=base_url,
136139
response_format=request["response_format"],
140+
extra_headers=extra_headers
137141
)
138142
content = response["choices"][0]["message"]["content"]
139143

src/codegate/pipeline/base.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,7 @@ async def process_request(
224224
model: str,
225225
api_key: Optional[str] = None,
226226
api_base: Optional[str] = None,
227+
extra_headers: Optional[Dict[str, str]] = None
227228
) -> PipelineResult:
228229
"""Process a request through all pipeline steps"""
229230
self.context.sensitive = PipelineSensitiveData(
@@ -235,6 +236,7 @@ async def process_request(
235236
api_base=api_base,
236237
)
237238
self.context.metadata["prompt_id"] = prompt_id
239+
self.context.metadata["extra_headers"] = extra_headers
238240
current_request = request
239241

240242
for step in self.pipeline_steps:
@@ -271,9 +273,10 @@ async def process_request(
271273
model: str,
272274
api_key: Optional[str] = None,
273275
api_base: Optional[str] = None,
276+
extra_headers: Optional[Dict[str, str]] = None
274277
) -> PipelineResult:
275278
"""Create a new pipeline instance and process the request"""
276279
instance = self.create_instance()
277280
return await instance.process_request(
278-
request, provider, prompt_id, model, api_key, api_base
281+
request, provider, prompt_id, model, api_key, api_base, extra_headers
279282
)

src/codegate/pipeline/codegate_context_retriever/codegate.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ async def __lookup_packages(self, user_query: str, context: PipelineContext):
6666
model=context.sensitive.model,
6767
api_key=context.sensitive.api_key,
6868
base_url=context.sensitive.api_base,
69+
extra_headers=context.metadata.get('extra_headers', None),
6970
)
7071

7172
logger.info(f"Packages in user query: {packages}")
@@ -79,6 +80,7 @@ async def __lookup_ecosystem(self, user_query: str, context: PipelineContext):
7980
model=context.sensitive.model,
8081
api_key=context.sensitive.api_key,
8182
base_url=context.sensitive.api_base,
83+
extra_headers=context.metadata.get('extra_headers', None),
8284
)
8385

8486
logger.info(f"Ecosystem in user query: {ecosystem}")

src/codegate/pipeline/secrets/secrets.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -175,6 +175,10 @@ async def process(
175175
Returns:
176176
PipelineResult containing the processed request and context with redaction metadata
177177
"""
178+
179+
if 'messages' not in request:
180+
return PipelineResult(request=request, context=context)
181+
178182
secrets_manager = context.sensitive.manager
179183
if not secrets_manager or not isinstance(secrets_manager, SecretsManager):
180184
raise ValueError("Secrets manager not found in context")

src/codegate/providers/copilot/pipeline.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import json
22
from abc import ABC, abstractmethod
3+
from typing import Dict
34

45
import structlog
56
from litellm.types.llms.openai import ChatCompletionRequest
@@ -41,6 +42,18 @@ def _request_id(headers: list[str]) -> str:
4142
print("No request ID found in headers")
4243
return ""
4344

45+
@staticmethod
46+
def _get_copilot_headers(headers: Dict[str, str]) -> Dict[str, str]:
47+
copilot_header_names = ['copilot-integration-id', 'editor-plugin-version', 'editor-version',
48+
'openai-intent', 'openai-organization', 'user-agent',
49+
'vscode-machineid', 'vscode-sessionid', 'x-github-api-version',
50+
'x-request-id']
51+
copilot_headers = {}
52+
for a_name in copilot_header_names:
53+
copilot_headers[a_name] = headers.get(a_name, '')
54+
55+
return copilot_headers
56+
4457
async def process_body(self, headers: list[str], body: bytes) -> bytes:
4558
"""Common processing logic for all strategies"""
4659
try:
@@ -51,8 +64,10 @@ async def process_body(self, headers: list[str], body: bytes) -> bytes:
5164
request=normalized_body,
5265
provider=self.provider_name,
5366
prompt_id=self._request_id(headers),
54-
model=normalized_body.get("model", ""),
55-
api_key=None,
67+
model=normalized_body.get("model", "gpt-4o-mini"),
68+
api_key = headers.get('authorization','').replace('Bearer ', ''),
69+
api_base = "https://" + headers.get('host', ''),
70+
extra_headers=CopilotPipeline._get_copilot_headers(headers)
5671
)
5772

5873
if result.request:

0 commit comments

Comments (0)