@@ -1,13 +1,10 @@
 import os
 from abc import ABC
+from collections.abc import AsyncGenerator, Awaitable
 from dataclasses import dataclass
 from typing import (
     Any,
-    AsyncGenerator,
-    Awaitable,
     Callable,
-    Dict,
-    List,
     Optional,
     TypedDict,
     Union,
@@ -44,9 +41,9 @@ class Document:
     category: Optional[str]
     sourcepage: Optional[str]
     sourcefile: Optional[str]
-    oids: Optional[List[str]]
-    groups: Optional[List[str]]
-    captions: List[QueryCaptionResult]
+    oids: Optional[list[str]]
+    groups: Optional[list[str]]
+    captions: list[QueryCaptionResult]
     score: Optional[float] = None
     reranker_score: Optional[float] = None

@@ -90,15 +87,15 @@ def update_token_usage(self, usage: CompletionUsage) -> None:

 @dataclass
 class DataPoints:
-    text: Optional[List[str]] = None
-    images: Optional[List] = None
+    text: Optional[list[str]] = None
+    images: Optional[list] = None


 @dataclass
 class ExtraInfo:
     data_points: DataPoints
-    thoughts: Optional[List[ThoughtStep]] = None
-    followup_questions: Optional[List[Any]] = None
+    thoughts: Optional[list[ThoughtStep]] = None
+    followup_questions: Optional[list[Any]] = None


 @dataclass
@@ -188,15 +185,15 @@ async def search(
         top: int,
         query_text: Optional[str],
         filter: Optional[str],
-        vectors: List[VectorQuery],
+        vectors: list[VectorQuery],
         use_text_search: bool,
         use_vector_search: bool,
         use_semantic_ranker: bool,
         use_semantic_captions: bool,
         minimum_search_score: Optional[float] = None,
         minimum_reranker_score: Optional[float] = None,
         use_query_rewriting: Optional[bool] = None,
-    ) -> List[Document]:
+    ) -> list[Document]:
         search_text = query_text if use_text_search else ""
         search_vectors = vectors if use_vector_search else []
         if use_semantic_ranker:
@@ -233,7 +230,7 @@ async def search(
                         sourcefile=document.get("sourcefile"),
                         oids=document.get("oids"),
                         groups=document.get("groups"),
-                        captions=cast(List[QueryCaptionResult], document.get("@search.captions")),
+                        captions=cast(list[QueryCaptionResult], document.get("@search.captions")),
                         score=document.get("@search.score"),
                         reranker_score=document.get("@search.reranker_score"),
                     )
@@ -251,7 +248,7 @@ async def search(
         return qualified_documents

     def get_sources_content(
-        self, results: List[Document], use_semantic_captions: bool, use_image_citation: bool
+        self, results: list[Document], use_semantic_captions: bool, use_image_citation: bool
     ) -> list[str]:

         def nonewlines(s: str) -> str:
@@ -345,13 +342,13 @@ def create_chat_completion(
         overrides: dict[str, Any],
         response_token_limit: int,
         should_stream: bool = False,
-        tools: Optional[List[ChatCompletionToolParam]] = None,
+        tools: Optional[list[ChatCompletionToolParam]] = None,
         temperature: Optional[float] = None,
         n: Optional[int] = None,
         reasoning_effort: Optional[ChatCompletionReasoningEffort] = None,
     ) -> Union[Awaitable[ChatCompletion], Awaitable[AsyncStream[ChatCompletionChunk]]]:
         if chatgpt_model in self.GPT_REASONING_MODELS:
-            params: Dict[str, Any] = {
+            params: dict[str, Any] = {
                 # max_tokens is not supported
                 "max_completion_tokens": response_token_limit
             }
@@ -387,14 +384,14 @@ def create_chat_completion(
     def format_thought_step_for_chatcompletion(
         self,
         title: str,
-        messages: List[ChatCompletionMessageParam],
+        messages: list[ChatCompletionMessageParam],
         overrides: dict[str, Any],
         model: str,
         deployment: Optional[str],
         usage: Optional[CompletionUsage] = None,
         reasoning_effort: Optional[ChatCompletionReasoningEffort] = None,
     ) -> ThoughtStep:
-        properties: Dict[str, Any] = {"model": model}
+        properties: dict[str, Any] = {"model": model}
         if deployment:
             properties["deployment"] = deployment
         # Only add reasoning_effort setting if the model supports it
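For reference, a minimal standalone sketch (not from this repository; the function names are illustrative) of the annotation style the diff migrates to: builtin generics such as list[str] and dict[str, Any] per PEP 585, with AsyncGenerator and Awaitable imported from collections.abc rather than typing. This style assumes Python 3.9 or later.

from collections.abc import AsyncGenerator, Awaitable
from typing import Any, Optional


async def numbers(limit: int) -> AsyncGenerator[int, None]:
    # collections.abc.AsyncGenerator replaces typing.AsyncGenerator.
    for value in range(limit):
        yield value


def schedule(task: Awaitable[int]) -> Awaitable[int]:
    # collections.abc.Awaitable replaces typing.Awaitable in annotations.
    return task


def summarize(values: list[int], labels: Optional[list[str]] = None) -> dict[str, Any]:
    # Builtin list[...] and dict[...] replace typing.List and typing.Dict.
    return {"count": len(values), "total": sum(values), "labels": labels or []}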