22from  abc  import  ABC 
33from  collections .abc  import  AsyncGenerator , Awaitable 
44from  dataclasses  import  dataclass , field 
5- from  typing  import  Any , TypedDict , cast 
5+ from  typing  import  Any , Optional ,  TypedDict , cast 
66
77from  azure .search .documents .agent .aio  import  KnowledgeAgentRetrievalClient 
88from  azure .search .documents .agent .models  import  (
3838
3939@dataclass  
4040class  Document :
41-     id : str   |   None  =  None 
42-     content : str   |   None  =  None 
43-     category : str   |   None  =  None 
44-     sourcepage : str   |   None  =  None 
45-     sourcefile : str   |   None  =  None 
46-     oids : list [str ]  |   None  =  None 
47-     groups : list [str ]  |   None  =  None 
48-     captions : list [QueryCaptionResult ]  |   None  =  None 
49-     score : float   |   None  =  None 
50-     reranker_score : float   |   None  =  None 
51-     search_agent_query : str   |   None  =  None 
52-     images : list [dict [str , Any ]]  |   None  =  None 
41+     id : Optional [ str ]  =  None 
42+     content : Optional [ str ]  =  None 
43+     category : Optional [ str ]  =  None 
44+     sourcepage : Optional [ str ]  =  None 
45+     sourcefile : Optional [ str ]  =  None 
46+     oids : Optional [ list [str ]]  =  None 
47+     groups : Optional [ list [str ]]  =  None 
48+     captions : Optional [ list [QueryCaptionResult ]]  =  None 
49+     score : Optional [ float ]  =  None 
50+     reranker_score : Optional [ float ]  =  None 
51+     search_agent_query : Optional [ str ]  =  None 
52+     images : Optional [ list [dict [str , Any ]]]  =  None 
5353
5454    def  serialize_for_results (self ) ->  dict [str , Any ]:
5555        result_dict  =  {
@@ -83,8 +83,8 @@ def serialize_for_results(self) -> dict[str, Any]:
8383@dataclass  
8484class  ThoughtStep :
8585    title : str 
86-     description : Any   |   None 
87-     props : dict [str , Any ]  |   None  =  None 
86+     description : Optional [ Any ] 
87+     props : Optional [ dict [str , Any ]]  =  None 
8888
8989    def  update_token_usage (self , usage : CompletionUsage ) ->  None :
9090        if  self .props :
@@ -93,23 +93,23 @@ def update_token_usage(self, usage: CompletionUsage) -> None:
9393
@dataclass
class DataPoints:
    """Grounding material extracted from retrieved documents.

    All fields default to None so callers can supply only the kinds of
    sources (text, images, citations) that a given request produced.
    """

    # Plain-text source snippets, e.g. "file.pdf#page=2: content".
    text: Optional[list[str]] = None
    # Image sources; parameterized as list[Any] since entries are
    # backend-specific payloads (e.g. base64 data URLs / dicts).
    images: Optional[list[Any]] = None
    # Human-readable citation strings matching the sources above.
    citations: Optional[list[str]] = None
9999
100100
@dataclass
class ExtraInfo:
    """Auxiliary retrieval output returned alongside a chat answer."""

    # Text/image sources and citations that ground the answer.
    data_points: DataPoints
    # Ordered trace of the reasoning/search steps taken for this request.
    thoughts: list[ThoughtStep] = field(default_factory=list)
    # Suggested follow-up questions, when the model generated any.
    followup_questions: Optional[list[Any]] = None
106106
107107
108108@dataclass  
109109class  TokenUsageProps :
110110    prompt_tokens : int 
111111    completion_tokens : int 
112-     reasoning_tokens : int   |   None 
112+     reasoning_tokens : Optional [ int ] 
113113    total_tokens : int 
114114
115115    @classmethod  
@@ -151,19 +151,19 @@ def __init__(
151151        self ,
152152        search_client : SearchClient ,
153153        openai_client : AsyncOpenAI ,
154-         query_language : str   |   None ,
155-         query_speller : str   |   None ,
156-         embedding_deployment : str   |   None ,  # Not needed for non-Azure OpenAI or for retrieval_mode="text" 
154+         query_language : Optional [ str ] ,
155+         query_speller : Optional [ str ] ,
156+         embedding_deployment : Optional [ str ] ,  # Not needed for non-Azure OpenAI or for retrieval_mode="text" 
157157        embedding_model : str ,
158158        embedding_dimensions : int ,
159159        embedding_field : str ,
160160        openai_host : str ,
161161        prompt_manager : PromptManager ,
162-         reasoning_effort : str   |   None  =  None ,
162+         reasoning_effort : Optional [ str ]  =  None ,
163163        multimodal_enabled : bool  =  False ,
164-         image_embeddings_client : ImageEmbeddings   |   None  =  None ,
165-         global_blob_manager : BlobManager   |   None  =  None ,
166-         user_blob_manager : AdlsBlobManager   |   None  =  None ,
164+         image_embeddings_client : Optional [ ImageEmbeddings ]  =  None ,
165+         global_blob_manager : Optional [ BlobManager ]  =  None ,
166+         user_blob_manager : Optional [ AdlsBlobManager ]  =  None ,
167167    ):
168168        self .search_client  =  search_client 
169169        self .openai_client  =  openai_client 
@@ -182,30 +182,30 @@ def __init__(
182182        self .global_blob_manager  =  global_blob_manager 
183183        self .user_blob_manager  =  user_blob_manager 
184184
def build_filter(self, overrides: dict[str, Any]) -> Optional[str]:
    """Build an OData $filter expression from category overrides.

    Honors "include_category" (category eq ...) and "exclude_category"
    (category ne ...); single quotes are escaped by doubling, per OData.
    Returns None when neither override is set.
    """
    clauses: list[str] = []
    for key, op in (("include_category", "eq"), ("exclude_category", "ne")):
        category = overrides.get(key)
        if category:
            escaped = category.replace("'", "''")
            clauses.append(f"category {op} '{escaped}'")
    return " and ".join(clauses) if clauses else None
194194
195195    async  def  search (
196196        self ,
197197        top : int ,
198-         query_text : str   |   None ,
199-         filter : str   |   None ,
198+         query_text : Optional [ str ] ,
199+         filter : Optional [ str ] ,
200200        vectors : list [VectorQuery ],
201201        use_text_search : bool ,
202202        use_vector_search : bool ,
203203        use_semantic_ranker : bool ,
204204        use_semantic_captions : bool ,
205-         minimum_search_score : float   |   None  =  None ,
206-         minimum_reranker_score : float   |   None  =  None ,
207-         use_query_rewriting : bool   |   None  =  None ,
208-         access_token : str   |   None  =  None ,
205+         minimum_search_score : Optional [ float ]  =  None ,
206+         minimum_reranker_score : Optional [ float ]  =  None ,
207+         use_query_rewriting : Optional [ bool ]  =  None ,
208+         access_token : Optional [ str ]  =  None ,
209209    ) ->  list [Document ]:
210210        search_text  =  query_text  if  use_text_search  else  "" 
211211        search_vectors  =  vectors  if  use_vector_search  else  []
@@ -268,11 +268,11 @@ async def run_agentic_retrieval(
268268        messages : list [ChatCompletionMessageParam ],
269269        agent_client : KnowledgeAgentRetrievalClient ,
270270        search_index_name : str ,
271-         top : int   |   None  =  None ,
272-         filter_add_on : str   |   None  =  None ,
273-         minimum_reranker_score : float   |   None  =  None ,
274-         results_merge_strategy : str   |   None  =  None ,
275-         access_token : str   |   None  =  None ,
271+         top : Optional [ int ]  =  None ,
272+         filter_add_on : Optional [ str ]  =  None ,
273+         minimum_reranker_score : Optional [ float ]  =  None ,
274+         results_merge_strategy : Optional [ str ]  =  None ,
275+         access_token : Optional [ str ]  =  None ,
276276    ) ->  tuple [KnowledgeAgentRetrievalResponse , list [Document ]]:
277277        # STEP 1: Invoke agentic retrieval 
278278        response  =  await  agent_client .retrieve (
@@ -357,7 +357,7 @@ async def get_sources_content(
357357        use_semantic_captions : bool ,
358358        include_text_sources : bool ,
359359        download_image_sources : bool ,
360-         user_oid : str   |   None  =  None ,
360+         user_oid : Optional [ str ]  =  None ,
361361    ) ->  DataPoints :
362362        """Extract text/image sources & citations from documents. 
363363
@@ -407,15 +407,15 @@ def clean_source(s: str) -> str:
407407                    citations .append (self .get_image_citation (doc .sourcepage  or  "" , img ["url" ]))
408408        return  DataPoints (text = text_sources , images = image_sources , citations = citations )
409409
def get_citation(self, sourcepage: Optional[str]):
    """Return the source page as the citation text, or "" when missing."""
    if sourcepage:
        return sourcepage
    return ""
412412
def get_image_citation(self, sourcepage: Optional[str], image_url: str):
    """Cite an image as "<sourcepage citation> (<image filename>)".

    The filename is taken from the last path segment of image_url; the
    page citation is delegated to get_citation (empty when unknown).
    """
    filename = image_url.rsplit("/", 1)[-1]
    return f"{self.get_citation(sourcepage)} ({filename})"
417417
418-     async  def  download_blob_as_base64 (self , blob_url : str , user_oid : str   |   None   =  None ) ->  str   |   None :
418+     async  def  download_blob_as_base64 (self , blob_url : str , user_oid : Optional [ str ]  =  None ) ->  Optional [ str ] :
419419        """ 
420420        Downloads a blob from either Azure Blob Storage or Azure Data Lake Storage and returns it as a base64 encoded string. 
421421
@@ -483,7 +483,7 @@ async def compute_multimodal_embedding(self, q: str):
483483        multimodal_query_vector  =  await  self .image_embeddings_client .create_embedding_for_text (q )
484484        return  VectorizedQuery (vector = multimodal_query_vector , k_nearest_neighbors = 50 , fields = "images/embedding" )
485485
486-     def  get_system_prompt_variables (self , override_prompt : str   |   None ) ->  dict [str , str ]:
486+     def  get_system_prompt_variables (self , override_prompt : Optional [ str ] ) ->  dict [str , str ]:
487487        # Allows client to replace the entire prompt, or to inject into the existing prompt using >>> 
488488        if  override_prompt  is  None :
489489            return  {}
@@ -510,16 +510,16 @@ def get_lowest_reasoning_effort(self, model: str) -> ChatCompletionReasoningEffo
510510
511511    def  create_chat_completion (
512512        self ,
513-         chatgpt_deployment : str   |   None ,
513+         chatgpt_deployment : Optional [ str ] ,
514514        chatgpt_model : str ,
515515        messages : list [ChatCompletionMessageParam ],
516516        overrides : dict [str , Any ],
517517        response_token_limit : int ,
518518        should_stream : bool  =  False ,
519-         tools : list [ChatCompletionToolParam ]  |   None  =  None ,
520-         temperature : float   |   None  =  None ,
521-         n : int   |   None  =  None ,
522-         reasoning_effort : ChatCompletionReasoningEffort   |   None  =  None ,
519+         tools : Optional [ list [ChatCompletionToolParam ]]  =  None ,
520+         temperature : Optional [ float ]  =  None ,
521+         n : Optional [ int ]  =  None ,
522+         reasoning_effort : Optional [ ChatCompletionReasoningEffort ]  =  None ,
523523    ) ->  Awaitable [ChatCompletion ] |  Awaitable [AsyncStream [ChatCompletionChunk ]]:
524524        if  chatgpt_model  in  self .GPT_REASONING_MODELS :
525525            params : dict [str , Any ] =  {
@@ -561,9 +561,9 @@ def format_thought_step_for_chatcompletion(
561561        messages : list [ChatCompletionMessageParam ],
562562        overrides : dict [str , Any ],
563563        model : str ,
564-         deployment : str   |   None ,
565-         usage : CompletionUsage   |   None  =  None ,
566-         reasoning_effort : ChatCompletionReasoningEffort   |   None  =  None ,
564+         deployment : Optional [ str ] ,
565+         usage : Optional [ CompletionUsage ]  =  None ,
566+         reasoning_effort : Optional [ ChatCompletionReasoningEffort ]  =  None ,
567567    ) ->  ThoughtStep :
568568        properties : dict [str , Any ] =  {"model" : model }
569569        if  deployment :
0 commit comments