This diff renames the `chat_model` variable to `chat_client` across eight chatlas + Shiny chat examples.

8 files changed: +18 −18

AWS Bedrock Anthropic example:

@@ -13,7 +13,7 @@
 # them in a file named `.env`. The `python-dotenv` package will load `.env` as
 # environment variables which can be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatBedrockAnthropic(
+chat_client = ChatBedrockAnthropic(
     model="anthropic.claude-3-sonnet-20240229-v1:0",
 )

@@ -32,5 +32,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
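For orientation, each hunk in this diff comes from the same small Shiny Express app pattern: build a chatlas chat client, render a `ui.Chat` component, and stream replies on submit. A minimal sketch of the full Bedrock app, reconstructed from the fragments visible here (the page title and chat id are assumptions, and AWS credentials are expected in `.env`):

```python
from chatlas import ChatBedrockAnthropic
from dotenv import load_dotenv
from shiny.express import ui

# Load AWS credentials (e.g. AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY) from `.env`
load_dotenv()
chat_client = ChatBedrockAnthropic(
    model="anthropic.claude-3-sonnet-20240229-v1:0",
)

# Set some Shiny page options (the title here is an assumption, not from the diff)
ui.page_opts(title="Chat with Claude via AWS Bedrock", fillable=True)

# Create and display the chat component
chat = ui.Chat(id="chat")
chat.ui()

# Stream the model's reply into the chat UI whenever the user submits a message
@chat.on_user_submit
async def handle_user_input(user_input: str):
    response = await chat_client.stream_async(user_input)
    await chat.append_message_stream(response)
```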
Azure OpenAI example:

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html
 load_dotenv()
-chat_model = ChatAzureOpenAI(
+chat_client = ChatAzureOpenAI(
     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
     endpoint="https://my-endpoint.openai.azure.com",
     deployment_id="gpt-4o-mini",

@@ -37,5 +37,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
Anthropic example:

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html
 load_dotenv()
-chat_model = ChatAnthropic(
+chat_client = ChatAnthropic(
     api_key=os.environ.get("ANTHROPIC_API_KEY"),
     model="claude-3-7-sonnet-latest",
     system_prompt="You are a helpful assistant.",

@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
Google Gemini example:

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatGoogle.html
 load_dotenv()
-chat_model = ChatGoogle(
+chat_client = ChatGoogle(
     api_key=os.environ.get("GOOGLE_API_KEY"),
     system_prompt="You are a helpful assistant.",
     model="gemini-2.0-flash",

@@ -33,5 +33,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
OpenAI example:

@@ -15,7 +15,7 @@
 # app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
 # as environment variables which can later be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
 )

@@ -38,5 +38,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
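Because the chat client is a plain chatlas object with no Shiny dependency, it can be smoke-tested from a REPL before being wired into the app. A quick check, assuming `OPENAI_API_KEY` is set in the environment or in `.env`:

```python
import os

from chatlas import ChatOpenAI
from dotenv import load_dotenv

load_dotenv()
chat_client = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4o",
)

# chat() sends a single synchronous turn and echoes the streamed reply
chat_client.chat("Say hello in one short sentence.")
```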
Ollama example:

@@ -9,7 +9,7 @@
 # ChatOllama() requires an Ollama model server to be running locally.
 # See the docs for more information on how to set up a local Ollama server.
 # https://posit-dev.github.io/chatlas/reference/ChatOllama.html
-chat_model = ChatOllama(model="llama3.2")
+chat_client = ChatOllama(model="llama3.2")

 # Set some Shiny page options
 ui.page_opts(

@@ -29,5 +29,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
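The Ollama example is the only one with no API key; it needs a local Ollama server instead. One might add a preflight check before constructing the client. This sketch is not part of the original example and assumes Ollama's default port, 11434:

```python
import urllib.error
import urllib.request

def ollama_is_running(base_url: str = "http://localhost:11434") -> bool:
    # Ollama answers plain HTTP on its root path when the server is up
    try:
        with urllib.request.urlopen(base_url, timeout=2) as resp:
            return resp.status == 200
    except (urllib.error.URLError, OSError):
        return False

if not ollama_is_running():
    raise RuntimeError(
        "No Ollama server found; start one with `ollama serve` "
        "and fetch the model with `ollama pull llama3.2`."
    )
```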
OpenAI example (with system prompt):

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
     system_prompt="You are a helpful assistant.",

@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
Model-switching example:

@@ -63,15 +63,15 @@ def get_model():
     }

     if input.model() in models["openai"]:
-        chat_model = ctl.ChatOpenAI(**model_params)
+        chat_client = ctl.ChatOpenAI(**model_params)
     elif input.model() in models["claude"]:
-        chat_model = ctl.ChatAnthropic(**model_params)
+        chat_client = ctl.ChatAnthropic(**model_params)
     elif input.model() in models["google"]:
-        chat_model = ctl.ChatGoogle(**model_params)
+        chat_client = ctl.ChatGoogle(**model_params)
     else:
         raise ValueError(f"Invalid model: {input.model()}")

-    return chat_model
+    return chat_client


 @reactive.calc
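This last file picks the provider reactively from an `input.model()` value, so the rename applies to a local variable rather than a module-level client. A hedged sketch of how `get_model()` could plug into the same submit callback (the select input, its choices, and the `models` dict contents are assumptions; only the if/elif dispatch is from the diff):

```python
import chatlas as ctl
from shiny import reactive
from shiny.express import input, ui

# Assumed UI: a select box whose choices cover the three providers
ui.input_select(
    "model",
    "Model",
    choices=["gpt-4o", "claude-3-7-sonnet-latest", "gemini-2.0-flash"],
)

chat = ui.Chat(id="chat")
chat.ui()

@reactive.calc
def get_model():
    model_params = {"model": input.model()}
    models = {
        "openai": ["gpt-4o"],
        "claude": ["claude-3-7-sonnet-latest"],
        "google": ["gemini-2.0-flash"],
    }
    if input.model() in models["openai"]:
        chat_client = ctl.ChatOpenAI(**model_params)
    elif input.model() in models["claude"]:
        chat_client = ctl.ChatAnthropic(**model_params)
    elif input.model() in models["google"]:
        chat_client = ctl.ChatGoogle(**model_params)
    else:
        raise ValueError(f"Invalid model: {input.model()}")
    return chat_client

@chat.on_user_submit
async def handle_user_input(user_input: str):
    # get_model() is cached and only re-runs when input.model() changes
    response = await get_model().stream_async(user_input)
    await chat.append_message_stream(response)
```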