diff --git a/README.md b/README.md
index 604b9af..f5897b5 100644
--- a/README.md
+++ b/README.md
@@ -53,14 +53,14 @@ langbase_api_key = os.getenv("LANGBASE_API_KEY")
 llm_api_key = os.getenv("LLM_API_KEY")
 
 # Initialize the client
-lb = Langbase(api_key=langbase_api_key)
+langbase = Langbase(api_key=langbase_api_key)
 ```
 
 ### 3. Generate text
 
 ```python
 # Simple generation
-response = lb.agent.run(
+response = langbase.agent.run(
     input=[{"role": "user", "content": "Tell me about AI"}],
     model="openai:gpt-4.1-mini",
     api_key=llm_api_key,
@@ -148,10 +148,10 @@ runner.process()
 
 ```python
 # List all pipes
-pipes = lb.pipes.list()
+pipes = langbase.pipes.list()
 
 # Run a pipe
-response = lb.pipes.run(
+response = langbase.pipes.run(
     name="ai-agent",
     messages=[{"role": "user", "content": "Hello!"}],
     variables={"style": "friendly"},  # Optional variables
@@ -163,13 +163,13 @@ response = lb.pipes.run(
 
 ```python
 # Create a memory
-memory = lb.memories.create(
+memory = langbase.memories.create(
     name="product-docs",
     description="Product documentation",
 )
 
 # Upload documents
-lb.memories.documents.upload(
+langbase.memories.documents.upload(
     memory_name="product-docs",
     document_name="guide.pdf",
     document=open("guide.pdf", "rb"),
@@ -177,7 +177,7 @@ lb.memories.documents.upload(
 )
 
 # Retrieve relevant context
-results = lb.memories.retrieve(
+results = langbase.memories.retrieve(
     query="How do I get started?",
     memory=[{"name": "product-docs"}],
     top_k=3,
@@ -188,7 +188,7 @@
 
 ```python
 # Run an agent with tools
-response = lb.agent.run(
+response = langbase.agent.run(
     model="openai:gpt-4",
     messages=[{"role": "user", "content": "Search for AI news"}],
     tools=[{"type": "function", "function": {...}}],
@@ -202,20 +202,20 @@
 
 ```python
 # Chunk text for processing
-chunks = lb.chunker(
+chunks = langbase.chunker(
     content="Long text to split...",
     chunk_max_length=1024,
     chunk_overlap=256,
 )
 
 # Generate embeddings
-embeddings = lb.embed(
+embeddings = langbase.embed(
     chunks=["Text 1", "Text 2"],
     embedding_model="openai:text-embedding-3-small",
 )
 
 # Parse documents
-content = lb.parser(
+content = langbase.parser(
     document=open("document.pdf", "rb"),
     document_name="document.pdf",
     content_type="application/pdf",
diff --git a/examples/agent/agent.run.typed.py b/examples/agent/agent.run.typed.py
new file mode 100644
index 0000000..f2a77ef
--- /dev/null
+++ b/examples/agent/agent.run.typed.py
@@ -0,0 +1,87 @@
+"""
+Run Agent
+
+This example demonstrates how to run an agent with a typed stream.
+"""
+
+import os
+
+from dotenv import load_dotenv
+
+from langbase import Langbase, StreamEventType, get_typed_runner
+
+load_dotenv()
+
+
+def main():
+    # Check for required environment variables
+    langbase_api_key = os.environ.get("LANGBASE_API_KEY")
+    llm_api_key = os.environ.get("LLM_API_KEY")
+
+    if not langbase_api_key:
+        print("āŒ Missing LANGBASE_API_KEY in environment variables.")
+        print("Please set: export LANGBASE_API_KEY='your_langbase_api_key'")
+        exit(1)
+
+    if not llm_api_key:
+        print("āŒ Missing LLM_API_KEY in environment variables.")
+        print("Please set: export LLM_API_KEY='your_llm_api_key'")
+        exit(1)
+
+    # Initialize Langbase client
+    langbase = Langbase(api_key=langbase_api_key)
+    try:
+        # Get streaming response
+        response = langbase.agent.run(
+            stream=True,
+            model="openai:gpt-4.1-mini",
+            api_key=llm_api_key,
+            instructions="You are a helpful assistant that helps users summarize text.",
+            input=[{"role": "user", "content": "Who is an AI Engineer?"}],
+        )
+
+        # 
Create typed stream processor + runner = get_typed_runner(response) + + # Register event handlers + runner.on( + StreamEventType.CONNECT, + lambda event: print(f"āœ“ Connected! Thread ID: {event['threadId']}\n"), + ) + + runner.on( + StreamEventType.CONTENT, + lambda event: print(event["content"], end="", flush=True), + ) + + runner.on( + StreamEventType.TOOL_CALL, + lambda event: print( + f"\nšŸ”§ Tool call: {event['toolCall']['function']['name']}" + ), + ) + + runner.on( + StreamEventType.COMPLETION, + lambda event: print(f"\n\nāœ“ Completed! Reason: {event['reason']}"), + ) + + runner.on( + StreamEventType.ERROR, + lambda event: print(f"\nāŒ Error: {event['message']}"), + ) + + runner.on( + StreamEventType.END, + lambda event: print(f"ā±ļø Total duration: {event['duration']:.2f}s"), + ) + + # Process the stream + runner.process() + + except Exception as e: + print(f"Error: {e}") + + +if __name__ == "__main__": + main() diff --git a/examples/agent/agent.run.workflow.py b/examples/agent/agent.run.workflow.py index b939d12..0afd3fd 100644 --- a/examples/agent/agent.run.workflow.py +++ b/examples/agent/agent.run.workflow.py @@ -279,7 +279,7 @@ def __init__(self, langbase_client: Langbase, debug: bool = False): langbase_client: Langbase client instance debug: Whether to enable debug mode """ - self.lb = langbase_client + self.langbase = langbase_client self.workflow = Workflow(debug=debug) async def generate_blog_post( @@ -299,7 +299,7 @@ async def generate_blog_post( # Step 1: Generate outline async def create_outline(): - response = self.lb.agent.run( + response = self.langbase.agent.run( input=f"Create a {target_length} blog post outline about: {topic}", model="openai:gpt-4o-mini", api_key=os.environ.get("LLM_API_KEY"), @@ -309,7 +309,7 @@ async def create_outline(): # Step 2: Generate introduction async def write_introduction(): outline = self.workflow.context["outputs"]["outline"] - response = self.lb.agent.run( + response = self.langbase.agent.run( input=f"Write an engaging introduction for this outline: {outline}. 
Tone: {tone}", model="openai:gpt-4o-mini", api_key=os.environ.get("LLM_API_KEY"), @@ -320,7 +320,7 @@ async def write_introduction(): async def write_main_content(): outline = self.workflow.context["outputs"]["outline"] intro = self.workflow.context["outputs"]["introduction"] - response = self.lb.agent.run( + response = self.langbase.agent.run( input=f"Write the main content based on outline: {outline}\nIntroduction: {intro}\nTone: {tone}", model="openai:gpt-4o-mini", api_key=os.environ.get("LLM_API_KEY"), @@ -331,7 +331,7 @@ async def write_main_content(): async def write_conclusion(): outline = self.workflow.context["outputs"]["outline"] content = self.workflow.context["outputs"]["main_content"] - response = self.lb.agent.run( + response = self.langbase.agent.run( input=f"Write a conclusion for this content: {content[:500]}...", model="openai:gpt-4o-mini", api_key=os.environ.get("LLM_API_KEY"), @@ -392,8 +392,8 @@ async def advanced_workflow_example(): print("\nšŸš€ Advanced Workflow Example") print("=" * 50) - lb = Langbase(api_key=os.environ.get("LANGBASE_API_KEY")) - blog_workflow = AIContentWorkflow(lb, debug=True) + langbase = Langbase(api_key=os.environ.get("LANGBASE_API_KEY")) + blog_workflow = AIContentWorkflow(langbase, debug=True) result = await blog_workflow.generate_blog_post( topic="The Future of Artificial Intelligence", diff --git a/examples/chunker/chunker.py b/examples/chunker/chunker.py index 8c66b99..4874957 100644 --- a/examples/chunker/chunker.py +++ b/examples/chunker/chunker.py @@ -16,7 +16,7 @@ langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client -lb = Langbase(api_key=langbase_api_key) +langbase = Langbase(api_key=langbase_api_key) def main(): @@ -31,7 +31,7 @@ def main(): with open(document_path, "r", encoding="utf-8") as file: document_content = file.read() # Chunk the content - chunks = lb.chunker( + chunks = langbase.chunker( content=document_content, chunk_max_length=1024, chunk_overlap=256 ) diff --git a/examples/memory/memory.create.py b/examples/memory/memory.create.py index fafde40..a31bb99 100644 --- a/examples/memory/memory.create.py +++ b/examples/memory/memory.create.py @@ -17,11 +17,11 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Create the memory try: - response = lb.memories.create( + response = langbase.memories.create( name="product-knowledge", description="Memory store for product documentation and information", embedding_model="openai:text-embedding-3-large", diff --git a/examples/memory/memory.docs.delete.py b/examples/memory/memory.docs.delete.py index ecb645b..0234ed4 100644 --- a/examples/memory/memory.docs.delete.py +++ b/examples/memory/memory.docs.delete.py @@ -17,7 +17,7 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Memory name and document ID to delete memory_name = "product-knowledge" # Replace with your memory name @@ -25,7 +25,7 @@ def main(): # Delete the document try: - response = lb.memories.documents.delete( + response = langbase.memories.documents.delete( memory_name=memory_name, document_name=document_name ) diff --git a/examples/memory/memory.docs.list.py b/examples/memory/memory.docs.list.py index a73fa9f..690f996 100644 --- a/examples/memory/memory.docs.list.py +++ b/examples/memory/memory.docs.list.py @@ -17,14 +17,14 @@ def main(): 
langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Memory name to list documents from
     memory_name = "product-knowledge"  # Replace with your memory name
 
     # List documents in the memory
     try:
-        response = lb.memories.documents.list(memory_name=memory_name)
+        response = langbase.memories.documents.list(memory_name=memory_name)
 
         print(f"Documents in memory '{memory_name}':")
         print(json.dumps(response, indent=2))
diff --git a/examples/memory/memory.docs.retry-embed.py b/examples/memory/memory.docs.retry-embed.py
index 8ba6370..06f6532 100644
--- a/examples/memory/memory.docs.retry-embed.py
+++ b/examples/memory/memory.docs.retry-embed.py
@@ -17,7 +17,7 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Memory name to retry embedding for
     memory_name = "product-knowledge"  # Replace with your memory name
@@ -25,7 +25,7 @@
 
     # Retry embedding for failed documents
     try:
-        response = lb.memories.documents.embeddings.retry(
+        response = langbase.memories.documents.embeddings.retry(
             memory_name=memory_name, document_name=document_name
         )
diff --git a/examples/memory/memory.docs.upload-pdf.py b/examples/memory/memory.docs.upload-pdf.py
index 8c2ea55..8b74171 100644
--- a/examples/memory/memory.docs.upload-pdf.py
+++ b/examples/memory/memory.docs.upload-pdf.py
@@ -17,7 +17,7 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Memory name to upload documents to
     memory_name = "product-knowledge"  # Replace with your memory name
@@ -31,7 +31,7 @@
         document_content = file.read()
 
         content = "Langbase is a powerful platform for building AI applications with composable AI."
-        response = lb.memories.documents.upload(
+        response = langbase.memories.documents.upload(
             memory_name=memory_name,
             document_name="document.pdf",
             document=document_content,  # Raw bytes read from the PDF file
diff --git a/examples/memory/memory.docs.upload.py b/examples/memory/memory.docs.upload.py
index 7d2044e..5321625 100644
--- a/examples/memory/memory.docs.upload.py
+++ b/examples/memory/memory.docs.upload.py
@@ -16,7 +16,7 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Memory name to upload documents to
     memory_name = "product-knowledge"  # Replace with your memory name
@@ -24,7 +24,9 @@
 
     # Upload documents to the memory
     try:
         content = "Langbase is a powerful platform for building AI applications with composable AI."
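+        # The upload API expects the document as bytes, which is why the
+        # plain string below is passed through `content.encode("utf-8")`.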
-        response = lb.memories.documents.upload(
+        response = langbase.memories.documents.upload(
             memory_name=memory_name,
             document_name="intro.txt",
             document=content.encode("utf-8"),  # Convert string to bytes
diff --git a/examples/memory/memory.list.py b/examples/memory/memory.list.py
index a5fc92c..7653598 100644
--- a/examples/memory/memory.list.py
+++ b/examples/memory/memory.list.py
@@ -17,11 +17,11 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # List all memories
     try:
-        response = lb.memories.list()
+        response = langbase.memories.list()
 
         print(json.dumps(response, indent=2))
diff --git a/examples/memory/memory.retrieve.py b/examples/memory/memory.retrieve.py
index 6970357..3d82098 100644
--- a/examples/memory/memory.retrieve.py
+++ b/examples/memory/memory.retrieve.py
@@ -20,14 +20,14 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Retrieve memories using a query
     memory_name = "product-knowledge"  # Replace with your memory name
     query = "What is Langbase?"
 
     try:
-        response = lb.memories.retrieve(
+        response = langbase.memories.retrieve(
             query=query,
             memory=[{"name": memory_name}],
             top_k=5,  # Number of relevant memories to retrieve
diff --git a/examples/pipes/pipe.run.chat.py b/examples/pipes/pipe.run.chat.py
new file mode 100644
index 0000000..a8ee131
--- /dev/null
+++ b/examples/pipes/pipe.run.chat.py
@@ -0,0 +1,43 @@
+"""
+Example demonstrating how to have a conversation using a pipe in non-streaming mode.
+"""
+
+import json
+import os
+
+from dotenv import load_dotenv
+
+from langbase import Langbase
+
+
+def main():
+    load_dotenv()
+
+    langbase = Langbase(api_key=os.getenv("LANGBASE_API_KEY"))
+
+    # Message 1: Tell something to the LLM.
+    response1 = langbase.pipes.run(
+        stream=False,
+        name="summary-agent",
+        messages=[{"role": "user", "content": "My company is called Langbase"}],
+    )
+
+    print(json.dumps(response1, indent=2))
+
+    # Message 2: Continue the conversation in the same thread.
+    # Only the new message is sent; thread_id links it to the stored history.
+    response2 = langbase.pipes.run(
+        name="summary-agent",
+        stream=False,
+        thread_id=response1["threadId"],
+        messages=[{"role": "user", "content": "Tell me the name of my company?"}],
+    )
+
+    print(json.dumps(response2, indent=2))
+    # The LLM knows the company is `Langbase` because both
+    # messages share the same chat thread. This is how you
+    # continue a conversation in the same thread.
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/pipes/pipe.run.pipe.key.py b/examples/pipes/pipe.run.pipe.key.py
new file mode 100644
index 0000000..9bde484
--- /dev/null
+++ b/examples/pipes/pipe.run.pipe.key.py
@@ -0,0 +1,40 @@
+"""
+Example demonstrating how to run a pipe with a pipe API key.
+"""
+
+import os
+
+from dotenv import load_dotenv
+
+from langbase import Langbase, get_runner
+
+
+def main():
+    load_dotenv()
+
+    langbase = Langbase(api_key=os.getenv("LANGBASE_API_KEY"))
+
+    user_msg = "Who is an AI Engineer?"
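+
+    # Note: the call below does not pass a pipe `name`; the pipe-scoped
+    # API key in `api_key` identifies which pipe to run.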
+
+    # Get readable stream
+    response = langbase.pipes.run(
+        messages=[{"role": "user", "content": user_msg}],
+        stream=True,
+        raw_response=True,
+        api_key=os.getenv("PIPE_API_KEY"),
+    )
+
+    runner = get_runner(response)
+    print("Stream started.\n")
+    # Use text_generator() to stream content
+    for content in runner.text_generator():
+        print(content, end="", flush=True)
+
+    print("\n\nStream ended!")  # Blank line after the streamed output
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/pipes/pipe.run.stream.chat.py b/examples/pipes/pipe.run.stream.chat.py
new file mode 100644
index 0000000..7c5b8bd
--- /dev/null
+++ b/examples/pipes/pipe.run.stream.chat.py
@@ -0,0 +1,52 @@
+"""
+Example demonstrating how to have a conversation using a pipe in streaming mode.
+"""
+
+import os
+
+from dotenv import load_dotenv
+
+from langbase import Langbase, get_runner
+
+
+def main():
+    load_dotenv()
+
+    langbase = Langbase(api_key=os.getenv("LANGBASE_API_KEY"))
+
+    # Message 1: Tell something to the LLM.
+    response1 = langbase.pipes.run(
+        name="summary-agent",
+        stream=True,
+        messages=[{"role": "user", "content": "My company is called Langbase"}],
+    )
+
+    runner1 = get_runner(response1)
+
+    # Use text_generator() to stream content
+    for content in runner1.text_generator():
+        print(content, end="", flush=True)
+
+    print("\n\nStream ended!")  # Add a newline after the first response
+
+    # Message 2: Ask something about the first message.
+    # Continue the conversation in the same thread by sending
+    # `thread_id` from the second message onwards.
+    response2 = langbase.pipes.run(
+        name="summary-agent",
+        stream=True,
+        thread_id=response1["thread_id"],
+        messages=[{"role": "user", "content": "Tell me the name of my company?"}],
+    )
+
+    runner2 = get_runner(response2)
+
+    # Use text_generator() to stream content
+    for content in runner2.text_generator():
+        print(content, end="", flush=True)
+
+    print("\n\nStream ended!")  # Add a newline after the second response
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/pipes/pipe.run.stream.llmkey.py b/examples/pipes/pipe.run.stream.llmkey.py
new file mode 100644
index 0000000..dce207b
--- /dev/null
+++ b/examples/pipes/pipe.run.stream.llmkey.py
@@ -0,0 +1,39 @@
+"""
+Example demonstrating how to run a pipe with an LLM API key in streaming mode.
+"""
+
+import os
+
+from dotenv import load_dotenv
+
+from langbase import Langbase, get_runner
+
+
+def main():
+    load_dotenv()
+
+    langbase = Langbase(api_key=os.getenv("LANGBASE_API_KEY"))
+
+    user_msg = "Who is an AI Engineer?"
+
+    # Get readable stream
+    response = langbase.pipes.run(
+        messages=[{"role": "user", "content": user_msg}],
+        stream=True,
+        raw_response=True,
+        name="summary-agent",
+        llm_key=os.getenv("LLM_KEY"),  # Your LLM API key
+    )
+
+    # Convert the stream to a stream runner.
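+    # get_runner wraps the streamed response (raw_response=True above) and
+    # exposes text_generator(), which yields just the text chunks.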
+    runner = get_runner(response)
+
+    # Use text_generator() to stream content
+    for content in runner.text_generator():
+        print(content, end="", flush=True)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/pipes/pipe.run.typed-tool-call.py b/examples/pipes/pipe.run.typed-tool-call.py
index ef6c765..96ea14e 100644
--- a/examples/pipes/pipe.run.typed-tool-call.py
+++ b/examples/pipes/pipe.run.typed-tool-call.py
@@ -13,14 +13,14 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Name of the pipe to run
     pipe_name = "summary-agent"  # Replace with your pipe name
 
     try:
         # Get streaming response
-        response = lb.pipes.run(
+        response = langbase.pipes.run(
             name=pipe_name,
             messages=[{"role": "user", "content": "What is the weather in Tokyo?"}],
             stream=True,
diff --git a/examples/pipes/pipe.structured.outputs.py b/examples/pipes/pipe.structured.outputs.py
new file mode 100644
index 0000000..014cc4f
--- /dev/null
+++ b/examples/pipes/pipe.structured.outputs.py
@@ -0,0 +1,92 @@
+"""
+Example demonstrating how to use structured outputs with a pipe.
+"""
+
+import json
+import os
+from typing import List
+
+from dotenv import load_dotenv
+from pydantic import BaseModel
+
+from langbase import Langbase
+
+
+# Define the structured output JSON schema with Pydantic
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+
+class MathReasoning(BaseModel):
+    steps: List[Step]
+    final_answer: str
+
+
+def create_math_tutor_pipe(langbase: Langbase):
+    json_schema = MathReasoning.model_json_schema()
+
+    pipe = langbase.pipes.create(
+        name="math-tutor",
+        model="openai:gpt-4o",
+        upsert=True,
+        messages=[
+            {
+                "role": "system",
+                "content": "You are a helpful math tutor. Guide the user through the solution step by step.",
+            },
+        ],
+        json=True,
+        response_format={
+            "type": "json_schema",
+            "json_schema": {
+                "name": "math_reasoning",
+                "schema": json_schema,
+            },
+        },
+    )
+
+    print("āœ… Math Tutor pipe created:", json.dumps(pipe, indent=2))
+
+
+def run_math_tutor_pipe(langbase: Langbase, question: str):
+    response = langbase.pipes.run(
+        name="math-tutor",
+        messages=[{"role": "user", "content": question}],
+        stream=False,
+    )
+
+    # Parse and validate the response using Pydantic
+    solution = MathReasoning.model_validate_json(response["completion"])
+
+    print("āœ… Structured Output Response:")
+    print("=" * 50)
+
+    for i, step in enumerate(solution.steps, 1):
+        print(f"Step {i}:")
+        print(f"  Explanation: {step.explanation}")
+        print(f"  Output: {step.output}")
+        print()
+
+    print(f"Final Answer: {solution.final_answer}")
+    print("=" * 50)
+
+
+def main():
+    load_dotenv()
+
+    if not os.getenv("LANGBASE_API_KEY"):
+        print("āŒ Missing LANGBASE_API_KEY in environment variables.")
+        exit(1)
+
+    langbase = Langbase(api_key=os.getenv("LANGBASE_API_KEY"))
+
+    # Create (or update) the math tutor pipe, then run it.
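+    # Note: create_math_tutor_pipe passes upsert=True, so re-running this
+    # script updates the existing "math-tutor" pipe rather than failing.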
+ create_math_tutor_pipe(langbase) + run_math_tutor_pipe(langbase, "How can I solve 8x + 22 = -23?") + + +if __name__ == "__main__": + main() diff --git a/examples/pipes/pipes.list.py b/examples/pipes/pipes.list.py index e7f473b..fc7bb9b 100644 --- a/examples/pipes/pipes.list.py +++ b/examples/pipes/pipes.list.py @@ -13,12 +13,12 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Test a basic operation (mock or use a real API key) try: # For testing purposes, you can use a mock or a real simple call # This would depend on your API, for example: - response = lb.pipes.list() + response = langbase.pipes.list() print(json.dumps(response, indent=2)) except Exception as e: diff --git a/examples/pipes/pipes.run.py b/examples/pipes/pipes.run.py index 84e1a29..77119b1 100644 --- a/examples/pipes/pipes.run.py +++ b/examples/pipes/pipes.run.py @@ -18,11 +18,11 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Run the pipe with explicit stream=False try: - response = lb.pipes.run( + response = langbase.pipes.run( name="summary-agent", messages=[{"role": "user", "content": "Who is an AI Engineer?"}], stream=False, diff --git a/examples/pipes/pipes.run.stream.py b/examples/pipes/pipes.run.stream.py index eac24cf..1670646 100644 --- a/examples/pipes/pipes.run.stream.py +++ b/examples/pipes/pipes.run.stream.py @@ -16,7 +16,7 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Name of the pipe to run pipe_name = "summary-agent" # Replace with your pipe name @@ -24,7 +24,7 @@ def main(): try: # Message 1: Tell something to the LLM. 
print("Stream started \n\n") - response1 = lb.pipes.run( + response1 = langbase.pipes.run( name=pipe_name, messages=[{"role": "user", "content": "What is an AI Engineer?"}], stream=True, diff --git a/examples/pipes/pipes.run.typed-stream.py b/examples/pipes/pipes.run.typed-stream.py index d9c4fbe..1067dd2 100644 --- a/examples/pipes/pipes.run.typed-stream.py +++ b/examples/pipes/pipes.run.typed-stream.py @@ -18,14 +18,14 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Name of the pipe to run pipe_name = "summary-agent" # Replace with your pipe name try: # Get streaming response - response = lb.pipes.run( + response = langbase.pipes.run( name=pipe_name, messages=[{"role": "user", "content": "What is an AI Engineer?"}], stream=True, diff --git a/examples/pipes/pipes.update.py b/examples/pipes/pipes.update.py index 1678ed3..80ea8aa 100644 --- a/examples/pipes/pipes.update.py +++ b/examples/pipes/pipes.update.py @@ -17,7 +17,7 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Define updated configuration updates = { @@ -27,7 +27,7 @@ def main(): # Update the pipe try: - response = lb.pipes.update( + response = langbase.pipes.update( name="summary-agent", description="An agent that summarizes text", messages=[ diff --git a/examples/threads/threads.append.py b/examples/threads/threads.append.py index 973880e..15bc820 100644 --- a/examples/threads/threads.append.py +++ b/examples/threads/threads.append.py @@ -16,7 +16,7 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Thread ID to append messages to thread_id = "thread_123" # Replace with your actual thread ID @@ -28,7 +28,7 @@ def main(): # Append messages to the thread try: - response = lb.threads.append(thread_id=thread_id, messages=messages) + response = langbase.threads.append(thread_id=thread_id, messages=messages) print(f"Successfully appended {len(response)} messages to thread '{thread_id}'") diff --git a/examples/threads/threads.create.py b/examples/threads/threads.create.py index 52a8333..0fd248f 100644 --- a/examples/threads/threads.create.py +++ b/examples/threads/threads.create.py @@ -17,11 +17,11 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Create a thread with metadata and initial messages try: - thread = lb.threads.create( + thread = langbase.threads.create( metadata={"company": "langbase"}, messages=[{"role": "user", "content": "Hello, how are you?"}], ) diff --git a/examples/threads/threads.delete.py b/examples/threads/threads.delete.py index abd677f..d67a06d 100644 --- a/examples/threads/threads.delete.py +++ b/examples/threads/threads.delete.py @@ -16,14 +16,14 @@ def main(): langbase_api_key = os.getenv("LANGBASE_API_KEY") # Initialize the client - lb = Langbase(api_key=langbase_api_key) + langbase = Langbase(api_key=langbase_api_key) # Thread ID to delete thread_id = "thread_123" # Replace with your actual thread ID # Delete the thread try: - response = lb.threads.delete(thread_id=thread_id) + response = langbase.threads.delete(thread_id=thread_id) if response.get("success", False): print(f"Successfully 
deleted thread {thread_id}")
diff --git a/examples/threads/threads.get.py b/examples/threads/threads.get.py
index 7c35a5c..7cb1b74 100644
--- a/examples/threads/threads.get.py
+++ b/examples/threads/threads.get.py
@@ -17,14 +17,14 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Thread ID to retrieve
     thread_id = "thread_123"  # Replace with your thread ID
 
     # Get the specific thread
     try:
-        thread = lb.threads.get(thread_id=thread_id)
+        thread = langbase.threads.get(thread_id=thread_id)
 
         print(json.dumps(thread, indent=2))
diff --git a/examples/threads/threads.messages.list.py b/examples/threads/threads.messages.list.py
index 79dfc02..e7540f8 100644
--- a/examples/threads/threads.messages.list.py
+++ b/examples/threads/threads.messages.list.py
@@ -17,11 +17,11 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # List all messages in a thread
    try:
-        threads = lb.threads.messages.list(thread_id="thread_123")
+        threads = langbase.threads.messages.list(thread_id="thread_123")
 
         print(json.dumps(threads, indent=2))
diff --git a/examples/threads/threads.update.py b/examples/threads/threads.update.py
index e8b1084..d8fab00 100644
--- a/examples/threads/threads.update.py
+++ b/examples/threads/threads.update.py
@@ -17,7 +17,7 @@ def main():
     langbase_api_key = os.getenv("LANGBASE_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Thread ID to update
     thread_id = "thread_123"  # Replace with your actual thread ID
@@ -30,7 +30,7 @@
 
     # Update the thread metadata
     try:
-        updated_thread = lb.threads.update(
+        updated_thread = langbase.threads.update(
             thread_id=thread_id,
             metadata=updated_metadata,
         )
diff --git a/examples/tools/tools.crawl.py b/examples/tools/tools.crawl.py
index 23c78ac..583d398 100644
--- a/examples/tools/tools.crawl.py
+++ b/examples/tools/tools.crawl.py
@@ -18,7 +18,7 @@
 crawl_api_key = os.getenv("CRAWL_KEY")
 
 # Initialize the client
-lb = Langbase(api_key=langbase_api_key)
+langbase = Langbase(api_key=langbase_api_key)
 
 
 def main():
@@ -27,7 +27,7 @@ def main():
     """
     try:
         # Perform the web crawl
-        results = lb.tools.crawl(
+        results = langbase.tools.crawl(
             url=["https://langbase.com", "https://langbase.com/about"],
             max_pages=1,
             api_key=crawl_api_key,
diff --git a/examples/tools/tools.web-search.py b/examples/tools/tools.web-search.py
index 902ae03..102878a 100644
--- a/examples/tools/tools.web-search.py
+++ b/examples/tools/tools.web-search.py
@@ -20,7 +20,7 @@ def main():
     search_api_key = os.getenv("EXA_API_KEY")
 
     # Initialize the client
-    lb = Langbase(api_key=langbase_api_key)
+    langbase = Langbase(api_key=langbase_api_key)
 
     # Configure the search request
     search_query = "latest advancements in quantum computing 2025"
@@ -30,7 +30,7 @@
 
     # Perform the web search
     try:
-        search_results = lb.tools.web_search(
+        search_results = langbase.tools.web_search(
             query=search_query,
             service="exa",  # The search service to use
             total_results=5,  # Number of results to return