From 0729ed35c1e8a3483d6dcdacc08e2eb74730fc25 Mon Sep 17 00:00:00 2001 From: Nimar Date: Wed, 27 Aug 2025 11:34:13 +0200 Subject: [PATCH] fix: move to start_observation from start_span --- components-mdx/get-started/python-sdk.mdx | 8 +-- components-mdx/integration-learn-more.mdx | 2 +- components-mdx/prompt-linking.mdx | 5 +- .../evaluation-methods/custom-scores.mdx | 18 +++--- .../observability/features/environments.mdx | 2 +- .../observability/features/log-levels.mdx | 9 +-- pages/docs/observability/features/masking.mdx | 2 +- .../docs/observability/features/metadata.mdx | 9 +-- .../observability/features/multi-modality.mdx | 2 +- .../features/releases-and-versioning.mdx | 7 ++- .../docs/observability/features/sessions.mdx | 7 ++- pages/docs/observability/features/tags.mdx | 6 +- .../features/token-and-cost-tracking.mdx | 5 +- .../trace-ids-and-distributed-tracing.mdx | 6 +- pages/docs/observability/features/url.mdx | 4 +- pages/docs/observability/features/users.mdx | 9 +-- .../sdk/python/advanced-usage.mdx | 4 +- .../observability/sdk/python/evaluation.mdx | 8 +-- .../sdk/python/instrumentation.mdx | 57 ++++++++++--------- .../sdk/python/troubleshooting-and-faq.mdx | 2 +- .../observability/sdk/python/upgrade-path.mdx | 20 +++---- .../faq/all/empty-trace-input-and-output.mdx | 2 +- .../cookbook/example_langgraph_agents.mdx | 7 ++- .../guides/cookbook/integration_langgraph.mdx | 6 +- .../cookbook/integration_llama_index.mdx | 2 +- pages/integrations/frameworks/crewai.mdx | 10 ++-- pages/integrations/frameworks/langchain.mdx | 18 +++--- pages/integrations/frameworks/llamaindex.mdx | 2 +- .../model-providers/openai-py.mdx | 4 +- 29 files changed, 125 insertions(+), 118 deletions(-) diff --git a/components-mdx/get-started/python-sdk.mdx b/components-mdx/get-started/python-sdk.mdx index df1969c7d..a1b714f4f 100644 --- a/components-mdx/get-started/python-sdk.mdx +++ b/components-mdx/get-started/python-sdk.mdx @@ -30,12 +30,12 @@ from langfuse import get_client langfuse = get_client() # Create a span using a context manager -with langfuse.start_as_current_span(name="process-request") as span: +with langfuse.start_as_current_observation(name="process-request") as span: # Your processing logic here span.update(output="Processing complete") # Create a nested generation for an LLM call - with langfuse.start_as_current_generation(name="llm-response", model="gpt-3.5-turbo") as generation: + with langfuse.start_as_current_observation(name="llm-response", model="gpt-3.5-turbo", as_type="generation") as generation: # Your LLM call logic here generation.update(output="Generated response") @@ -57,13 +57,13 @@ from langfuse import get_client langfuse = get_client() # Create a span without a context manager -span = langfuse.start_span(name="user-request") +span = langfuse.start_observation(name="user-request") # Your processing logic here span.update(output="Request processed") # Child spans must be created using the parent span object -nested_span = span.start_span(name="nested-span") +nested_span = span.start_observation(name="nested-span") nested_span.update(output="Nested span output") # Important: Manually end the span diff --git a/components-mdx/integration-learn-more.mdx b/components-mdx/integration-learn-more.mdx index 64993430f..226f709ea 100644 --- a/components-mdx/integration-learn-more.mdx +++ b/components-mdx/integration-learn-more.mdx @@ -51,7 +51,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="my-trace") as span: +with 
langfuse.start_as_current_observation(name="my-trace") as span: # Run your application here output = my_llm_call(input) diff --git a/components-mdx/prompt-linking.mdx b/components-mdx/prompt-linking.mdx index 3562f6c51..58a422df4 100644 --- a/components-mdx/prompt-linking.mdx +++ b/components-mdx/prompt-linking.mdx @@ -32,10 +32,11 @@ langfuse = get_client() prompt = langfuse.get_prompt("movie-critic") -with langfuse.start_as_current_generation( +with langfuse.start_as_current_observation( name="movie-generation", model="gpt-4o", - prompt=prompt + prompt=prompt, + as_type="generation" ) as generation: # Your LLM call here generation.update(output="LLM response") diff --git a/pages/docs/evaluation/evaluation-methods/custom-scores.mdx b/pages/docs/evaluation/evaluation-methods/custom-scores.mdx index 721debea2..1566c0ab1 100644 --- a/pages/docs/evaluation/evaluation-methods/custom-scores.mdx +++ b/pages/docs/evaluation/evaluation-methods/custom-scores.mdx @@ -55,7 +55,7 @@ langfuse.create_score( ) # Method 2: Score current span/generation (within context) -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: # Score the current span span.score( name="correctness", @@ -73,7 +73,7 @@ with langfuse.start_as_current_span(name="my-operation") as span: # Method 3: Score via the current context -with langfuse.start_as_current_span(name="my-operation"): +with langfuse.start_as_current_observation(name="my-operation"): # Score the current span langfuse.score_current_span( name="correctness", @@ -109,7 +109,7 @@ langfuse.create_score( ) # Method 2: Score current span/generation (within context) -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: # Score the current span span.score( name="accuracy", @@ -126,7 +126,7 @@ with langfuse.start_as_current_span(name="my-operation") as span: ) # Method 3: Score via the current context -with langfuse.start_as_current_span(name="my-operation"): +with langfuse.start_as_current_observation(name="my-operation"): # Score the current span langfuse.score_current_span( name="accuracy", @@ -162,7 +162,7 @@ langfuse.create_score( ) # Method 2: Score current span/generation (within context) -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: # Score the current span span.score( name="helpfulness", @@ -178,7 +178,7 @@ with langfuse.start_as_current_span(name="my-operation") as span: data_type="BOOLEAN" ) # Method 3: Score via the current context -with langfuse.start_as_current_span(name="my-operation"): +with langfuse.start_as_current_observation(name="my-operation"): # Score the current span langfuse.score_current_span( name="helpfulness", @@ -304,7 +304,7 @@ langfuse.create_score( ) # Method 2: Score within context -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: span.score( name="accuracy", value=0.9, @@ -335,7 +335,7 @@ langfuse.create_score( ) # Method 2: Score within context -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: span.score( name="correctness", value="correct", @@ -366,7 +366,7 @@ langfuse.create_score( ) # Method 2: Score within context -with langfuse.start_as_current_span(name="my-operation") as span: +with 
langfuse.start_as_current_observation(name="my-operation") as span: span.score( name="helpfulness", value=1, diff --git a/pages/docs/observability/features/environments.mdx b/pages/docs/observability/features/environments.mdx index 1a34d44c8..db9c4ae15 100644 --- a/pages/docs/observability/features/environments.mdx +++ b/pages/docs/observability/features/environments.mdx @@ -50,7 +50,7 @@ os.environ["LANGFUSE_TRACING_ENVIRONMENT"] = "production" langfuse = get_client() # All operations will now be associated with the "production" environment -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: # Your code here pass diff --git a/pages/docs/observability/features/log-levels.mdx b/pages/docs/observability/features/log-levels.mdx index 4b0d7adf5..6341449b0 100644 --- a/pages/docs/observability/features/log-levels.mdx +++ b/pages/docs/observability/features/log-levels.mdx @@ -40,9 +40,9 @@ from langfuse import get_client langfuse = get_client() # Using context managers (recommended) -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: # Set level and status message on creation - with span.start_as_current_span( + with span.start_as_current_observation( name="potentially-risky-operation", level="WARNING", status_message="Operation may fail" @@ -56,7 +56,7 @@ with langfuse.start_as_current_span(name="my-operation") as span: ) # You can also update the currently active span without a direct reference -with langfuse.start_as_current_span(name="another-operation"): +with langfuse.start_as_current_observation(name="another-operation"): # ... some processing ... langfuse.update_current_span( level="DEBUG", @@ -69,7 +69,8 @@ Levels can also be set when creating generations: ```python langfuse = get_client() -with langfuse.start_as_current_generation( +with langfuse.start_as_current_observation( + as_type="generation", name="llm-call", model="gpt-4o", level="DEFAULT" # Default level diff --git a/pages/docs/observability/features/masking.mdx b/pages/docs/observability/features/masking.mdx index 8fe169d9e..a7764ad9b 100644 --- a/pages/docs/observability/features/masking.mdx +++ b/pages/docs/observability/features/masking.mdx @@ -80,7 +80,7 @@ from langfuse import Langfuse langfuse = Langfuse(mask=masking_function) -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="sensitive-operation", input="SECRET_INPUT_DATA" ) as span: diff --git a/pages/docs/observability/features/metadata.mdx b/pages/docs/observability/features/metadata.mdx index 1d2a11415..f83497ffc 100644 --- a/pages/docs/observability/features/metadata.mdx +++ b/pages/docs/observability/features/metadata.mdx @@ -46,7 +46,7 @@ from langfuse import get_client langfuse = get_client() # Add metadata at trace level -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="process-request" ) as root_span: # Add metadata to the trace @@ -56,7 +56,8 @@ with langfuse.start_as_current_span( root_span.update(metadata={"stage": "parsing"}) # Create a child span with metadata - with root_span.start_as_current_generation( + with root_span.start_as_current_observation( + as_type="generation", name="generate-response", model="gpt-4o", metadata={"temperature": 0.7, "max_tokens": 1000} @@ -69,7 +70,7 @@ You can add new keys to the metadata object by continuously updating the entity. 
We strongly discourage writing the same top-level key multiple times, as this produces undefined behaviour. ```python -with langfuse.start_as_current_span(name="operation") as span: +with langfuse.start_as_current_observation(name="operation") as span: # First write span.update(metadata={"status": "started"}) @@ -181,7 +182,7 @@ prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}") chain = prompt | llm # Set trace attributes dynamically via enclosing span -with langfuse.start_as_current_span(name="dynamic-langchain-trace") as span: +with langfuse.start_as_current_observation(name="dynamic-langchain-trace") as span: span.update_trace( metadata={"foo": "bar", "baz": "qux"} ) diff --git a/pages/docs/observability/features/multi-modality.mdx b/pages/docs/observability/features/multi-modality.mdx index 5d8636cba..bae467a56 100644 --- a/pages/docs/observability/features/multi-modality.mdx +++ b/pages/docs/observability/features/multi-modality.mdx @@ -144,7 +144,7 @@ def process_document(): langfuse = get_client() -with langfuse.start_as_current_span(name="analyze-document") as span: # Include media in the span input, output, or metadata +with langfuse.start_as_current_observation(name="analyze-document") as span: # Include media in the span input, output, or metadata span.update( input={"document": pdf_media}, metadata={"file_size": len(pdf_bytes)} ) diff --git a/pages/docs/observability/features/releases-and-versioning.mdx b/pages/docs/observability/features/releases-and-versioning.mdx index 3af0d153b..63aac17f7 100644 --- a/pages/docs/observability/features/releases-and-versioning.mdx +++ b/pages/docs/observability/features/releases-and-versioning.mdx @@ -63,7 +63,7 @@ def process_data(): # With context managers langfuse = get_client() -with langfuse.start_as_current_span(name="my-operation") as span: +with langfuse.start_as_current_observation(name="my-operation") as span: span.update_trace(release="v2.1.24") ``` @@ -165,14 +165,15 @@ from langfuse import get_client langfuse = get_client() # Set version when creating a span -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="process-data", version="1.0" ) as span: # Processing...
# Create a generation with version - with span.start_as_current_generation( + with span.start_as_current_observation( + as_type="generation", name="guess-countries", model="gpt-4o", version="1.0" diff --git a/pages/docs/observability/features/sessions.mdx b/pages/docs/observability/features/sessions.mdx index cff55513a..881078836 100644 --- a/pages/docs/observability/features/sessions.mdx +++ b/pages/docs/observability/features/sessions.mdx @@ -43,14 +43,15 @@ from langfuse import get_client langfuse = get_client() # You can set the session_id when creating the root span -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="process-chat-message" ) as root_span: # Add session_id to the trace root_span.update_trace(session_id="chat-session-123") # All spans in this trace will belong to the same session - with root_span.start_as_current_generation( + with root_span.start_as_current_observation( + as_type="generation", name="generate-response", model="gpt-4o" ) as gen: @@ -61,7 +62,7 @@ with langfuse.start_as_current_span( You can also update the session_id of the current trace without a direct reference to a span: ```python -with langfuse.start_as_current_span(name="another-operation"): +with langfuse.start_as_current_observation(name="another-operation"): # Add to the current trace langfuse.update_current_trace(session_id="your-session-id") ``` diff --git a/pages/docs/observability/features/tags.mdx b/pages/docs/observability/features/tags.mdx index 24058f4e6..bb5a21a0c 100644 --- a/pages/docs/observability/features/tags.mdx +++ b/pages/docs/observability/features/tags.mdx @@ -34,14 +34,14 @@ from langfuse import get_client langfuse = get_client() # Add tags when creating the root span -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="my-operation" ) as root_span: # Add tags to the trace root_span.update_trace(tags=["tag-1", "tag-2"]) # You can add more tags later from any span in the same trace - with root_span.start_as_current_generation(name="llm-call", model="gpt-4o") as gen: + with root_span.start_as_current_observation(as_type="generation", name="llm-call", model="gpt-4o") as gen: # Processing... gen.update_trace(tags=["llm-gen"]) # Adds another tag to the same trace ``` @@ -49,7 +49,7 @@ with langfuse.start_as_current_span( You can also update the tags of the current trace without a direct reference to a span: ```python -with langfuse.start_as_current_span(name="another-operation"): +with langfuse.start_as_current_observation(name="another-operation"): # ... processing ... 
langfuse.update_current_trace(tags=["processing", "beta-feature"]) ``` diff --git a/pages/docs/observability/features/token-and-cost-tracking.mdx b/pages/docs/observability/features/token-and-cost-tracking.mdx index ff4de2892..cb365c663 100644 --- a/pages/docs/observability/features/token-and-cost-tracking.mdx +++ b/pages/docs/observability/features/token-and-cost-tracking.mdx @@ -117,10 +117,11 @@ import anthropic langfuse = get_client() anthropic_client = anthropic.Anthropic() -with langfuse.start_as_current_generation( +with langfuse.start_as_current_observation( name="anthropic-completion", model="claude-3-opus-20240229", - input=[{"role": "user", "content": "Hello, Claude"}] + input=[{"role": "user", "content": "Hello, Claude"}], + as_type="generation" ) as generation: response = anthropic_client.messages.create( model="claude-3-opus-20240229", diff --git a/pages/docs/observability/features/trace-ids-and-distributed-tracing.mdx b/pages/docs/observability/features/trace-ids-and-distributed-tracing.mdx index ce83b24e7..c158c586f 100644 --- a/pages/docs/observability/features/trace-ids-and-distributed-tracing.mdx +++ b/pages/docs/observability/features/trace-ids-and-distributed-tracing.mdx @@ -85,7 +85,7 @@ external_id = "request_12345" trace_id = langfuse.create_trace_id(seed=external_id) # Use this trace ID in a span -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="process-request", trace_context={"trace_id": trace_id} ) as span: @@ -101,7 +101,7 @@ from langfuse import get_client langfuse = get_client() # Use a predefined trace ID with trace_context parameter -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="my-operation", trace_context={ "trace_id": "abcdef1234567890abcdef1234567890", # Must be 32 hex chars @@ -119,7 +119,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="outer-operation") as span: +with langfuse.start_as_current_observation(name="outer-operation") as span: # Access the trace ID of the current span current_trace_id = langfuse.get_current_trace_id() current_span_id = langfuse.get_current_observation_id() diff --git a/pages/docs/observability/features/url.mdx b/pages/docs/observability/features/url.mdx index 8d55661de..09d2aaa3e 100644 --- a/pages/docs/observability/features/url.mdx +++ b/pages/docs/observability/features/url.mdx @@ -39,7 +39,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="process-request") as span: +with langfuse.start_as_current_observation(name="process-request") as span: # Get the URL of this trace trace_url = langfuse.get_trace_url() print(f"View trace at: {trace_url}") @@ -125,7 +125,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="process-request") as span: +with langfuse.start_as_current_observation(name="process-request") as span: # Make this trace public span.update_trace(public=True) diff --git a/pages/docs/observability/features/users.mdx b/pages/docs/observability/features/users.mdx index c6faaa16a..de4bb85ba 100644 --- a/pages/docs/observability/features/users.mdx +++ b/pages/docs/observability/features/users.mdx @@ -41,16 +41,17 @@ from langfuse import get_client langfuse = get_client() # You can set the user_id when creating the root span via update_trace -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="process-user-request" ) as root_span: # Add user_id to 
the trace root_span.update_trace(user_id="user_12345") # All spans in this trace will be associated with this user - with root_span.start_as_current_generation( + with root_span.start_as_current_observation( name="generate-response", - model="gpt-4o" + model="gpt-4o", + as_type="generation" ) as gen: # ...generate response... pass @@ -59,7 +60,7 @@ with langfuse.start_as_current_span( You can also update the user_id of the current trace without a direct reference to a span: ```python -with langfuse.start_as_current_span(name="handle-user-interaction"): +with langfuse.start_as_current_observation(name="handle-user-interaction"): # Add user_id to the current trace langfuse.update_current_trace(user_id="user_12345") ``` diff --git a/pages/docs/observability/sdk/python/advanced-usage.mdx b/pages/docs/observability/sdk/python/advanced-usage.mdx index 1c0569d83..26bbb6909 100644 --- a/pages/docs/observability/sdk/python/advanced-usage.mdx +++ b/pages/docs/observability/sdk/python/advanced-usage.mdx @@ -31,7 +31,7 @@ def pii_masker(data: any, **kwargs) -> any: langfuse = Langfuse(mask=pii_masker) # Now, any input/output/metadata will be passed through pii_masker -with langfuse.start_as_current_span(name="user-query", input={"email": "test@example.com", "query": "..."}) as span: +with langfuse.start_as_current_observation(name="user-query", input={"email": "test@example.com", "query": "..."}) as span: # The 'email' field in the input will be masked. pass ``` @@ -123,7 +123,7 @@ langfuse_tracer_provider = TracerProvider() # do not set to global tracer provid langfuse = Langfuse(tracer_provider=langfuse_tracer_provider) -langfuse.start_span(name="myspan").end() # Span will be isolated from remaining OTEL instrumentation +langfuse.start_observation(name="myspan").end() # Span will be isolated from remaining OTEL instrumentation ``` ## Using `ThreadPoolExecutors` or `ProcessPoolExecutors` diff --git a/pages/docs/observability/sdk/python/evaluation.mdx b/pages/docs/observability/sdk/python/evaluation.mdx index 68e1bca4c..699534931 100644 --- a/pages/docs/observability/sdk/python/evaluation.mdx +++ b/pages/docs/observability/sdk/python/evaluation.mdx @@ -27,7 +27,7 @@ This page shows the evaluation methods that are supported by the Python SDK. Ple langfuse = get_client() - with langfuse.start_as_current_generation(name="summary_generation") as gen: + with langfuse.start_as_current_observation(as_type="generation", name="summary_generation") as gen: # ... LLM call ... gen.update(output="summary text...") # Score this specific generation @@ -47,7 +47,7 @@ This page shows the evaluation methods that are supported by the Python SDK. Ple langfuse = get_client() - with langfuse.start_as_current_span(name="complex_task") as task_span: + with langfuse.start_as_current_observation(name="complex_task") as task_span: # ... perform task ... langfuse.score_current_span(name="task_component_quality", value=True, data_type="BOOLEAN") # ... 
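The same context-based approach also works at the trace level via `langfuse.score_current_trace()`, which this hunk leaves unchanged. A minimal sketch with the updated `start_as_current_observation` API; the `overall_quality` score name and value are illustrative:

```python
from langfuse import get_client

langfuse = get_client()

with langfuse.start_as_current_observation(name="complex_task"):
    # ... perform task ...

    # Scores the trace the current span belongs to,
    # rather than the span itself
    langfuse.score_current_trace(
        name="overall_quality",  # illustrative score name
        value=0.9,
    )
```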
@@ -159,7 +159,8 @@ current_run_name = "qna_model_v3_run_05_20" # Identifies this specific evaluatio # Assume 'my_qna_app' is your instrumented application function def my_qna_app(question: str, context: str, item_id: str, run_name: str): - with langfuse.start_as_current_generation( + with langfuse.start_as_current_observation( + as_type="generation", name="qna-llm-call", input={"question": question, "context": context}, metadata={"item_id": item_id, "run": run_name}, # Example metadata for the generation @@ -210,4 +211,3 @@ print(f"\nFinished processing dataset '{dataset_name}' for run '{current_run_nam ``` By using `item.run()`, you ensure each dataset item's processing is neatly encapsulated in its own trace, and these traces are aggregated under the specified `run_name` in the Langfuse UI. This allows for systematic review of results, comparison across runs, and deep dives into individual processing traces. - diff --git a/pages/docs/observability/sdk/python/instrumentation.mdx b/pages/docs/observability/sdk/python/instrumentation.mdx index fe1cc236a..21805806b 100644 --- a/pages/docs/observability/sdk/python/instrumentation.mdx +++ b/pages/docs/observability/sdk/python/instrumentation.mdx @@ -69,15 +69,15 @@ The observe decorator is capturing the args, kwargs and return value of decorate You can create spans or generations anywhere in your application. If you need more control than the `@observe` decorator, the primary way to do this is using context managers (with `with` statements), which ensure that observations are properly started and ended. -- `langfuse.start_as_current_span()`: Creates a new span and sets it as the currently active observation in the OTel context for its duration. Any new observations created within this block will be its children. -- `langfuse.start_as_current_generation()`: Similar to the above, but creates a specialized "generation" observation for LLM calls. +- `langfuse.start_as_current_observation()`: Creates a new span and sets it as the currently active observation in the OTel context for its duration. Any new observations created within this block will be its children. +- `langfuse.start_as_current_observation(as_type="generation")`: Similar to the above, but creates a specialized "generation" observation for LLM calls. ```python from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="user-request-pipeline", input={"user_query": "Tell me a joke about OpenTelemetry"}, ) as root_span: @@ -91,7 +91,8 @@ with langfuse.start_as_current_span( ) # Create a nested generation - with langfuse.start_as_current_generation( + with langfuse.start_as_current_observation( + as_type="generation", name="joke-generation", model="gpt-4o", input=[{"role": "user", "content": "Tell me a joke about OpenTelemetry"}], @@ -140,8 +141,8 @@ span.end() # Important: Manually end the span - **Parenting**: The observation created by `start_span()` or `start_generation()` will still be a child of the span that was active in the context at the moment of its creation. - **Manual Lifecycle**: These observations are not managed by a `with` block and therefore **must be explicitly ended** by calling their `.end()` method. - **Nesting Children**: - - Subsequent observations created using the global `langfuse.start_as_current_span()` (or similar global methods) will _not_ be children of these "manual" observations. Instead, they will be parented by the original active span. 
- - To create children directly under a "manual" observation, you would use methods _on that specific observation object_ (e.g., `manual_span.start_as_current_span(...)`). + - Subsequent observations created using the global `langfuse.start_as_current_observation()` (or similar global methods) will _not_ be children of these "manual" observations. Instead, they will be parented by the original active span. + - To create children directly under a "manual" observation, you would use methods _on that specific observation object_ (e.g., `manual_span.start_as_current_observation(...)`). **When to Use:** @@ -159,7 +160,7 @@ from langfuse import get_client langfuse = get_client() # This outer span establishes an active context. -with langfuse.start_as_current_span(name="main-operation") as main_operation_span: +with langfuse.start_as_current_observation(name="main-operation") as main_operation_span: # 'main_operation_span' is the current active context. # 1. Create a "manual" span using langfuse.start_span(). @@ -172,7 +173,7 @@ with langfuse.start_as_current_span(name="main-operation") as main_operation_spa # 2. Start another operation that DOES become the active context. # This will be a child of 'main_operation_span', NOT 'manual_side_task', # because 'manual_side_task' did not alter the active context. - with langfuse.start_as_current_span(name="core-step-within-main") as core_step_span: + with langfuse.start_as_current_observation(name="core-step-within-main") as core_step_span: # 'core_step_span' is now the active context. # 'manual_side_task' is still open but not active in the global context. core_step_span.update(input="Data for core step") @@ -222,25 +223,25 @@ def main_function(data, parameter): -Nesting is handled automatically by OpenTelemetry's context propagation. When you create a new observation (span or generation) using `start_as_current_span` or `start_as_current_generation`, it becomes a child of the observation that was active in the context when it was created. +Nesting is handled automatically by OpenTelemetry's context propagation. When you create a new observation (span or generation) using `start_as_current_observation` or `start_as_current_observation(as_type="generation")`, it becomes a child of the observation that was active in the context when it was created. 
```python from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="outer-process") as outer_span: +with langfuse.start_as_current_observation(name="outer-process") as outer_span: # outer_span is active - with langfuse.start_as_current_generation(name="llm-step-1") as gen1: + with langfuse.start_as_current_observation(as_type="generation", name="llm-step-1") as gen1: # gen1 is active, child of outer_span gen1.update(output="LLM 1 output") - with outer_span.start_as_current_span(name="intermediate-step") as mid_span: + with outer_span.start_as_current_observation(name="intermediate-step") as mid_span: # mid_span is active, also a child of outer_span # This demonstrates using the yielded span object to create children - with mid_span.start_as_current_generation(name="llm-step-2") as gen2: + with mid_span.start_as_current_observation(as_type="generation", name="llm-step-2") as gen2: # gen2 is active, child of mid_span gen2.update(output="LLM 2 output") @@ -304,7 +305,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_generation(name="llm-call", model="gpt-3.5-turbo") as gen: +with langfuse.start_as_current_observation(as_type="generation", name="llm-call", model="gpt-3.5-turbo") as gen: gen.update(input={"prompt": "Why is the sky blue?"}) # ... make LLM call ... response_text = "Rayleigh scattering..." @@ -315,7 +316,7 @@ with langfuse.start_as_current_generation(name="llm-call", model="gpt-3.5-turbo" ) # Alternatively, update the current observation in context: -with langfuse.start_as_current_span(name="data-processing"): +with langfuse.start_as_current_observation(name="data-processing"): # ... some processing ... langfuse.update_current_span(metadata={"step1_complete": True}) # ... more processing ... @@ -350,7 +351,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="initial-operation") as span: +with langfuse.start_as_current_observation(name="initial-operation") as span: # Set trace attributes early span.update_trace( user_id="user_xyz", @@ -359,7 +360,7 @@ with langfuse.start_as_current_span(name="initial-operation") as span: ) # ... # Later, from another span in the same trace: - with span.start_as_current_generation(name="final-generation") as gen: + with span.start_as_current_observation(as_type="generation", name="final-generation") as gen: # ... 
langfuse.update_current_trace(output={"final_status": "success"}, public=True) ``` @@ -375,12 +376,13 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="user-request", input={"query": "What is the capital of France?"} # This becomes the trace input ) as root_span: - with langfuse.start_as_current_generation( + with langfuse.start_as_current_observation( + as_type="generation", name="llm-call", model="gpt-4o", input={"messages": [{"role": "user", "content": "What is the capital of France?"}]} @@ -401,7 +403,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="complex-pipeline") as root_span: +with langfuse.start_as_current_observation(name="complex-pipeline") as root_span: # Root span has its own input/output root_span.update(input="Step 1 data", output="Step 1 result") @@ -460,7 +462,7 @@ from langfuse import get_client, Langfuse langfuse = get_client() # Get current IDs -with langfuse.start_as_current_span(name="my-op") as current_op: +with langfuse.start_as_current_observation(name="my-op") as current_op: trace_id = langfuse.get_current_trace_id() observation_id = langfuse.get_current_observation_id() print(f"Current Trace ID: {trace_id}, Current Observation ID: {observation_id}") @@ -484,7 +486,7 @@ langfuse = get_client() existing_trace_id = "abcdef1234567890abcdef1234567890" # From an upstream service existing_parent_span_id = "fedcba0987654321" # Optional parent span in that trace -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="process-downstream-task", trace_context={ "trace_id": existing_trace_id, @@ -562,7 +564,7 @@ Langfuse offers a drop-in replacement for the OpenAI Python SDK to automatically - **Cost**: Estimated cost in USD (based on model and token usage). - **Media**: Input audio and output audio from speech-to-text and text-to-speech endpoints. -The integration is fully interoperable with `@observe` and manual tracing methods (`start_as_current_span`, etc.). If an OpenAI call is made within an active Langfuse span, the OpenAI generation will be correctly nested under it. +The integration is fully interoperable with `@observe` and manual tracing methods (`start_as_current_observation`, etc.). If an OpenAI call is made within an active Langfuse span, the OpenAI generation will be correctly nested under it. 
**Passing Langfuse arguments to OpenAI calls:** @@ -576,7 +578,7 @@ langfuse = get_client() client = openai.OpenAI() -with langfuse.start_as_current_span(name="qna-bot-openai") as span: +with langfuse.start_as_current_observation(name="qna-bot-openai") as span: langfuse.update_current_trace(tags=["qna-bot-openai"]) # This will be traced as a Langfuse generation @@ -642,7 +644,7 @@ llm = ChatOpenAI(model_name="gpt-4o") prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}") chain = prompt | llm -with langfuse.start_as_current_span(name="joke-chain") as span: +with langfuse.start_as_current_observation(name="joke-chain") as span: langfuse.update_current_trace(tags=["joke-chain"]) response = chain.invoke({"topic": "cats"}, config={"callbacks": [langfuse_handler]}) @@ -823,7 +825,7 @@ AnthropicInstrumentor().instrument() langfuse = get_client() anthropic_client = Anthropic() -with langfuse.start_as_current_span(name="myspan"): +with langfuse.start_as_current_observation(name="myspan"): # This will be traced as a Langfuse generation nested under the current span message = anthropic_client.messages.create( model="claude-3-7-sonnet-20250219", @@ -857,7 +859,7 @@ LlamaIndexInstrumentor().instrument() langfuse = get_client() llm = OpenAI(model="gpt-4o") -with langfuse.start_as_current_span(name="myspan"): +with langfuse.start_as_current_observation(name="myspan"): response = llm.complete("Hello, world!") print(response) @@ -869,4 +871,3 @@ Learn more in the [Llamaindex integration](/integrations/frameworks/llamaindex) - diff --git a/pages/docs/observability/sdk/python/troubleshooting-and-faq.mdx b/pages/docs/observability/sdk/python/troubleshooting-and-faq.mdx index 99d9144c7..c91a2cb95 100644 --- a/pages/docs/observability/sdk/python/troubleshooting-and-faq.mdx +++ b/pages/docs/observability/sdk/python/troubleshooting-and-faq.mdx @@ -20,7 +20,7 @@ If you don't find a solution to your issue here, try using [Ask AI](/docs/ask-ai - Enable debug logging (`debug=True` or `LANGFUSE_DEBUG="True"`) to see SDK activity and potential errors during exporting. - **Incorrect Nesting or Missing Spans**: - If you are self-hosting Langfuse, the Python SDK v3 requires [**Langfuse platform version >= 3.63.0**](https://github.com/langfuse/langfuse/releases/tag/v3.63.0) for traces to be correctly processed. You can find a snapshot of the legacy v2 SDK documentation [here](https://python-sdk-v2.docs-snapshot.langfuse.com/docs/observability/sdk/python/decorators). - - Ensure you are using context managers (`with langfuse.start_as_current_span(...)`) for proper context propagation. + - Ensure you are using context managers (`with langfuse.start_as_current_observation(...)`) for proper context propagation. - If manually creating spans (`langfuse.start_span()`), ensure they are correctly ended with `.end()`. - In async code, ensure context is not lost across `await` boundaries if not using Langfuse's async-compatible methods. 
- **Langchain/OpenAI Integration Not Working**: diff --git a/pages/docs/observability/sdk/python/upgrade-path.mdx b/pages/docs/observability/sdk/python/upgrade-path.mdx index 74c53eb37..845211476 100644 --- a/pages/docs/observability/sdk/python/upgrade-path.mdx +++ b/pages/docs/observability/sdk/python/upgrade-path.mdx @@ -99,7 +99,7 @@ from langfuse.openai import openai langfuse = get_client() -with langfuse.start_as_current_span(name="chat-request") as span: +with langfuse.start_as_current_observation(name="chat-request") as span: # Set trace attributes on the enclosing span span.update_trace( user_id="user_123", @@ -167,7 +167,7 @@ from langfuse.langchain import CallbackHandler langfuse = get_client() -with langfuse.start_as_current_span(name="langchain-request") as span: +with langfuse.start_as_current_observation(name="langchain-request") as span: span.update_trace( user_id="user_123", session_id="session_456", @@ -206,7 +206,7 @@ LlamaIndexInstrumentor().instrument() langfuse = get_client() -with langfuse.start_as_current_span(name="llamaindex-query") as span: +with langfuse.start_as_current_observation(name="llamaindex-query") as span: span.update_trace( user_id="user_123", input={"query": "Hello"} @@ -252,14 +252,15 @@ from langfuse import get_client langfuse = get_client() # Use context managers instead of manual objects -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="my-trace", input={"query": "Hello"} # Becomes trace input automatically ) as root_span: # Set trace attributes root_span.update_trace(user_id="user_123") - with langfuse.start_as_current_generation( + with langfuse.start_as_current_observation( + as_type="generation", name="llm-call", model="gpt-4o" ) as generation: @@ -291,7 +292,7 @@ with langfuse.start_as_current_span( 4. **Context Managers**: - Replace manual `langfuse.trace()`, `trace.span()` with context managers if you want to use them - - Use `with langfuse.start_as_current_span()` instead + - Use `with langfuse.start_as_current_observation()` instead 5. **LlamaIndex Migration**: @@ -338,12 +339,12 @@ with langfuse.start_as_current_span( - **v2**: Could be set directly on integration calls - **v3**: Must be set on enclosing spans - - **Migration**: Wrap integration calls with `langfuse.start_as_current_span()` + - **Migration**: Wrap integration calls with `langfuse.start_as_current_observation()` 4. **Creating Observations**: - **v2**: `langfuse.trace()`, `langfuse.span()`, `langfuse.generation()` - - **v3**: `langfuse.start_as_current_span()`, `langfuse.start_as_current_generation()` + - **v3**: `langfuse.start_as_current_observation()`, `langfuse.start_as_current_observation(as_type="generation")` - **Migration**: Use context managers, ensure `.end()` is called or use `with` statements 5. **IDs and Context**: @@ -358,6 +359,3 @@ with langfuse.start_as_current_span( ## Future support for v2 We will continue to support the v2 SDK for the foreseeable future with critical bug fixes and security patches. We will not be adding any new features to the v2 SDK. You can find a snapshot of the v2 SDK documentation [here](https://python-sdk-v2.docs-snapshot.langfuse.com/docs/observability/sdk/python/decorators). 
- - - diff --git a/pages/faq/all/empty-trace-input-and-output.mdx b/pages/faq/all/empty-trace-input-and-output.mdx index 056df5603..22e65b053 100644 --- a/pages/faq/all/empty-trace-input-and-output.mdx +++ b/pages/faq/all/empty-trace-input-and-output.mdx @@ -13,7 +13,7 @@ from langfuse import get_client langfuse = get_client() -with langfuse.start_as_current_span(name="complex-pipeline") as root_span: +with langfuse.start_as_current_observation(name="complex-pipeline") as root_span: # Root span has its own input/output root_span.update(input="Step 1 data", output="Step 1 result") diff --git a/pages/guides/cookbook/example_langgraph_agents.mdx b/pages/guides/cookbook/example_langgraph_agents.mdx index 26d495167..b71ed2307 100644 --- a/pages/guides/cookbook/example_langgraph_agents.mdx +++ b/pages/guides/cookbook/example_langgraph_agents.mdx @@ -394,7 +394,7 @@ from langfuse import get_client langfuse = get_client() # Option 1: Use the yielded span object from the context manager -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="langgraph-request") as span: # ... LangGraph execution ... @@ -596,7 +596,7 @@ graph = graph_builder.compile() def my_agent(question, langfuse_handler): # Create a trace via Langfuse spans and use Langchain within it - with langfuse.start_as_current_span(name="my-langgraph-agent") as root_span: + with langfuse.start_as_current_observation(name="my-langgraph-agent") as root_span: # Step 2: LangChain processing response = graph.invoke( @@ -639,7 +639,8 @@ for item in dataset.items: # Call your application logic - this can use any combination of decorators, # context managers, or manual observations - with langfuse.start_as_current_generation( + with langfuse.start_as_current_observation( + as_type="generation", name="llm-call", model="gpt-4o", input=item.input diff --git a/pages/guides/cookbook/integration_langgraph.mdx b/pages/guides/cookbook/integration_langgraph.mdx index 5cd49b35e..9a71a32b7 100644 --- a/pages/guides/cookbook/integration_langgraph.mdx +++ b/pages/guides/cookbook/integration_langgraph.mdx @@ -506,7 +506,7 @@ from langchain_core.tools import tool def langgraph_research(question): """Conducts research for various topics.""" - with langfuse.start_as_current_span( + with langfuse.start_as_current_observation( name="🤖-sub-research-agent", trace_context={"trace_id": predefined_trace_id} ) as span: @@ -540,7 +540,7 @@ main_agent = create_react_agent( user_question = "What is Langfuse?" # Use the predefined trace ID with trace_context -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="🤖-main-agent", trace_context={"trace_id": predefined_trace_id} ) as span: @@ -576,7 +576,7 @@ from langfuse import get_client langfuse = get_client() # Option 1: Use the yielded span object from the context manager -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="langgraph-request") as span: # ... LangGraph execution ... 
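Spelled out end to end, the Option 1 pattern above looks roughly like this (a sketch; the `user-feedback` score name, value, and comment are illustrative, and `span.score_trace` is assumed as the trace-level counterpart to `span.score`):

```python
from langfuse import get_client

langfuse = get_client()

with langfuse.start_as_current_observation(name="langgraph-request") as span:
    # ... LangGraph execution ...

    # Score the enclosing trace via the yielded span object
    span.score_trace(
        name="user-feedback",  # illustrative score name
        value=1,
        comment="User found the response helpful",  # illustrative
    )
```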
diff --git a/pages/guides/cookbook/integration_llama_index.mdx b/pages/guides/cookbook/integration_llama_index.mdx index 1f14ba392..e71897e6d 100644 --- a/pages/guides/cookbook/integration_llama_index.mdx +++ b/pages/guides/cookbook/integration_llama_index.mdx @@ -88,7 +88,7 @@ from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4o") -with langfuse.start_as_current_span(name="llama-index-trace"): +with langfuse.start_as_current_observation(name="llama-index-trace"): response = llm.complete("Hello, world!") print(response) diff --git a/pages/integrations/frameworks/crewai.mdx b/pages/integrations/frameworks/crewai.mdx index da70d596a..2760be4e8 100644 --- a/pages/integrations/frameworks/crewai.mdx +++ b/pages/integrations/frameworks/crewai.mdx @@ -101,7 +101,7 @@ crew = Crew( tasks=[task1], ) -with langfuse.start_as_current_span(name="crewai-index-trace"): +with langfuse.start_as_current_observation(name="crewai-index-trace"): result = crew.kickoff() print(result) @@ -129,7 +129,7 @@ The following code demonstrates how to start a custom span with `langfuse.start_ ```python -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="crewai-index-trace", ) as span: @@ -165,7 +165,7 @@ In the example below, we demonstrate how to score a specific span for `relevance ```python -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="crewai-index-trace", ) as span: @@ -218,7 +218,7 @@ crew_with_prompt = Crew( ) # Run your application -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="crewai-index-trace", ) as span: @@ -273,7 +273,7 @@ current_run_description="Evaluation run for CrewAI model on June 4th" # Assume 'your_application' is your instrumented application function def your_application(country): - with langfuse.start_as_current_span(name="crewai-trace") as span: + with langfuse.start_as_current_observation(name="crewai-trace") as span: # Define your agents with roles and goals geography_expert = Agent( diff --git a/pages/integrations/frameworks/langchain.mdx b/pages/integrations/frameworks/langchain.mdx index 76717b7f3..a9ffb1572 100644 --- a/pages/integrations/frameworks/langchain.mdx +++ b/pages/integrations/frameworks/langchain.mdx @@ -256,7 +256,7 @@ prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}") chain = prompt | llm # Set trace attributes dynamically via enclosing span -with langfuse.start_as_current_span(name="dynamic-langchain-trace") as span: +with langfuse.start_as_current_observation(name="dynamic-langchain-trace") as span: span.update_trace( user_id="random-user", session_id="random-session", @@ -319,7 +319,7 @@ predefined_trace_id = Langfuse.create_trace_id(seed=external_request_id) langfuse_handler = CallbackHandler() # Use the predefined trace ID with trace_context -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="langchain-request", trace_context={"trace_id": predefined_trace_id} ) as span: @@ -376,7 +376,7 @@ from langfuse import get_client langfuse = get_client() # Option 1: Use the yielded span object from the context manager -with langfuse.start_as_current_span( +with langfuse.start_as_current_observation( name="langchain-request", trace_context={"trace_id": predefined_trace_id} ) as span: @@ -391,7 +391,7 @@ with langfuse.start_as_current_span( ) # Option 2: Use langfuse.score_current_trace() if still in context -with langfuse.start_as_current_span(name="langchain-request") as span: +with 
langfuse.start_as_current_observation(name="langchain-request") as span: # ... LangChain execution ... # Score using current context @@ -496,7 +496,7 @@ from langchain_core.prompts import ChatPromptTemplate langfuse = get_client() # Create a trace via Langfuse spans and use Langchain within it -with langfuse.start_as_current_span(name="multi-step-process") as root_span: +with langfuse.start_as_current_observation(name="multi-step-process") as root_span: # Update trace attributes root_span.update_trace( session_id="session-1234", @@ -508,7 +508,7 @@ with langfuse.start_as_current_span(name="multi-step-process") as root_span: langfuse_handler = CallbackHandler() # Step 1: Initial processing (custom logic) - with langfuse.start_as_current_span(name="input-preprocessing") as prep_span: + with langfuse.start_as_current_observation(name="input-preprocessing") as prep_span: processed_input = "Simplified: Explain quantum computing" prep_span.update(output={"processed_query": processed_input}) @@ -523,7 +523,7 @@ with langfuse.start_as_current_span(name="multi-step-process") as root_span: ) # Step 3: Post-processing (custom logic) - with langfuse.start_as_current_span(name="output-postprocessing") as post_span: + with langfuse.start_as_current_observation(name="output-postprocessing") as post_span: final_result = f"Response: {result.content}" post_span.update(output={"final_response": final_result}) @@ -693,7 +693,7 @@ Python SDK v3 introduces a completely revised Langfuse core with a new **observa | ------------------- | ----------------------------------------------------------------- | ---------------------------------------------------------------------- | | Package import | `from langfuse.callback import CallbackHandler` | `from langfuse.langchain import CallbackHandler` | | Client handling | Multiple instantiated clients | Singleton pattern, access via `get_client()` | -| Trace/Span context | `CallbackHandler` optionally accepted `root` to group runs | Use context managers `with langfuse.start_as_current_span(...)` | +| Trace/Span context | `CallbackHandler` optionally accepted `root` to group runs | Use context managers `with langfuse.start_as_current_observation(...)` | | Dynamic trace attrs | Pass via LangChain `config` (e.g. `metadata["langfuse_user_id"]`) | Use `metadata["langfuse_user_id"]` OR `span.update_trace(user_id=...)` | | Constructor args | `CallbackHandler(sample_rate=..., user_id=...)` | No constructor args – use Langfuse client or spans | @@ -738,7 +738,7 @@ response = chain.invoke( ) # 3. 
Option 2: Wrap LangChain execution in a span (for more control) -# with langfuse.start_as_current_span(name="tell-joke") as span: +# with langfuse.start_as_current_observation(name="tell-joke") as span: # span.update_trace(user_id="user_123", input={"topic": "cats"}) # response = chain.invoke({"topic": "cats"}, config={"callbacks": [handler]}) # span.update_trace(output={"joke": response.content}) diff --git a/pages/integrations/frameworks/llamaindex.mdx b/pages/integrations/frameworks/llamaindex.mdx index 1f14ba392..e71897e6d 100644 --- a/pages/integrations/frameworks/llamaindex.mdx +++ b/pages/integrations/frameworks/llamaindex.mdx @@ -88,7 +88,7 @@ from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4o") -with langfuse.start_as_current_span(name="llama-index-trace"): +with langfuse.start_as_current_observation(name="llama-index-trace"): response = llm.complete("Hello, world!") print(response) diff --git a/pages/integrations/model-providers/openai-py.mdx b/pages/integrations/model-providers/openai-py.mdx index b707efd88..c6f6d5466 100644 --- a/pages/integrations/model-providers/openai-py.mdx +++ b/pages/integrations/model-providers/openai-py.mdx @@ -243,7 +243,7 @@ from langfuse.openai import openai langfuse = get_client() -with langfuse.start_as_current_span(name="calculator-request") as span: +with langfuse.start_as_current_observation(name="calculator-request") as span: span.update_trace( session_id="session_123", user_id="user_456", @@ -316,7 +316,7 @@ from langfuse.openai import openai langfuse = get_client() -with langfuse.start_as_current_span(name="capital-poem-generator") as span: +with langfuse.start_as_current_observation(name="capital-poem-generator") as span: # Set trace attributes span.update_trace( user_id="user_123",