from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

-# from opentelemetry import trace
-# from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
-# from opentelemetry.sdk.trace import TracerProvider
-# from opentelemetry.sdk.trace.export import BatchSpanProcessor
-# # Configure tracing
-# trace.set_tracer_provider(TracerProvider())
-# span_processor = BatchSpanProcessor(OTLPSpanExporter())
-# trace.get_tracer_provider().add_span_processor(span_processor)
+def main():

-from flask import Flask, request, jsonify
+    llm = ChatOpenAI(model="gpt-3.5-turbo")

-# Set up logging
-import logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
+    messages = [
+        SystemMessage(content="You are a helpful assistant!"),
+        HumanMessage(content="What is the capital of France?"),
+    ]

-app = Flask(__name__)
-
-@app.post("/chat")
-def chat():
-    try:
-        print("LLM output1:\n")
-        payload = request.get_json(silent=True) or request.form
-        llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.1, max_tokens=100)
-        print("LLM output2:\n")
-        messages = [
-            SystemMessage(content="You are a helpful assistant!"),
-            HumanMessage(content="What is the capital of France?"),
-        ]
-        print("LLM output3:\n")
-        result = llm.invoke(messages).content
-        print("LLM output:\n", result)
-        # return result.content
-    except Exception as e:
-        logger.error(f"Error processing chat request: {e}")
-        str = f"Error processing chat request: {e}"
-        return jsonify({str}), 500
-# from opentelemetry import _logs
-# from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
-#     OTLPLogExporter,
-# )
-# from opentelemetry.sdk._logs import LoggerProvider
-# from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
-# _logs.set_logger_provider(LoggerProvider())
-# _logs.get_logger_provider().add_log_record_processor(
-#     BatchLogRecordProcessor(OTLPLogExporter())
-# )
-# import logging
-# logger = logging.getLogger(__name__)
-# logging.basicConfig(level=logging.DEBUG)
-# logger.debug("OpenTelemetry instrumentation for LangChain encountered an error in $$$$$$$$$$$$$$$$")
+    result = llm.invoke(messages).content
+    print("LLM output:\n", result)

if __name__ == "__main__":
-    # When run directly: python app.py
-    app.run(host="0.0.0.0", port=5003)
+    main()
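The commented-out blocks this commit deletes sketch an OTLP tracing and log-export setup. For reference, a minimal working version of the tracing half would look like the following. This is an illustrative sketch, not part of the commit; it assumes the opentelemetry-sdk and opentelemetry-exporter-otlp-proto-grpc packages are installed and an OTLP collector is listening on the default endpoint.

# Illustrative sketch of the tracing setup the commit deletes; not part of the commit.
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

# Install a tracer provider and ship finished spans to the collector in batches.
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))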
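After this commit the file (app.py, per the removed run comment) is a plain script rather than a Flask service. Reconstructed from the added and context lines above, it reads as follows; ChatOpenAI picks up the OPENAI_API_KEY environment variable, so it can be run with OPENAI_API_KEY=... python app.py.

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI


def main():
    # Chat model client; expects OPENAI_API_KEY in the environment.
    llm = ChatOpenAI(model="gpt-3.5-turbo")

    messages = [
        SystemMessage(content="You are a helpful assistant!"),
        HumanMessage(content="What is the capital of France?"),
    ]

    result = llm.invoke(messages).content
    print("LLM output:\n", result)


if __name__ == "__main__":
    main()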