-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgh_assistant.py
More file actions
156 lines (135 loc) · 4.4 KB
/
gh_assistant.py
File metadata and controls
156 lines (135 loc) · 4.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
import streamlit as st
from openai import OpenAI
from langchain.agents.openai_assistant.base import OpenAIAssistantRunnable
from langchain.tools import StructuredTool
from assistant_wrapper import OpenAIAssistantWrapper
from dotenv import load_dotenv
import os
import requests
import json
# Load environment variables from a local .env file (expects OPENAI_API_KEY,
# GITHUB_TOKEN; the Perplexity key below is currently hard-coded in search_web).
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
# GraphQL API URL (GitHub, or replace with other API URL if needed)
graphql_url = "https://api.github.com/graphql"
# Auth header for every GraphQL request. GITHUB_TOKEN must carry the scopes
# required by the queries you intend to run — TODO confirm required scopes.
headers = {
    "Authorization": f"Bearer {os.getenv('GITHUB_TOKEN')}"
}
# Function to run GraphQL query
def run_graphql_query(query):
"""
Executes a GraphQL query and returns the response.
"""
try:
response = requests.post(graphql_url, json={'query': query}, headers=headers)
response.raise_for_status() # Raise exception for HTTP errors
data = response.json()
# Return pretty printed data
return json.dumps(data, indent=2)
except requests.exceptions.RequestException as e:
return f"Error executing GraphQL query: {str(e)}"
# Function to validate GraphQL query
def validate_graphql_query(query):
"""
Validates the GraphQL query using the introspection schema.
"""
introspection_query = """
{
__schema {
queryType {
fields {
name
}
}
mutationType {
fields {
name
}
}
}
}
"""
schema = run_graphql_query(introspection_query)
# Simple validation logic: check if query is valid based on schema fields (to be expanded)
return "Query looks valid based on schema."
# Function calling API for the assistant
def github_graphql_tool_as_user(query):
"""
Tool for executing GraphQL queries after validation.
"""
validation_result = validate_graphql_query(query)
if "valid" in validation_result:
return run_graphql_query(query)
else:
return validation_result
# Function to interact with secondary assistant for search queries
def search_web(query):
"""
Sends a search query to the Perplexity API via the second assistant.
"""
pplx_model = "llama-3.1-sonar-large-128k-online"
messages = [
{
"role": "system",
"content": (
"You are an artificial intelligence assistant and you need to "
"engage in a helpful, detailed, polite conversation with a user."
),
},
{
"role": "user",
"content": query,
},
]
client = OpenAI(
api_key="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",
base_url="https://api.perplexity.ai",
)
reply = client.chat.completions.create(
model=pplx_model,
messages=messages,
)
reply_text = reply.choices[0].message.content
return reply_text
# Register the tools the main assistant may call: web search (Perplexity)
# and the validated GitHub GraphQL executor.
tools = [
    StructuredTool.from_function(search_web),
    StructuredTool.from_function(github_graphql_tool_as_user)
]
client = OpenAI(api_key=openai_api_key)
# Create the main OpenAI Assistant as a LangChain agent; as_agent=True makes
# it emit tool-call actions instead of plain messages.
assistant = OpenAIAssistantRunnable.create_assistant(
    client=client,
    name="Main assistant",
    instructions="Generic AI Assistant with GitHub GraphQL API support.",
    tools=tools,
    model="gpt-4o",
    as_agent=True,
)
# Create the wrapper that drives the tool-call loop — see assistant_wrapper.py
# for its invoke() contract (presumably returns the final reply text; confirm).
assistant_wrapper = OpenAIAssistantWrapper(assistant, tools)
def ask_assistant(input_text):
    """Forward *input_text* to the wrapped assistant and return its reply."""
    reply = assistant_wrapper.invoke(input_text)
    return reply
# Streamlit UI — this whole section re-runs top-to-bottom on every interaction.
st.title("Dual Assistant with GraphQL Support")
# Initialize chat history once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input; the walrus binds the submitted text to `prompt`
# (st.chat_input returns None when nothing was submitted this rerun).
if prompt := st.chat_input("Enter your query"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        response = ask_assistant(prompt)
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
# Reset: clear the visible history and the stored thread id — presumably the
# wrapper reads thread_id to continue a conversation; confirm in assistant_wrapper.py.
if st.button("Start New Conversation"):
    st.session_state.messages = []
    st.session_state.thread_id = None