|
98 | 98 | }, |
99 | 99 | { |
100 | 100 | "cell_type": "code", |
101 | | - "execution_count": null, |
| 101 | + "execution_count": 1, |
102 | 102 | "id": "310d21b3", |
103 | 103 | "metadata": {}, |
104 | 104 | "outputs": [], |
|
125 | 125 | }, |
126 | 126 | { |
127 | 127 | "cell_type": "code", |
128 | | - "execution_count": 6, |
| 128 | + "execution_count": null, |
129 | 129 | "id": "ccfb4159-34ac-4816-a8f0-795c5442c0b2", |
130 | 130 | "metadata": {}, |
131 | 131 | "outputs": [], |
|
148 | 148 | " \"TABLEAU_JWT_SECRET\"\n", |
149 | 149 | ") # a JWT secret ID (obtained through Tableau's admin UI)\n", |
150 | 150 | "tableau_api_version = \"3.21\" # the current Tableau REST API Version\n", |
151 | | - "tableau_user = \"[email protected]\" # replace with the username querying the target Tableau Data Source\n", |
| 151 | + "tableau_user = \"[email protected]\" # enter the username querying the target Tableau Data Source\n", |
152 | 152 | "\n", |
153 | 153 | "# For this cookbook we are connecting to the Superstore dataset that comes by default with every Tableau server\n", |
154 | 154 | "datasource_luid = (\n", |
155 | 155 | " \"0965e61b-a072-43cf-994c-8c6cf526940d\" # the target data source for this Tool\n", |
156 | 156 | ")\n", |
157 | | - "\n", |
| 157 | + "model_provider = \"openai\" # the name of the model provider you are using for your Agent\n", |
158 | 158 | "# Add variables to control LLM models for the Agent and Tools\n", |
159 | 159 | "os.environ[\"OPENAI_API_KEY\"] # set an your model API key as an environment variable\n", |
160 | | - "tooling_llm_model = \"gpt-4o\"" |
| 160 | + "tooling_llm_model = \"gpt-4o-mini\"" |
161 | 161 | ] |
162 | 162 | }, |
163 | 163 | { |
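Reviewer note on the configuration hunk above: `os.environ["OPENAI_API_KEY"]` only reads the variable (raising `KeyError` if it was never exported) rather than setting it. A minimal sketch of a friendlier pre-flight check, using nothing beyond the standard library; the error message text is illustrative, not part of the notebook:

```python
# Sketch: confirm the model API key is exported before the agent cells run.
import os

if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError("Export OPENAI_API_KEY (or load it from a .env file) before continuing.")
```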
|
178 | 178 | }, |
179 | 179 | { |
180 | 180 | "cell_type": "code", |
181 | | - "execution_count": null, |
| 181 | + "execution_count": 6, |
182 | 182 | "id": "72ee3eca", |
183 | 183 | "metadata": {}, |
184 | 184 | "outputs": [], |
|
194 | 194 | " tableau_user=tableau_user,\n", |
195 | 195 | " datasource_luid=datasource_luid,\n", |
196 | 196 | " tooling_llm_model=tooling_llm_model,\n", |
| 197 | + " model_provider=model_provider,\n", |
197 | 198 | ")\n", |
198 | 199 | "\n", |
199 | 200 | "# load the List of Tools to be used by the Agent. In this case we will just load our data source Q&A tool.\n", |
|
211 | 212 | }, |
212 | 213 | { |
213 | 214 | "cell_type": "code", |
214 | | - "execution_count": 8, |
| 215 | + "execution_count": null, |
215 | 216 | "id": "06a1d3f7-79a8-452e-b37e-9070d15445b0", |
216 | 217 | "metadata": {}, |
217 | | - "outputs": [ |
218 | | - { |
219 | | - "data": { |
220 | | - "text/markdown": [ |
221 | | - "Here are the results for the states with the highest sales and profits based on the data queried:\n", |
222 | | - "\n", |
223 | | - "### States with the Most Sales\n", |
224 | | - "1. **California**: $457,687.63\n", |
225 | | - "2. **New York**: $310,876.27\n", |
226 | | - "3. **Texas**: $170,188.05\n", |
227 | | - "4. **Washington**: $138,641.27\n", |
228 | | - "5. **Pennsylvania**: $116,511.91\n", |
229 | | - "\n", |
230 | | - "### States with the Most Profit\n", |
231 | | - "1. **California**: $76,381.39\n", |
232 | | - "2. **New York**: $74,038.55\n", |
233 | | - "3. **Washington**: $33,402.65\n", |
234 | | - "4. **Michigan**: $24,463.19\n", |
235 | | - "5. **Virginia**: $18,597.95\n", |
236 | | - "\n", |
237 | | - "### Comparison\n", |
238 | | - "- **California** and **New York** are the only states that appear in both lists, indicating they are the top sellers and also generate the most profit.\n", |
239 | | - "- **Texas**, while having the third highest sales, does not rank in the top five for profit, showing a potential issue with profitability despite high sales.\n", |
240 | | - "\n", |
241 | | - "This analysis suggests that high sales do not always correlate with high profits, as seen with Texas." |
242 | | - ], |
243 | | - "text/plain": [ |
244 | | - "<IPython.core.display.Markdown object>" |
245 | | - ] |
246 | | - }, |
247 | | - "metadata": {}, |
248 | | - "output_type": "display_data" |
249 | | - } |
250 | | - ], |
| 218 | + "outputs": [], |
251 | 219 | "source": [ |
252 | 220 | "from IPython.display import Markdown, display\n", |
253 | 221 | "\n", |
254 | | - "model = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n", |
| 222 | + "model = ChatOpenAI(model=\"gpt-4o\", temperature=0)\n", |
255 | 223 | "\n", |
256 | 224 | "tableauAgent = create_react_agent(model, tools)\n", |
257 | 225 | "\n", |
|
261 | 229 | " \"messages\": [\n", |
262 | 230 | " (\n", |
263 | 231 | " \"human\",\n", |
264 | | - " \"which states sell the most? Are those the same states with the most profits?\",\n", |
| 232 | + " \"what's going on with table sales?\",\n", |
265 | 233 | " )\n", |
266 | 234 | " ]\n", |
267 | 235 | " }\n", |
268 | 236 | ")\n", |
269 | 237 | "messages\n", |
270 | | - "# display(Markdown(messages['messages'][4].content)) #display a nicely formatted answer for successful generations" |
| 238 | + "# display(Markdown(messages['messages'][3].content)) #display a nicely formatted answer for successful generations" |
271 | 239 | ] |
272 | 240 | }, |
273 | 241 | { |
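The hunk above also bumps the hard-coded message index (`messages['messages'][4]` to `[3]`), which shifts whenever the agent takes a different number of tool-calling turns. A small sketch of a sturdier way to surface the answer on successful runs, reusing the `messages` result from the invocation cell:

```python
# Sketch: read the last message in the LangGraph result instead of a fixed index;
# on successful runs it carries the agent's final answer.
from IPython.display import Markdown, display

display(Markdown(messages["messages"][-1].content))
```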
|
293 | 261 | ], |
294 | 262 | "metadata": { |
295 | 263 | "kernelspec": { |
296 | | - "display_name": "Python (package_test_env)", |
| 264 | + "display_name": "Python 3", |
297 | 265 | "language": "python", |
298 | | - "name": "package_test_env" |
| 266 | + "name": "python3" |
299 | 267 | }, |
300 | 268 | "language_info": { |
301 | 269 | "codemirror_mode": { |
|