diff --git a/apps/web/src/app/(docs)/docs/quickstart/connect-llms/page.mdx b/apps/web/src/app/(docs)/docs/quickstart/connect-llms/page.mdx index bba3e0f72b..e3a1493818 100644 --- a/apps/web/src/app/(docs)/docs/quickstart/connect-llms/page.mdx +++ b/apps/web/src/app/(docs)/docs/quickstart/connect-llms/page.mdx @@ -14,6 +14,7 @@ If the LLM doesn't support tool use, you can, for example, prompt the LLM to out - [LlamaIndex](#llamaindex) - [Ollama](#ollama) - [Hugging Face](#hugging-face) +- [AI/ML API](#ai-ml-api) --- @@ -801,4 +802,43 @@ with Sandbox() as sandbox: execution = sandbox.run_code(code) print(execution) ``` - \ No newline at end of file + + --- + ## AI/ML API + [AI/ML API](https://aimlapi.com/app/?utm_source=e2b&utm_medium=github&utm_campaign=integration) provides 300+ AI models including DeepSeek, Gemini, and ChatGPT. These models are served with enterprise-grade rate limits and uptime. + + ```python + # pip install openai e2b-code-interpreter + import os + from openai import OpenAI + from e2b_code_interpreter import Sandbox + + # Create AI/ML API client using OpenAI-compatible endpoint + client = OpenAI(base_url="https://api.aimlapi.com/v1", api_key=os.environ["AIML_API_KEY"]) + system_prompt = "You are a helpful assistant that can execute python code in a Jupyter notebook. Only respond with the code to be executed and nothing else. Strip backticks in code blocks." + prompt = "Calculate how many r's are in the word 'strawberry'" + + # Send the prompt to the model + response = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": prompt}, + ] + ) + + # Extract the code from the response + code = response.choices[0].message.content + + # Execute code in E2B Sandbox + with Sandbox() as sandbox: + execution = sandbox.run_code(code) + result = execution.text + + print(result) + ``` +