Skip to content
Merged
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
127 changes: 62 additions & 65 deletions src/content/docs/browser-rendering/how-to/ai.mdx
Original file line number Diff line number Diff line change
export default {
}

} satisfies ExportedHandler<Env>;

```

## Call an LLM
Having the webpage text, the user's goal and output schema, we can now use an LLM
The example below uses `@hf/thebloke/deepseek-coder-6.7b-instruct-awq` but other [models](/workers-ai/models/), or services like OpenAI, could be used with minimal changes:

```ts
async getLLMResult(env, prompt: string, schema?: any) {
const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
const requestBody = {
messages: [{
role: "user",
content: prompt
}
],
};
const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`

const response = await fetch(aiUrl, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${env.API_TOKEN}`,
},
body: JSON.stringify(requestBody),
});
if (!response.ok) {
console.log(JSON.stringify(await response.text(), null, 2));
throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
}
/**
 * Sends `prompt` to a Workers AI text-generation model via the Cloudflare
 * REST API and parses the model's reply as JSON.
 *
 * @param env - Worker env; must provide `ACCOUNT_ID` and `API_TOKEN`.
 * @param prompt - Full prompt text (the caller embeds the extraction goal
 *   and the desired output schema in it).
 * @param schema - NOTE(review): currently unused inside this function; kept
 *   for interface compatibility — confirm whether it should be sent to the
 *   model or dropped.
 * @returns The parsed JSON value, or `undefined` when the model's reply is
 *   not valid JSON (the parse error is logged rather than thrown).
 * @throws Error when the HTTP call returns a non-2xx status.
 */
async function getLLMResult(env, prompt: string, schema?: any) {
  const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq";
  const requestBody = {
    messages: [{
      role: "user",
      content: prompt
    }],
  };
  const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`;

  const response = await fetch(aiUrl, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${env.API_TOKEN}`,
    },
    body: JSON.stringify(requestBody),
  });
  if (!response.ok) {
    // response.text() already resolves to a string — log it directly
    // (JSON.stringify on a string only adds quoting/escaping).
    console.log(await response.text());
    throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
  }

  // The REST API wraps the model output as { result: { response: string } }
  // (same shape the full example below asserts).
  const data = await response.json() as { result: { response: string } };
  const text = data.result.response || '';
  // The model often wraps its JSON in a fenced code block; extract the inner
  // text, falling back to the whole reply when no fence is present.
  const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
  try {
    return JSON.parse(value);
  } catch (e) {
    // Invalid JSON is logged and swallowed; callers receive `undefined`.
    console.error(`${e} . Response: ${value}`);
  }
}
```

If you want to use Browser Rendering with OpenAI instead you'd just need to change the `aiUrl` endpoint and `requestBody` (or check out the [llm-scraper-worker](https://www.npmjs.com/package/llm-scraper-worker) package).
export default {


/**
 * Sends `prompt` to a Workers AI text-generation model via the Cloudflare
 * REST API and parses the model's reply as JSON.
 *
 * @param env - Worker env; must provide `ACCOUNT_ID` and `API_TOKEN`.
 * @param prompt - Full prompt text (the caller embeds the extraction goal
 *   and the desired output schema in it).
 * @param schema - NOTE(review): currently unused inside this function; kept
 *   for interface compatibility — confirm whether it should be sent to the
 *   model or dropped.
 * @returns The parsed JSON value, or `undefined` when the model's reply is
 *   not valid JSON (the parse error is logged rather than thrown).
 * @throws Error when the HTTP call returns a non-2xx status.
 */
async function getLLMResult(env, prompt: string, schema?: any) {
  const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq";
  const requestBody = {
    messages: [{
      role: "user",
      content: prompt
    }],
  };
  const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`;

  const response = await fetch(aiUrl, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${env.API_TOKEN}`,
    },
    body: JSON.stringify(requestBody),
  });
  if (!response.ok) {
    // response.text() already resolves to a string — log it directly
    // (JSON.stringify on a string only adds quoting/escaping).
    console.log(await response.text());
    throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
  }

  // The REST API wraps the model output as { result: { response: string } }.
  const data = await response.json() as { result: { response: string } };
  const text = data.result.response || '';
  // The model often wraps its JSON in a fenced code block; extract the inner
  // text, falling back to the whole reply when no fence is present.
  const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
  try {
    return JSON.parse(value);
  } catch (e) {
    // Invalid JSON is logged and swallowed; callers receive `undefined`.
    console.error(`${e} . Response: ${value}`);
  }
}
```

With your script now running, you can go to `http://localhost:8787/` and should
}
```

For more complex websites or prompts, you might need a better model. Check out the latest models in [Workers AI](/workers-ai/models/).