Skip to content

Commit e2d3cdf

Browse files
Yizack and kodster28 authored
[Browser Rendering] Fix how to AI example formatting (cloudflare#19713)
* [Browser Rendering] Add missing function reserved word * [Browser Rendering] Fix messages format * [Browser Rendering] Fix indentation * Update src/content/docs/browser-rendering/how-to/ai.mdx --------- Co-authored-by: Kody Jackson <[email protected]>
1 parent 77ccc12 commit e2d3cdf

File tree

1 file changed

+63
-66
lines changed
  • src/content/docs/browser-rendering/how-to

1 file changed

+63
-66
lines changed

src/content/docs/browser-rendering/how-to/ai.mdx

Lines changed: 63 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -116,49 +116,47 @@ export default {
116116
}
117117

118118
} satisfies ExportedHandler<Env>;
119-
120119
```
121120

122121
## Call an LLM
123122

124123
Having the webpage text, the user's goal and output schema, we can now use an LLM to transform it to JSON according to the user's request.
125-
The example below uses `@hf/thebloke/deepseek-coder-6.7b-instruct-awq` but other [models](/workers-ai/models/), or services like OpenAI, could be used with minimal changes:
124+
The example below uses `@hf/thebloke/deepseek-coder-6.7b-instruct-awq` but other [models](/workers-ai/models/) or services like OpenAI, could be used with minimal changes:
126125

127126
```ts
128-
async getLLMResult(env, prompt: string, schema?: any) {
129-
const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
130-
const requestBody = {
131-
messages: [{
132-
role: "user",
133-
content: prompt
134-
}
135-
],
136-
};
137-
const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
138-
139-
const response = await fetch(aiUrl, {
140-
method: "POST",
141-
headers: {
142-
"Content-Type": "application/json",
143-
Authorization: `Bearer ${env.API_TOKEN}`,
144-
},
145-
body: JSON.stringify(requestBody),
146-
});
147-
if (!response.ok) {
148-
console.log(JSON.stringify(await response.text(), null, 2));
149-
throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
150-
}
127+
async function getLLMResult(env, prompt: string, schema?: any) {
128+
const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
129+
const requestBody = {
130+
messages: [{
131+
role: "user",
132+
content: prompt
133+
}],
134+
};
135+
const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
136+
137+
const response = await fetch(aiUrl, {
138+
method: "POST",
139+
headers: {
140+
"Content-Type": "application/json",
141+
Authorization: `Bearer ${env.API_TOKEN}`,
142+
},
143+
body: JSON.stringify(requestBody),
144+
});
145+
if (!response.ok) {
146+
console.log(JSON.stringify(await response.text(), null, 2));
147+
throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
148+
}
151149

152-
// process response
153-
const data = await response.json();
154-
const text = data.result.response || '';
155-
const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
156-
try {
157-
return JSON.parse(value);
158-
} catch(e) {
159-
console.error(`${e} . Response: ${value}`)
160-
}
150+
// process response
151+
const data = await response.json();
152+
const text = data.result.response || '';
153+
const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
154+
try {
155+
return JSON.parse(value);
156+
} catch(e) {
157+
console.error(`${e} . Response: ${value}`)
161158
}
159+
}
162160
```
163161

164162
If you want to use Browser Rendering with OpenAI instead you'd just need to change the `aiUrl` endpoint and `requestBody` (or check out the [llm-scraper-worker](https://www.npmjs.com/package/llm-scraper-worker) package).
@@ -224,38 +222,37 @@ export default {
224222

225223

226224
async function getLLMResult(env, prompt: string, schema?: any) {
227-
const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
228-
const requestBody = {
229-
messages: [{
230-
role: "user",
231-
content: prompt
232-
}
233-
],
234-
};
235-
const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
236-
237-
const response = await fetch(aiUrl, {
238-
method: "POST",
239-
headers: {
240-
"Content-Type": "application/json",
241-
Authorization: `Bearer ${env.API_TOKEN}`,
242-
},
243-
body: JSON.stringify(requestBody),
244-
});
245-
if (!response.ok) {
246-
console.log(JSON.stringify(await response.text(), null, 2));
247-
throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
248-
}
225+
const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
226+
const requestBody = {
227+
messages: [{
228+
role: "user",
229+
content: prompt
230+
}],
231+
};
232+
const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
233+
234+
const response = await fetch(aiUrl, {
235+
method: "POST",
236+
headers: {
237+
"Content-Type": "application/json",
238+
Authorization: `Bearer ${env.API_TOKEN}`,
239+
},
240+
body: JSON.stringify(requestBody),
241+
});
242+
if (!response.ok) {
243+
console.log(JSON.stringify(await response.text(), null, 2));
244+
throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
245+
}
249246

250-
// process response
251-
const data = await response.json() as { result: { response: string }};
252-
const text = data.result.response || '';
253-
const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
254-
try {
255-
return JSON.parse(value);
256-
} catch(e) {
257-
console.error(`${e} . Response: ${value}`)
258-
}
247+
// process response
248+
const data = await response.json() as { result: { response: string }};
249+
const text = data.result.response || '';
250+
const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
251+
try {
252+
return JSON.parse(value);
253+
} catch(e) {
254+
console.error(`${e} . Response: ${value}`)
255+
}
259256
}
260257
```
261258

@@ -275,4 +272,4 @@ With your script now running, you can go to `http://localhost:8787/` and should
275272
}
276273
```
277274

278-
For more complex websites or prompts, you might need a better model. Check out the latest models in [Workers AI](/workers-ai/models/).
275+
For more complex websites or prompts, you might need a better model. Check out the latest models in [Workers AI](/workers-ai/models/).

0 commit comments

Comments (0)