
Commit eb5c36b

[Browser Rendering] Fix indentation

1 parent fb451eb

File tree

1 file changed: +60 −61 lines changed

  • src/content/docs/browser-rendering/how-to/ai.mdx
src/content/docs/browser-rendering/how-to/ai.mdx

Lines changed: 60 additions & 61 deletions
````diff
@@ -116,7 +116,6 @@ export default {
 	}
 
 } satisfies ExportedHandler<Env>;
-
 ```
 
 ## Call an LLM
````
````diff
@@ -126,38 +125,38 @@ The example below uses `@hf/thebloke/deepseek-coder-6.7b-instruct-awq` but other
 
 ```ts
 async function getLLMResult(env, prompt: string, schema?: any) {
-const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
-const requestBody = {
-messages: [{
-role: "user",
-content: prompt
-}],
-};
-const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
-
-const response = await fetch(aiUrl, {
-method: "POST",
-headers: {
-"Content-Type": "application/json",
-Authorization: `Bearer ${env.API_TOKEN}`,
-},
-body: JSON.stringify(requestBody),
-});
-if (!response.ok) {
-console.log(JSON.stringify(await response.text(), null, 2));
-throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
-}
+	const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
+	const requestBody = {
+		messages: [{
+			role: "user",
+			content: prompt
+		}],
+	};
+	const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
+
+	const response = await fetch(aiUrl, {
+		method: "POST",
+		headers: {
+			"Content-Type": "application/json",
+			Authorization: `Bearer ${env.API_TOKEN}`,
+		},
+		body: JSON.stringify(requestBody),
+	});
+	if (!response.ok) {
+		console.log(JSON.stringify(await response.text(), null, 2));
+		throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
+	}
 
-// process response
-const data = await response.json();
-const text = data.result.response || '';
-const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
-try {
-return JSON.parse(value);
-} catch(e) {
-console.error(`${e} . Response: ${value}`)
-}
+	// process response
+	const data = await response.json();
+	const text = data.result.response || '';
+	const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
+	try {
+		return JSON.parse(value);
+	} catch(e) {
+		console.error(`${e} . Response: ${value}`)
+	}
 }
 ```
 
 If you want to use Browser Rendering with OpenAI instead you'd just need to change the `aiUrl` endpoint and `requestBody` (or check out the [llm-scraper-worker](https://www.npmjs.com/package/llm-scraper-worker) package).
````
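As a sketch of that swap (not part of this commit): the same helper pointed at OpenAI's Chat Completions endpoint, reading `choices[0].message.content` instead of `result.response`. The function name and the `OPENAI_API_KEY` binding are hypothetical.

```ts
// Minimal sketch, not from the commit: the docs' helper adapted to OpenAI's
// Chat Completions API. OPENAI_API_KEY is a hypothetical secret binding.
async function getLLMResultOpenAI(env, prompt: string) {
	const aiUrl = "https://api.openai.com/v1/chat/completions";
	const requestBody = {
		model: "gpt-4o-mini",
		messages: [{ role: "user", content: prompt }],
	};

	const response = await fetch(aiUrl, {
		method: "POST",
		headers: {
			"Content-Type": "application/json",
			Authorization: `Bearer ${env.OPENAI_API_KEY}`,
		},
		body: JSON.stringify(requestBody),
	});
	if (!response.ok) {
		throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
	}

	// OpenAI nests the text under choices[].message.content.
	const data = await response.json() as { choices: { message: { content: string } }[] };
	return data.choices[0].message.content;
}
```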
````diff
@@ -223,37 +222,37 @@ export default {
 
 
 async function getLLMResult(env, prompt: string, schema?: any) {
-const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
-const requestBody = {
-messages: [{
-role: "user",
-content: prompt
-}],
-};
-const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
-
-const response = await fetch(aiUrl, {
-method: "POST",
-headers: {
-"Content-Type": "application/json",
-Authorization: `Bearer ${env.API_TOKEN}`,
-},
-body: JSON.stringify(requestBody),
-});
-if (!response.ok) {
-console.log(JSON.stringify(await response.text(), null, 2));
-throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
-}
+	const model = "@hf/thebloke/deepseek-coder-6.7b-instruct-awq"
+	const requestBody = {
+		messages: [{
+			role: "user",
+			content: prompt
+		}],
+	};
+	const aiUrl = `https://api.cloudflare.com/client/v4/accounts/${env.ACCOUNT_ID}/ai/run/${model}`
+
+	const response = await fetch(aiUrl, {
+		method: "POST",
+		headers: {
+			"Content-Type": "application/json",
+			Authorization: `Bearer ${env.API_TOKEN}`,
+		},
+		body: JSON.stringify(requestBody),
+	});
+	if (!response.ok) {
+		console.log(JSON.stringify(await response.text(), null, 2));
+		throw new Error(`LLM call failed ${aiUrl} ${response.status}`);
+	}
 
-// process response
-const data = await response.json() as { result: { response: string }};
-const text = data.result.response || '';
-const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
-try {
-return JSON.parse(value);
-} catch(e) {
-console.error(`${e} . Response: ${value}`)
-}
+	// process response
+	const data = await response.json() as { result: { response: string }};
+	const text = data.result.response || '';
+	const value = (text.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, text])[1];
+	try {
+		return JSON.parse(value);
+	} catch(e) {
+		console.error(`${e} . Response: ${value}`)
+	}
 }
 ```
````
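One line worth seeing in isolation is the fence-extraction regex both hunks re-indent: it pulls the JSON out of a markdown code fence, falling back to the whole text when no fence is present. A minimal check (a sketch, not from the commit):

```ts
// Quick check of the fence-extraction regex used above.
const fence = "```";
const sample = `Here you go:\n${fence}json\n{"a": 1}\n${fence}`;
const value = (sample.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, sample])[1];
console.log(JSON.parse(value)); // -> { a: 1 }

// Without a fenced block, the || fallback returns the whole text unchanged:
const bare = '{"b": 2}';
const value2 = (bare.match(/```(?:json)?\s*([\s\S]*?)\s*```/) || [null, bare])[1];
console.log(JSON.parse(value2)); // -> { b: 2 }
```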

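And a minimal usage sketch (not part of the diff), assuming `getLLMResult` is in scope and the doc's `Env` carries the `ACCOUNT_ID` and `API_TOKEN` it reads; the handler mirrors the `satisfies ExportedHandler<Env>` pattern shown above:

```ts
// Hypothetical caller: ask for fenced JSON, let getLLMResult extract and parse it.
export default {
	async fetch(request, env): Promise<Response> {
		const prompt = 'Reply with a JSON object {"ok": true} inside a json code fence.';
		// getLLMResult returns the parsed object, or undefined if parsing failed.
		const result = await getLLMResult(env, prompt);
		return Response.json(result ?? { error: "unparseable LLM output" });
	},
} satisfies ExportedHandler<Env>;
```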