diff --git a/src/components/PackageManagers.astro b/src/components/PackageManagers.astro
new file mode 100644
index 00000000000000..0bd355841a4f8e
--- /dev/null
+++ b/src/components/PackageManagers.astro
@@ -0,0 +1,13 @@
+---
+import {
+	PackageManagers as PackageManagersComponent,
+	type PackageManagersProps,
+} from "starlight-package-managers";
+
+type Props = PackageManagersProps;
+---
+
+<PackageManagersComponent {...Astro.props} />
diff --git a/src/components/index.ts b/src/components/index.ts
index d77b749112697d..e65928d09c7609 100644
--- a/src/components/index.ts
+++ b/src/components/index.ts
@@ -2,7 +2,6 @@
 export * from "@astrojs/starlight/components";
 export { Icon as StarlightIcon } from "@astrojs/starlight/components";
 // Community packages
-export { PackageManagers } from "starlight-package-managers";
 export { Icon as AstroIcon } from "astro-icon/components";
 // Custom components
 export { default as AnchorHeading } from "./AnchorHeading.astro";
@@ -34,6 +33,7 @@ export { default as ListTutorials } from "./ListTutorials.astro";
 export { default as Markdown } from "./Markdown.astro";
 export { default as MetaInfo } from "./MetaInfo.astro";
 export { default as NetworkMap } from "./NetworkMap.astro";
+export { default as PackageManagers } from "./PackageManagers.astro";
 export { default as PagesBuildEnvironment } from "./PagesBuildEnvironment.astro";
 export { default as PagesBuildEnvironmentLanguages } from "./PagesBuildEnvironmentLanguages.astro";
 export { default as PagesBuildEnvironmentTools } from "./PagesBuildEnvironmentTools.astro";
diff --git a/src/content/docs/agents/api-reference/browse-the-web.mdx b/src/content/docs/agents/api-reference/browse-the-web.mdx
index 689b856339b5e3..4be955efea9f90 100644
--- a/src/content/docs/agents/api-reference/browse-the-web.mdx
+++ b/src/content/docs/agents/api-reference/browse-the-web.mdx
@@ -5,7 +5,14 @@ sidebar:
   order: 7
 ---
 
-import { MetaInfo, Render, Type, TypeScriptExample, WranglerConfig } from "~/components";
+import {
+	MetaInfo,
+	Render,
+	Type,
+	TypeScriptExample,
+	WranglerConfig,
+	PackageManagers,
+} from "~/components";
 
 Agents can browse the web using the [Browser Rendering](/browser-rendering/) API or your preferred headless browser service.
 
@@ -30,8 +37,11 @@ export class MyAgent extends Agent<Env> {
 		const page = await browser.newPage();
 		await page.goto(url);
 
-		await page.waitForSelector('body');
-		const bodyContent = await page.$eval('body', (element) => element.innerHTML);
+		await page.waitForSelector("body");
+		const bodyContent = await page.$eval(
+			"body",
+			(element) => element.innerHTML,
+		);
 		const client = new OpenAI({
 			apiKey: this.env.OPENAI_API_KEY,
 		});
@@ -40,12 +50,12 @@ export class MyAgent extends Agent<Env> {
 			model: this.env.MODEL,
 			messages: [
 				{
-					role: 'user',
+					role: "user",
 					content: `Return a JSON object with the product names, prices and URLs with the following format: { "name": "Product Name", "price": "Price", "url": "URL" } from the website content below. ${bodyContent}`,
 				},
 			],
 			response_format: {
-				type: 'json_object',
+				type: "json_object",
 			},
 		});
 
@@ -62,9 +72,7 @@ export class MyAgent extends Agent<Env> {
 
 You'll also need to install the `@cloudflare/puppeteer` package and add the following to the wrangler configuration of your Agent:
 
-```sh
-npm install @cloudflare/puppeteer --save-dev
-```
+<PackageManagers pkg="@cloudflare/puppeteer" dev />
 
@@ -72,9 +80,9 @@ npm install @cloudflare/puppeteer --save-dev
 {
 	// ...
 	"browser": {
-		"binding": "MYBROWSER"
-	}
-	// ...
+		"binding": "MYBROWSER",
+	},
+	// ...
 }
 ```
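For reference, the examples in this file read `this.env.OPENAI_API_KEY`, `this.env.MODEL`, and the browser binding configured above. A minimal sketch of the corresponding `Env` type, with names taken from the snippets rather than from this PR, and `Fetcher` assumed as the binding type:

```ts
// Sketch only — binding and variable names mirror the examples above.
interface Env {
	MYBROWSER: Fetcher; // Browser Rendering binding ("browser" in the wrangler config)
	OPENAI_API_KEY: string; // set with `wrangler secret put OPENAI_API_KEY`
	MODEL: string; // model name passed to the OpenAI client
}
```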
@@ -90,6 +98,7 @@ Once you have your [Browserbase API key](https://docs.browserbase.com/integratio
 cd your-agent-project-folder
 npx wrangler@latest secret put BROWSERBASE_API_KEY
 ```
+
 ```sh output
 Enter a secret value: ******
 Creating the secret for the Worker "agents-example"
@@ -98,9 +107,7 @@ Success! Uploaded secret BROWSERBASE_API_KEY
 
 Install the `@cloudflare/puppeteer` package and use it from within your Agent to call the Browserbase API:
 
-```sh
-npm install @cloudflare/puppeteer
-```
+<PackageManagers pkg="@cloudflare/puppeteer" />
diff --git a/src/content/docs/agents/api-reference/using-ai-models.mdx b/src/content/docs/agents/api-reference/using-ai-models.mdx
index c9a60db00a4685..bb6091af856aa9 100644
--- a/src/content/docs/agents/api-reference/using-ai-models.mdx
+++ b/src/content/docs/agents/api-reference/using-ai-models.mdx
@@ -3,18 +3,25 @@ title: Using AI Models
 pcx_content_type: concept
 sidebar:
   order: 3
-
 ---
 
-import { AnchorHeading, MetaInfo, Render, Type, TypeScriptExample, WranglerConfig } from "~/components";
+import {
+	AnchorHeading,
+	MetaInfo,
+	Render,
+	Type,
+	TypeScriptExample,
+	WranglerConfig,
+	PackageManagers,
+} from "~/components";
 
 Agents can communicate with AI models hosted on any provider, including:
 
-* [Workers AI](/workers-ai/)
-* The [AI SDK](https://sdk.vercel.ai/docs/ai-sdk-core/overview)
-* [OpenAI](https://platform.openai.com/docs/quickstart?language=javascript)
-* [Anthropic](https://docs.anthropic.com/en/api/client-sdks#typescript)
-* [Google's Gemini](https://ai.google.dev/gemini-api/docs/openai)
+- [Workers AI](/workers-ai/)
+- The [AI SDK](https://sdk.vercel.ai/docs/ai-sdk-core/overview)
+- [OpenAI](https://platform.openai.com/docs/quickstart?language=javascript)
+- [Anthropic](https://docs.anthropic.com/en/api/client-sdks#typescript)
+- [Google's Gemini](https://ai.google.dev/gemini-api/docs/openai)
 
 You can also use the model routing features in [AI Gateway](/ai-gateway/) to route across providers, eval responses, and manage AI provider rate limits.
 
@@ -28,7 +35,7 @@ You can call models from any method within an Agent, including from HTTP request
 
 Importantly, Agents can call AI models on their own — autonomously — and can handle long-running responses that can take minutes (or longer) to respond in full.
 
-### Long-running model requests {/*long-running-model-requests*/}
+### Long-running model requests {/* long-running-model-requests */}
 
 Modern [reasoning models](https://platform.openai.com/docs/guides/reasoning) or "thinking" models can take some time to both generate a response _and_ stream the response back to the client.
 
@@ -37,8 +44,8 @@ Instead of buffering the entire response, or risking the client disconnecting, y
 
 ```ts
-import { Agent } from "agents"
-import { OpenAI } from "openai"
+import { Agent } from "agents";
+import { OpenAI } from "openai";
 
 export class MyAgent extends Agent<Env> {
 	async onConnect(connection: Connection, ctx: ConnectionContext) {
 	}
 
 	async onMessage(connection: Connection, message: WSMessage) {
-		let msg = JSON.parse(message)
+		let msg = JSON.parse(message);
 		// This can run as long as it needs to, and return as many messages as it needs to!
-		await queryReasoningModel(connection, msg.prompt)
-	}
+		await this.queryReasoningModel(connection, msg.prompt);
+	}
 
 	async queryReasoningModel(connection: Connection, userPrompt: string) {
 		const client = new OpenAI({
@@ -58,23 +65,23 @@
 
 		try {
 			const stream = await client.chat.completions.create({
-				model: this.env.MODEL || 'o3-mini',
-				messages: [{ role: 'user', content: userPrompt }],
+				model: this.env.MODEL || "o3-mini",
+				messages: [{ role: "user", content: userPrompt }],
 				stream: true,
 			});
 
 			// Stream responses back as WebSocket messages
 			for await (const chunk of stream) {
-				const content = chunk.choices[0]?.delta?.content || '';
+				const content = chunk.choices[0]?.delta?.content || "";
 				if (content) {
-					connection.send(JSON.stringify({ type: 'chunk', content }));
+					connection.send(JSON.stringify({ type: "chunk", content }));
 				}
 			}
 
 			// Send completion message
-			connection.send(JSON.stringify({ type: 'done' }));
+			connection.send(JSON.stringify({ type: "done" }));
 		} catch (error) {
-			connection.send(JSON.stringify({ type: 'error', error: error }));
+			connection.send(JSON.stringify({ type: "error", error: error }));
 		}
 	}
 }
 ```
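On the client side, the `chunk`/`done`/`error` messages emitted above can be consumed from a plain WebSocket. A minimal sketch; the URL shape depends on how you route requests to the Agent and is assumed here:

```ts
// Sketch only — adjust the URL to however routeAgentRequest exposes your Agent.
const ws = new WebSocket("wss://example.com/agents/my-agent/agent-123");

let output = "";
ws.onopen = () => ws.send(JSON.stringify({ prompt: "Why is the sky blue?" }));
ws.onmessage = (event) => {
	const msg = JSON.parse(event.data);
	if (msg.type === "chunk") output += msg.content; // accumulate streamed tokens
	if (msg.type === "done") console.log(output); // full response received
	if (msg.type === "error") console.error(msg.error);
};
```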
@@ -95,7 +102,7 @@ Workers AI supports streaming responses out-of-the-box by setting `stream: true`
 
 ```ts
-import { Agent } from "agents"
+import { Agent } from "agents";
 
 interface Env {
 	AI: Ai;
@@ -104,17 +111,17 @@ interface Env {
 export class MyAgent extends Agent<Env> {
 	async onRequest(request: Request) {
 		const response = await this.env.AI.run(
-			"@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
-			{
-				prompt: "Build me a Cloudflare Worker that returns JSON.",
-				stream: true, // Stream a response and don't block the client!
-			}
-		);
+			"@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+			{
+				prompt: "Build me a Cloudflare Worker that returns JSON.",
+				stream: true, // Stream a response and don't block the client!
+			},
+		);
 
 		// Return the stream
-		return new Response(answer, {
-			headers: { "content-type": "text/event-stream" }
-		})
+		return new Response(response, {
+			headers: { "content-type": "text/event-stream" },
+		});
 	}
 }
 ```
 
@@ -145,7 +152,7 @@ Model routing allows you to route requests to different AI models based on wheth
 
 ```ts
-import { Agent } from "agents"
+import { Agent } from "agents";
 
 interface Env {
 	AI: Ai;
@@ -154,20 +161,20 @@ interface Env {
 export class MyAgent extends Agent<Env> {
 	async onRequest(request: Request) {
 		const response = await this.env.AI.run(
-			"@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
-			{
-				prompt: "Build me a Cloudflare Worker that returns JSON."
-			},
-			{
-				gateway: {
-					id: "{gateway_id}", // Specify your AI Gateway ID here
-					skipCache: false,
-					cacheTtl: 3360,
-				},
-			},
-		);
-
-		return Response.json(response)
+			"@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+			{
+				prompt: "Build me a Cloudflare Worker that returns JSON.",
+			},
+			{
+				gateway: {
+					id: "{gateway_id}", // Specify your AI Gateway ID here
+					skipCache: false,
+					cacheTtl: 3360,
+				},
+			},
+		);
+
+		return Response.json(response);
 	}
 }
 ```
 
@@ -175,6 +182,7 @@ export class MyAgent extends Agent<Env> {
 Your Wrangler configuration will need an `ai` binding added. This is shared across both Workers AI and AI Gateway.
 
+
 ```toml
@@ -192,25 +200,23 @@ The [AI SDK](https://sdk.vercel.ai/docs/introduction) provides a unified API for
 
 To use the AI SDK, install the `ai` package and use it within your Agent.
The example below shows how to use it to generate text on request, but you can use it from any method within your Agent, including WebSocket handlers, as part of a scheduled task, or even when the Agent is initialized.
 
-```sh
-npm install ai @ai-sdk/openai
-```
+<PackageManagers pkg="ai @ai-sdk/openai" />
 
 ```ts
-import { Agent } from "agents"
-import { generateText } from 'ai';
-import { openai } from '@ai-sdk/openai';
+import { Agent } from "agents";
+import { generateText } from "ai";
+import { openai } from "@ai-sdk/openai";
 
 export class MyAgent extends Agent<Env> {
 	async onRequest(request: Request): Promise<Response> {
 		const { text } = await generateText({
 			model: openai("o3-mini"),
 			prompt: "Build me an AI agent on Cloudflare Workers",
-			});
+		});
 
-		return Response.json({modelResponse: text})
+		return Response.json({ modelResponse: text });
 	}
 }
 ```
 
@@ -226,43 +232,45 @@ Agents can stream responses back over HTTP using Server Sent Events (SSE) from w
 
 ```ts
-import { Agent } from "agents"
-import { OpenAI } from "openai"
+import { Agent } from "agents";
+import { OpenAI } from "openai";
 
 export class MyAgent extends Agent<Env> {
 	async onRequest(request: Request): Promise<Response> {
 		const openai = new OpenAI({
-			apiKey: this.env.GEMINI_API_KEY,
-			baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/"
+			apiKey: this.env.GEMINI_API_KEY,
+			baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/",
 		});
 
 		// Create a TransformStream to handle streaming data
-		let { readable, writable } = new TransformStream();
-		let writer = writable.getWriter();
-		const textEncoder = new TextEncoder();
+		let { readable, writable } = new TransformStream();
+		let writer = writable.getWriter();
+		const textEncoder = new TextEncoder();
 
 		// Use this.ctx.waitUntil to run the async function in the background
 		// so that it doesn't block the streaming response
-		ctx.waitUntil(
-			(async () => {
-				const stream = await openai.chat.completions.create({
-					model: "4o",
-					messages: [{ role: "user", content: "Write me a Cloudflare Worker." }],
-					stream: true,
-				});
-
-				// loop over the data as it is streamed and write to the writeable
-				for await (const part of stream) {
-					writer.write(
-						textEncoder.encode(part.choices[0]?.delta?.content || ""),
-					);
-				}
-				writer.close();
-			})(),
-		);
+		this.ctx.waitUntil(
+			(async () => {
+				const stream = await openai.chat.completions.create({
+					model: "4o",
+					messages: [
+						{ role: "user", content: "Write me a Cloudflare Worker." },
+					],
+					stream: true,
+				});
+
+				// loop over the data as it is streamed and write to the writable
+				for await (const part of stream) {
+					writer.write(
+						textEncoder.encode(part.choices[0]?.delta?.content || ""),
+					);
+				}
+				writer.close();
+			})(),
+		);
 
 		// Return the readable stream back to the client
-		return new Response(readable)
+		return new Response(readable);
 	}
 }
 ```
diff --git a/src/content/docs/agents/getting-started/testing-your-agent.mdx b/src/content/docs/agents/getting-started/testing-your-agent.mdx
index 028883995f8927..bc0373b88e081b 100644
--- a/src/content/docs/agents/getting-started/testing-your-agent.mdx
+++ b/src/content/docs/agents/getting-started/testing-your-agent.mdx
@@ -3,10 +3,9 @@ title: Testing your Agents
 pcx_content_type: get-started
 sidebar:
   order: 9
-
 ---
 
-import { Render, PackageManagers, WranglerConfig } from "~/components"
+import { Render, PackageManagers, WranglerConfig } from "~/components";
 
 Because Agents run on Cloudflare Workers and Durable Objects, they can be tested using the same tools and techniques as Workers and Durable Objects.
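The vitest pool configuration in the next hunks points at `wrangler.toml`. For the tests to find your Agent, that file needs a Durable Object binding and a SQLite-backed migration for the class. A minimal sketch, assuming the class from these examples is named `MyAgent` (all names illustrative):

```toml
# Sketch only — names mirror the vitest examples below.
name = "agents-example"
main = "src/index.ts"
compatibility_date = "2025-01-01"

[[durable_objects.bindings]]
name = "MyAgent"
class_name = "MyAgent"

[[migrations]]
tag = "v1"
new_sqlite_classes = ["MyAgent"]
```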
@@ -33,13 +32,13 @@ Ensure that your `vitest.config.js` file is identical to the following: import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config"; export default defineWorkersConfig({ - test: { - poolOptions: { - workers: { - wrangler: { configPath: "./wrangler.toml" }, - }, - }, - }, + test: { + poolOptions: { + workers: { + wrangler: { configPath: "./wrangler.toml" }, + }, + }, + }, }); ``` @@ -48,16 +47,16 @@ export default defineWorkersConfig({ Add a `durableObjects` configuration to `vitest.config.js` with the name of your Agent class: ```js -import { defineWorkersConfig } from '@cloudflare/vitest-pool-workers/config'; +import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config"; export default defineWorkersConfig({ test: { poolOptions: { workers: { - main: './src/index.ts', + main: "./src/index.ts", miniflare: { durableObjects: { - NAME: 'MyAgent', + NAME: "MyAgent", }, }, }, @@ -77,29 +76,36 @@ Review the [Vitest documentation](https://vitest.dev/) for more information on t Tests use the `vitest` framework. A basic test suite for your Agent can validate how your Agent responds to requests, but can also unit test your Agent's methods and state. ```ts -import { env, createExecutionContext, waitOnExecutionContext, SELF } from 'cloudflare:test'; -import { describe, it, expect } from 'vitest'; -import worker from '../src'; -import { Env } from '../src'; +import { + env, + createExecutionContext, + waitOnExecutionContext, + SELF, +} from "cloudflare:test"; +import { describe, it, expect } from "vitest"; +import worker from "../src"; +import { Env } from "../src"; interface ProvidedEnv extends Env {} -describe('make a request to my Agent', () => { +describe("make a request to my Agent", () => { // Unit testing approach - it('responds with state', async () => { + it("responds with state", async () => { // Provide a valid URL that your Worker can use to route to your Agent // If you are using routeAgentRequest, this will be /agent/:agent/:name - const request = new Request('http://example.com/agent/my-agent/agent-123'); + const request = new Request( + "http://example.com/agent/my-agent/agent-123", + ); const ctx = createExecutionContext(); const response = await worker.fetch(request, env, ctx); await waitOnExecutionContext(ctx); - expect(await response.text()).toMatchObject({ hello: 'from your agent' }); + expect(await response.text()).toMatchObject({ hello: "from your agent" }); }); - it('also responds with state', async () => { - const request = new Request('http://example.com/agent/my-agent/agent-123'); + it("also responds with state", async () => { + const request = new Request("http://example.com/agent/my-agent/agent-123"); const response = await SELF.fetch(request); - expect(await response.text()).toMatchObject({ hello: 'from your agent' }); + expect(await response.text()).toMatchObject({ hello: "from your agent" }); }); }); ``` @@ -113,6 +119,7 @@ $ npm run test # or run vitest directly $ npx vitest ``` + ```sh output MyAgent ✓ should return a greeting (1 ms) @@ -129,6 +136,7 @@ You can also run an Agent locally using the `wrangler` CLI: ```sh $ npx wrangler dev ``` + ```sh output Your Worker and resources are simulated locally via Miniflare. For more information, see: https://developers.cloudflare.com/workers/testing/local-development. 
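Throughout this PR, removed `npm install` fences are replaced by the new `<PackageManagers>` wrapper so each snippet renders tabbed commands for every package manager. A representative usage, with props assumed from the `starlight-package-managers` API:

```mdx
import { PackageManagers } from "~/components";

{/* Renders tabbed install commands for npm, yarn, pnpm, and friends */}
<PackageManagers pkg="@cloudflare/puppeteer" dev />

{/* type="exec" wraps one-off CLI invocations such as `npx wrangler deploy` */}
<PackageManagers type="exec" pkg="wrangler" args="deploy" />
```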
diff --git a/src/content/docs/ai-gateway/tutorials/deploy-aig-worker.mdx b/src/content/docs/ai-gateway/tutorials/deploy-aig-worker.mdx
index ac03290212ac17..f0473805d5c59a 100644
--- a/src/content/docs/ai-gateway/tutorials/deploy-aig-worker.mdx
+++ b/src/content/docs/ai-gateway/tutorials/deploy-aig-worker.mdx
@@ -67,9 +67,7 @@ export default {
 
 With your Worker project created, you can make your first request to OpenAI. You will use the OpenAI node library to interact with the OpenAI API. Install the OpenAI node library with `npm`:
 
-```sh title="Install the OpenAI node library"
-npm install openai
-```
+<PackageManagers pkg="openai" />
 
 In your `src/index.js` file, add the import for `openai` above `export default`:
 
@@ -95,9 +93,7 @@ export default {
 
 To make this work, you need to use [`wrangler secret put`](/workers/wrangler/commands/#put) to set your `OPENAI_API_KEY`. This will save the API key to your environment so your Worker can access it when deployed. This key is the API key you created earlier in the OpenAI dashboard:
 
-```sh title="Save your API key to your Workers env"
-npx wrangler secret put OPENAI_API_KEY
-```
+<PackageManagers type="exec" pkg="wrangler" args="secret put OPENAI_API_KEY" />
 
 To make this work in local development, create a new file `.dev.vars` in your Worker project and add this line. Make sure to replace `OPENAI_API_KEY` with your own OpenAI API key:
 
@@ -143,9 +139,7 @@ export default {
 
 To deploy your application, run the `npx wrangler deploy` command to deploy your Worker application:
 
-```sh title="Deploy your Worker"
-npx wrangler deploy
-```
+<PackageManagers type="exec" pkg="wrangler" args="deploy" />
 
 You can now preview your Worker at \<YOUR_WORKER>.\<YOUR_SUBDOMAIN>.workers.dev.
 
diff --git a/src/content/docs/browser-rendering/how-to/pdf-generation.mdx b/src/content/docs/browser-rendering/how-to/pdf-generation.mdx
index 0be1fed0e738e7..fa47e6a20338a8 100644
--- a/src/content/docs/browser-rendering/how-to/pdf-generation.mdx
+++ b/src/content/docs/browser-rendering/how-to/pdf-generation.mdx
@@ -5,7 +5,7 @@ sidebar:
   order: 1
 ---
 
-import { Aside, WranglerConfig } from "~/components";
+import { Aside, WranglerConfig, PackageManagers } from "~/components";
 
 As seen in the [Getting Started guide](/browser-rendering/workers-binding-api/screenshots/), Browser Rendering can be used to generate screenshots for any given URL. Alongside screenshots, you can also generate full PDF documents for a given webpage, and you can also provide the webpage markup and style yourself.
 
@@ -13,15 +13,11 @@ As seen in the [Getting Started guide](/browser-rendering/workers-binding-api/sc
 
 1. Use the `create-cloudflare` CLI to generate a new Hello World Cloudflare Worker script:
 
-```sh
-npm create cloudflare@latest -- browser-worker
-```
+<PackageManagers type="create" pkg="cloudflare@latest" args="browser-worker" />
 
 2. Install `@cloudflare/puppeteer`, which allows you to control the Browser Rendering instance:
 
-```sh
-npm install @cloudflare/puppeteer --save-dev
-```
+<PackageManagers pkg="@cloudflare/puppeteer" dev />
 
 3. Add your Browser Rendering binding to your new Wrangler configuration:
 
@@ -30,6 +26,7 @@ npm install @cloudflare/puppeteer --save-dev
 ```toml title="wrangler.toml"
 browser = { binding = "BROWSER" }
 ```
+
 4.
Replace the contents of `src/index.ts` (or `src/index.js` for JavaScript projects) with the following skeleton script: @@ -240,7 +237,7 @@ export default { const pdf = await page.pdf({ printBackground: true }); // Close browser since we no longer need it - await browser.close(); + await browser.close(); return new Response(pdf, { headers: { @@ -253,9 +250,7 @@ export default { You can run this script to test it using Wrangler’s `--remote` flag: -```sh -npx wrangler@latest dev --remote -``` + With your script now running, you can pass in a `?name` parameter to the local URL (such as `http://localhost:8787/?name=Harley`) and should see the following: diff --git a/src/content/docs/browser-rendering/platform/playwright.mdx b/src/content/docs/browser-rendering/platform/playwright.mdx index 1bcdb96f002199..4da2213b0e9163 100644 --- a/src/content/docs/browser-rendering/platform/playwright.mdx +++ b/src/content/docs/browser-rendering/platform/playwright.mdx @@ -7,7 +7,13 @@ sidebar: badge: Beta --- -import { Render, WranglerConfig, TabItem, Tabs } from "~/components"; +import { + Render, + WranglerConfig, + TabItem, + Tabs, + PackageManagers, +} from "~/components"; [Playwright](https://playwright.dev/) is an open-source package developed by Microsoft that can do browser automation tasks; it is commonly used to write frontend tests, create screenshots, or crawl pages. @@ -15,9 +21,7 @@ The Workers team forked a [version of Playwright](https://github.com/cloudflare/ Our version is open sourced and can be found in [Cloudflare's fork of Playwright](https://github.com/cloudflare/playwright). The npm package can be installed from [npmjs](https://www.npmjs.com/) as [@cloudflare/playwright](https://www.npmjs.com/package/@cloudflare/playwright): -```bash -npm install @cloudflare/playwright --save-dev -``` + ## Use Playwright in a Worker @@ -44,9 +48,7 @@ binding = "MYBROWSER" Install the npm package: -```bash -npm install --save-dev @cloudflare/playwright -``` + Let's look at some examples of how to use Playwright: @@ -55,41 +57,41 @@ Let's look at some examples of how to use Playwright: Using browser automation to take screenshots of web pages is a common use case. This script tells the browser to navigate to https://demo.playwright.dev/todomvc, create some items, take a screenshot of the page, and return the image in the response. 
```ts -import { launch, type BrowserWorker } from '@cloudflare/playwright'; +import { launch, type BrowserWorker } from "@cloudflare/playwright"; interface Env { - MYBROWSER: BrowserWorker; + MYBROWSER: BrowserWorker; } export default { - async fetch(request: Request, env: Env) { - const browser = await launch(env.MYBROWSER); - const page = await browser.newPage(); - - await page.goto('https://demo.playwright.dev/todomvc'); - - const TODO_ITEMS = [ - 'buy some cheese', - 'feed the cat', - 'book a doctors appointment' - ]; - - const newTodo = page.getByPlaceholder('What needs to be done?'); - for (const item of TODO_ITEMS) { - await newTodo.fill(item); - await newTodo.press('Enter'); - } - - const img = await page.screenshot(); - await browser.close(); - - return new Response(img, { - headers: { - 'Content-Type': 'image/png', - }, - }); - }, -} + async fetch(request: Request, env: Env) { + const browser = await launch(env.MYBROWSER); + const page = await browser.newPage(); + + await page.goto("https://demo.playwright.dev/todomvc"); + + const TODO_ITEMS = [ + "buy some cheese", + "feed the cat", + "book a doctors appointment", + ]; + + const newTodo = page.getByPlaceholder("What needs to be done?"); + for (const item of TODO_ITEMS) { + await newTodo.fill(item); + await newTodo.press("Enter"); + } + + const img = await page.screenshot(); + await browser.close(); + + return new Response(img, { + headers: { + "Content-Type": "image/png", + }, + }); + }, +}; ``` ### Trace @@ -100,46 +102,46 @@ Here's an example of a worker generating a trace file: ```ts import { launch, type BrowserWorker } from "@cloudflare/playwright"; -import fs from '@cloudflare/playwright/fs'; +import fs from "@cloudflare/playwright/fs"; interface Env { - MYBROWSER: BrowserWorker; + MYBROWSER: BrowserWorker; } export default { - async fetch(request: Request, env: Env) { - const browser = await launch(env.MYBROWSER); - const page = await browser.newPage(); - - // Start tracing before navigating to the page - await page.context().tracing.start({ screenshots: true, snapshots: true }); - - await page.goto('https://demo.playwright.dev/todomvc'); - - const TODO_ITEMS = [ - 'buy some cheese', - 'feed the cat', - 'book a doctors appointment' - ]; - - const newTodo = page.getByPlaceholder('What needs to be done?'); - for (const item of TODO_ITEMS) { - await newTodo.fill(item); - await newTodo.press('Enter'); - } - - // Stop tracing and save the trace to a zip file - await page.context().tracing.stop({ path: 'trace.zip' }); - await browser.close(); - const file = await fs.promises.readFile('trace.zip'); - - return new Response(file, { - status: 200, - headers: { - 'Content-Type': 'application/zip', - }, - }); - }, + async fetch(request: Request, env: Env) { + const browser = await launch(env.MYBROWSER); + const page = await browser.newPage(); + + // Start tracing before navigating to the page + await page.context().tracing.start({ screenshots: true, snapshots: true }); + + await page.goto("https://demo.playwright.dev/todomvc"); + + const TODO_ITEMS = [ + "buy some cheese", + "feed the cat", + "book a doctors appointment", + ]; + + const newTodo = page.getByPlaceholder("What needs to be done?"); + for (const item of TODO_ITEMS) { + await newTodo.fill(item); + await newTodo.press("Enter"); + } + + // Stop tracing and save the trace to a zip file + await page.context().tracing.stop({ path: "trace.zip" }); + await browser.close(); + const file = await fs.promises.readFile("trace.zip"); + + return new Response(file, { + status: 200, + 
headers: { + "Content-Type": "application/zip", + }, + }); + }, }; ``` @@ -148,38 +150,40 @@ export default { One of the most common use cases for using Playwright is software testing. Playwright includes test assertion features in its APIs; refer to [Assertions](https://playwright.dev/docs/test-assertions) in the Playwright documentation for details. Here's an example of a Worker doing `expect()` test assertions of the [todomvc](https://demo.playwright.dev/todomvc) demo page: ```ts -import { launch, type BrowserWorker } from '@cloudflare/playwright'; -import { expect } from '@cloudflare/playwright/test'; +import { launch, type BrowserWorker } from "@cloudflare/playwright"; +import { expect } from "@cloudflare/playwright/test"; interface Env { - MYBROWSER: BrowserWorker; + MYBROWSER: BrowserWorker; } export default { - async fetch(request: Request, env: Env) { - const browser = await launch(env.MYBROWSER); - const page = await browser.newPage(); - - await page.goto('https://demo.playwright.dev/todomvc'); - - const TODO_ITEMS = [ - 'buy some cheese', - 'feed the cat', - 'book a doctors appointment' - ]; - - const newTodo = page.getByPlaceholder('What needs to be done?'); - for (const item of TODO_ITEMS) { - await newTodo.fill(item); - await newTodo.press('Enter'); - } - - await expect(page.getByTestId('todo-title')).toHaveCount(TODO_ITEMS.length); - - await Promise.all(TODO_ITEMS.map( - (value, index) => expect(page.getByTestId('todo-title').nth(index)).toHaveText(value) - )); - }, + async fetch(request: Request, env: Env) { + const browser = await launch(env.MYBROWSER); + const page = await browser.newPage(); + + await page.goto("https://demo.playwright.dev/todomvc"); + + const TODO_ITEMS = [ + "buy some cheese", + "feed the cat", + "book a doctors appointment", + ]; + + const newTodo = page.getByPlaceholder("What needs to be done?"); + for (const item of TODO_ITEMS) { + await newTodo.fill(item); + await newTodo.press("Enter"); + } + + await expect(page.getByTestId("todo-title")).toHaveCount(TODO_ITEMS.length); + + await Promise.all( + TODO_ITEMS.map((value, index) => + expect(page.getByTestId("todo-title").nth(index)).toHaveText(value), + ), + ); + }, }; ``` @@ -203,16 +207,16 @@ In order to facilitate browser session management, we have extended the Playwrig ```json [ - { - "connectionId": "2a2246fa-e234-4dc1-8433-87e6cee80145", - "connectionStartTime": 1711621704607, - "sessionId": "478f4d7d-e943-40f6-a414-837d3736a1dc", - "startTime": 1711621703708 - }, - { - "sessionId": "565e05fb-4d2a-402b-869b-5b65b1381db7", - "startTime": 1711621703808 - } + { + "connectionId": "2a2246fa-e234-4dc1-8433-87e6cee80145", + "connectionStartTime": 1711621704607, + "sessionId": "478f4d7d-e943-40f6-a414-837d3736a1dc", + "startTime": 1711621703708 + }, + { + "sessionId": "565e05fb-4d2a-402b-869b-5b65b1381db7", + "startTime": 1711621703808 + } ] ``` @@ -224,20 +228,20 @@ Notice that the session `478f4d7d-e943-40f6-a414-837d3736a1dc` has an active wor ```json [ - { - "closeReason": 2, - "closeReasonText": "BrowserIdle", - "endTime": 1711621769485, - "sessionId": "478f4d7d-e943-40f6-a414-837d3736a1dc", - "startTime": 1711621703708 - }, - { - "closeReason": 1, - "closeReasonText": "NormalClosure", - "endTime": 1711123501771, - "sessionId": "2be00a21-9fb6-4bb2-9861-8cd48e40e771", - "startTime": 1711123430918 - } + { + "closeReason": 2, + "closeReasonText": "BrowserIdle", + "endTime": 1711621769485, + "sessionId": "478f4d7d-e943-40f6-a414-837d3736a1dc", + "startTime": 1711621703708 + }, + { + "closeReason": 1, + 
"closeReasonText": "NormalClosure", + "endTime": 1711123501771, + "sessionId": "2be00a21-9fb6-4bb2-9861-8cd48e40e771", + "startTime": 1711123430918 + } ] ``` @@ -251,13 +255,13 @@ You should also be able to access this information in the dashboard, albeit with ```json { - "activeSessions": [ - { "id": "478f4d7d-e943-40f6-a414-837d3736a1dc" }, - { "id": "565e05fb-4d2a-402b-869b-5b65b1381db7" } - ], - "allowedBrowserAcquisitions": 1, - "maxConcurrentSessions": 2, - "timeUntilNextAllowedBrowserAcquisition": 0 + "activeSessions": [ + { "id": "478f4d7d-e943-40f6-a414-837d3736a1dc" }, + { "id": "565e05fb-4d2a-402b-869b-5b65b1381db7" } + ], + "allowedBrowserAcquisitions": 1, + "maxConcurrentSessions": 2, + "timeUntilNextAllowedBrowserAcquisition": 0 } ``` diff --git a/src/content/docs/browser-rendering/platform/puppeteer.mdx b/src/content/docs/browser-rendering/platform/puppeteer.mdx index cb58795616f59c..50a4b14930400a 100644 --- a/src/content/docs/browser-rendering/platform/puppeteer.mdx +++ b/src/content/docs/browser-rendering/platform/puppeteer.mdx @@ -6,7 +6,7 @@ sidebar: order: 10 --- -import { TabItem, Tabs } from "~/components"; +import { TabItem, Tabs, PackageManagers } from "~/components"; [Puppeteer](https://pptr.dev/) is one of the most popular libraries that abstract the lower-level DevTools protocol from developers and provides a high-level API that you can use to easily instrument Chrome/Chromium and automate browsing sessions. Puppeteer is used for tasks like creating screenshots, crawling pages, and testing web applications. @@ -16,9 +16,7 @@ The Workers team forked a version of Puppeteer and patched it to connect to the Our version is open sourced and can be found in [Cloudflare's fork of Puppeteer](https://github.com/cloudflare/puppeteer). The npm can be installed from [npmjs](https://www.npmjs.com/) as [@cloudflare/puppeteer](https://www.npmjs.com/package/@cloudflare/puppeteer): -```bash -npm install @cloudflare/puppeteer --save-dev -``` + ## Use Puppeteer in a Worker diff --git a/src/content/docs/browser-rendering/workers-binding-api/browser-rendering-with-DO.mdx b/src/content/docs/browser-rendering/workers-binding-api/browser-rendering-with-DO.mdx index 8112b9440bff5f..5b8e9af4695335 100644 --- a/src/content/docs/browser-rendering/workers-binding-api/browser-rendering-with-DO.mdx +++ b/src/content/docs/browser-rendering/workers-binding-api/browser-rendering-with-DO.mdx @@ -37,9 +37,7 @@ Create a new Worker project named `browser-worker` by running: In your `browser-worker` directory, install Cloudflare’s [fork of Puppeteer](/browser-rendering/platform/puppeteer/): -```sh -npm install @cloudflare/puppeteer --save-dev -``` + ## 3. Create a R2 bucket diff --git a/src/content/docs/browser-rendering/workers-binding-api/reuse-sessions.mdx b/src/content/docs/browser-rendering/workers-binding-api/reuse-sessions.mdx index 39fdf1fb8d647d..dc17f7e7cd3fb3 100644 --- a/src/content/docs/browser-rendering/workers-binding-api/reuse-sessions.mdx +++ b/src/content/docs/browser-rendering/workers-binding-api/reuse-sessions.mdx @@ -37,13 +37,12 @@ Create a new Worker project named `browser-worker` by running: In your `browser-worker` directory, install Cloudflare's [fork of Puppeteer](/browser-rendering/platform/puppeteer/): -```sh -npm install @cloudflare/puppeteer --save-dev -``` + ## 3. 
Configure the [Wrangler configuration file](/workers/wrangler/configuration/) + ```toml name = "browser-worker" main = "src/index.ts" @@ -53,6 +52,7 @@ compatibility_flags = [ "nodejs_compat" ] browser = { binding = "MYBROWSER" } ``` + ## 4. Code diff --git a/src/content/docs/browser-rendering/workers-binding-api/screenshots.mdx b/src/content/docs/browser-rendering/workers-binding-api/screenshots.mdx index da009c3273c409..92b15319a57fd4 100644 --- a/src/content/docs/browser-rendering/workers-binding-api/screenshots.mdx +++ b/src/content/docs/browser-rendering/workers-binding-api/screenshots.mdx @@ -5,7 +5,13 @@ sidebar: order: 1 --- -import { Render, TabItem, Tabs, PackageManagers, WranglerConfig } from "~/components"; +import { + Render, + TabItem, + Tabs, + PackageManagers, + WranglerConfig, +} from "~/components"; By following this guide, you will create a Worker that uses the Browser Rendering API to take screenshots from web pages. This is a common use case for browser automation. @@ -37,9 +43,7 @@ Create a new Worker project named `browser-worker` by running: In your `browser-worker` directory, install Cloudflare’s [fork of Puppeteer](/browser-rendering/platform/puppeteer/): -```sh -npm install @cloudflare/puppeteer --save-dev -``` + ## 3. Create a KV namespace diff --git a/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx b/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx index 70129551bee5c8..1466e870fb13aa 100644 --- a/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx +++ b/src/content/docs/d1/tutorials/build-a-comments-api/index.mdx @@ -50,9 +50,7 @@ cd d1-example In this tutorial, you will use [Hono](https://github.com/honojs/hono), an Express.js-style framework, to build your API. To use Hono in this project, install it using `npm`: -```sh -npm install hono -``` + ## 2. Initialize your Hono application @@ -204,8 +202,6 @@ Begin by running `wrangler whoami` to confirm that you are logged in to your Clo After you have logged in, confirm that your Wrangler file is configured similarly to what is seen below. You can change the `name` field to a project name of your choice: - - ```toml diff --git a/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx b/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx index 94c64277be9039..85eb80126ba907 100644 --- a/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx +++ b/src/content/docs/d1/tutorials/d1-and-prisma-orm/index.mdx @@ -11,7 +11,7 @@ languages: - SQL --- -import { WranglerConfig, FileTree } from "~/components"; +import { WranglerConfig, FileTree, PackageManagers } from "~/components"; ## What is Prisma ORM? @@ -53,21 +53,21 @@ To set up Prisma ORM, go into your project directory, and install the Prisma CLI ```sh cd prisma-d1-example -npm install prisma --save-dev ``` + + Next, install the Prisma Client package and the driver adapter for D1: -```sh -npm install @prisma/client -npm install @prisma/adapter-d1 -``` + Finally, bootstrap the files required by Prisma ORM using the following command: -```sh -npx prisma init --datasource-provider sqlite -``` + The command above: @@ -86,6 +86,7 @@ generator client { + previewFeatures = ["driverAdapters"] } ``` + :::note Do not specify an `output` destination in the `generator client` block. Instead, allow prisma to generate the files in the default output path. ::: @@ -158,9 +159,11 @@ Answer `yes` to creating a new folder called `migrations`. 
The command has now created a new directory called `migrations` and an empty file called `0001_create_user_table.sql` inside of it: + - prisma-d1-example - migrations - **0001_create_user_table.sql** + Next, you need to add the SQL statement that will create a `User` table to that file. @@ -242,6 +245,7 @@ Your Wrangler command will then look like: npx wrangler d1 execute prisma-demo-db --command "INSERT INTO `"User`" (`"email`", `"name`") VALUES ('jane@prisma.io', 'Jane Doe (Local)');" -- ``` + ::: ### 5. Query your database from the Worker diff --git a/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx b/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx index 60af7188b174e9..6341ef7e915441 100644 --- a/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx +++ b/src/content/docs/d1/tutorials/import-to-d1-with-rest-api/index.mdx @@ -12,7 +12,7 @@ languages: - SQL --- -import { Render, Steps } from "~/components"; +import { Render, Steps, PackageManagers } from "~/components"; In this tutorial, you will learn how to import a database into D1 using the [REST API](/api/resources/d1/subresources/database/methods/import/). @@ -39,6 +39,7 @@ This tutorial uses the following: To create the table, follow these steps: + 1. Go to **Storage & Databases** > **D1**. 2. Select **Create**. 3. Name your database. For this tutorial, name your D1 database `d1-import-tutorial`. @@ -46,60 +47,63 @@ To create the table, follow these steps: 5. Select **Create**. 6. Go to **Console**, then paste the following SQL snippet. This creates a table named `TargetD1Table`. - ```sql - DROP TABLE IF EXISTS TargetD1Table; - CREATE TABLE IF NOT EXISTS TargetD1Table (id INTEGER PRIMARY KEY, text TEXT, date_added TEXT); - ``` + ```sql + DROP TABLE IF EXISTS TargetD1Table; + CREATE TABLE IF NOT EXISTS TargetD1Table (id INTEGER PRIMARY KEY, text TEXT, date_added TEXT); + ``` + + Alternatively, you can use the [Wrangler CLI](/workers/wrangler/install-and-update/). - Alternatively, you can use the [Wrangler CLI](/workers/wrangler/install-and-update/). + ```bash + # Create a D1 database + npx wrangler d1 create d1-import-tutorial - ```bash - # Create a D1 database - npx wrangler d1 create d1-import-tutorial + # Create a D1 table + npx wrangler d1 execute d1-import-tutorial --command="DROP TABLE IF EXISTS TargetD1Table; CREATE TABLE IF NOT EXISTS TargetD1Table (id INTEGER PRIMARY KEY, text TEXT, date_added TEXT);" --remote - # Create a D1 table - npx wrangler d1 execute d1-import-tutorial --command="DROP TABLE IF EXISTS TargetD1Table; CREATE TABLE IF NOT EXISTS TargetD1Table (id INTEGER PRIMARY KEY, text TEXT, date_added TEXT);" --remote + ``` - ``` ## 3. Create an `index.js` file + 1. Create a new directory and initialize a new Node.js project. - ```bash - mkdir d1-import-tutorial - cd d1-import-tutorial - npm init -y - ``` + ```bash + mkdir d1-import-tutorial + cd d1-import-tutorial + npm init -y + ``` 2. In this repository, create a new file called `index.js`. This file will contain the code which uses REST API to import your data to your D1 database. 3. 
In your `index.js` file, define the following variables: - - `TARGET_TABLE`: The target table name - - `ACCOUNT_ID`: The account ID (you can find this in the Cloudflare dashboard > **Workers & Pages**) - - `DATABASE_ID`: The D1 database ID (you can find this in the Cloudflare dashboard > **Storage & Databases** > **D1 SQL Database** > your database) - - `D1_API_KEY`: The D1 API token generated in [step 1](/d1/tutorials/import-to-d1-with-rest-api#1-create-a-d1-api-token) - - :::caution - In production, you should use environment variables to store sensitive information. - ::: - - ```js title="index.js" - const TARGET_TABLE = " "; // for the tutorial, `TargetD1Table` - const ACCOUNT_ID = " "; - const DATABASE_ID = " "; - const D1_API_KEY = " "; - const D1_URL = `https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/import`; - const filename = crypto.randomUUID(); // create a random filename - const uploadSize = 500; - const headers = { - "Content-Type": "application/json", - Authorization: `Bearer ${D1_API_KEY}`, - }; - ``` + - `TARGET_TABLE`: The target table name + - `ACCOUNT_ID`: The account ID (you can find this in the Cloudflare dashboard > **Workers & Pages**) + - `DATABASE_ID`: The D1 database ID (you can find this in the Cloudflare dashboard > **Storage & Databases** > **D1 SQL Database** > your database) + - `D1_API_KEY`: The D1 API token generated in [step 1](/d1/tutorials/import-to-d1-with-rest-api#1-create-a-d1-api-token) + + :::caution + In production, you should use environment variables to store sensitive information. + ::: + + ```js title="index.js" + const TARGET_TABLE = " "; // for the tutorial, `TargetD1Table` + const ACCOUNT_ID = " "; + const DATABASE_ID = " "; + const D1_API_KEY = " "; + const D1_URL = `https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/import`; + const filename = crypto.randomUUID(); // create a random filename + const uploadSize = 500; + const headers = { + "Content-Type": "application/json", + Authorization: `Bearer ${D1_API_KEY}`, + }; + ``` + ## 4. Generate example data (optional) @@ -109,55 +113,57 @@ In practice, you may already have the data you wish to import to a D1 database. This tutorial generates example data to demonstrate the import process. + 1. Install the `@faker-js/faker` module. - ```sh - npm install @faker-js/faker - ``` + 2. Add the following code at the beginning of the `index.js` file. This code creates an array called `data` with 2500 (`uploadSize`) array elements, where each array element contains an object with `id`, `text`, and `date_added`. Each array element corresponds to a table row. - ```js title="index.js" - import crypto from "crypto"; - import { faker } from "@faker-js/faker"; - - // Generate Fake data - const data = Array.from({ length: uploadSize }, () => ({ - id: Math.floor(Math.random() * 1000000), - text: faker.lorem.paragraph(), - date_added: new Date().toISOString().slice(0, 19).replace("T", " "), - })); - ``` + ```js title="index.js" + import crypto from "crypto"; + import { faker } from "@faker-js/faker"; + + // Generate Fake data + const data = Array.from({ length: uploadSize }, () => ({ + id: Math.floor(Math.random() * 1000000), + text: faker.lorem.paragraph(), + date_added: new Date().toISOString().slice(0, 19).replace("T", " "), + })); + ``` + ## 5. Generate the SQL command + 1. Create a function that will generate the SQL command to insert the data into the target table. This function uses the `data` array generated in the previous step. 
- ```js title="index.js" - function makeSqlInsert(data, tableName, skipCols = []) { - const columns = Object.keys(data[0]).join(","); - const values = data - .map((row) => { - return ( - "(" + - Object.values(row) - .map((val) => { - if (skipCols.includes(val) || val === null || val === "") { - return "NULL"; - } - return `'${String(val).replace(/'/g, "").replace(/"/g, "'")}'`; - }) - .join(",") + - ")" - ); - }) - .join(","); - - return `INSERT INTO ${tableName} (${columns}) VALUES ${values};`; - } - ``` + ```js title="index.js" + function makeSqlInsert(data, tableName, skipCols = []) { + const columns = Object.keys(data[0]).join(","); + const values = data + .map((row) => { + return ( + "(" + + Object.values(row) + .map((val) => { + if (skipCols.includes(val) || val === null || val === "") { + return "NULL"; + } + return `'${String(val).replace(/'/g, "").replace(/"/g, "'")}'`; + }) + .join(",") + + ")" + ); + }) + .join(","); + + return `INSERT INTO ${tableName} (${columns}) VALUES ${values};`; + } + ``` + ## 6. Import the data to D1 @@ -170,122 +176,124 @@ The import process consists of four steps: 4. **Polling**: This step polls the import process until it completes. + 1. Create a function called `uploadToD1` which executes the four steps of the import process. - ```js title="index.js" - async function uploadToD1() { - // 1. Init upload - const hashStr = crypto.createHash("md5").update(sqlInsert).digest("hex"); - - try { - const initResponse = await fetch(D1_URL, { - method: "POST", - headers, - body: JSON.stringify({ - action: "init", - etag: hashStr, - }), - }); - - const uploadData = await initResponse.json(); - const uploadUrl = uploadData.result.upload_url; - const filename = uploadData.result.filename; - - // 2. Upload to R2 - const r2Response = await fetch(uploadUrl, { - method: "PUT", - body: sqlInsert, - }); - - const r2Etag = r2Response.headers.get("ETag").replace(/"/g, ""); - - // Verify etag - if (r2Etag !== hashStr) { - throw new Error("ETag mismatch"); - } - - // 3. Start ingestion - const ingestResponse = await fetch(D1_URL, { - method: "POST", - headers, - body: JSON.stringify({ - action: "ingest", - etag: hashStr, - filename, - }), - }); - - const ingestData = await ingestResponse.json(); - console.log("Ingestion Response:", ingestData); - - // 4. Polling - await pollImport(ingestData.result.at_bookmark); - - return "Import completed successfully"; - } catch (e) { - console.error("Error:", e); - return "Import failed"; - } - } - ``` - - In the above code: - - - An `md5` hash of the SQL command is generated. - - `initResponse` initializes the upload process and receives the upload URL. - - `r2Response` uploads the SQL command to the upload URL. - - Before starting ingestion, the ETag is verified. - - `ingestResponse` starts the ingestion process. - - `pollImport` polls the import process until it completes. + ```js title="index.js" + async function uploadToD1() { + // 1. Init upload + const hashStr = crypto.createHash("md5").update(sqlInsert).digest("hex"); + + try { + const initResponse = await fetch(D1_URL, { + method: "POST", + headers, + body: JSON.stringify({ + action: "init", + etag: hashStr, + }), + }); + + const uploadData = await initResponse.json(); + const uploadUrl = uploadData.result.upload_url; + const filename = uploadData.result.filename; + + // 2. 
Upload to R2 + const r2Response = await fetch(uploadUrl, { + method: "PUT", + body: sqlInsert, + }); + + const r2Etag = r2Response.headers.get("ETag").replace(/"/g, ""); + + // Verify etag + if (r2Etag !== hashStr) { + throw new Error("ETag mismatch"); + } + + // 3. Start ingestion + const ingestResponse = await fetch(D1_URL, { + method: "POST", + headers, + body: JSON.stringify({ + action: "ingest", + etag: hashStr, + filename, + }), + }); + + const ingestData = await ingestResponse.json(); + console.log("Ingestion Response:", ingestData); + + // 4. Polling + await pollImport(ingestData.result.at_bookmark); + + return "Import completed successfully"; + } catch (e) { + console.error("Error:", e); + return "Import failed"; + } + } + ``` + + In the above code: + + - An `md5` hash of the SQL command is generated. + - `initResponse` initializes the upload process and receives the upload URL. + - `r2Response` uploads the SQL command to the upload URL. + - Before starting ingestion, the ETag is verified. + - `ingestResponse` starts the ingestion process. + - `pollImport` polls the import process until it completes. 2. Add the `pollImport` function to the `index.js` file. - ```js title="index.js" - async function pollImport(bookmark) { - const payload = { - action: "poll", - current_bookmark: bookmark, - }; + ```js title="index.js" + async function pollImport(bookmark) { + const payload = { + action: "poll", + current_bookmark: bookmark, + }; - while (true) { - const pollResponse = await fetch(D1_URL, { - method: "POST", - headers, - body: JSON.stringify(payload), - }); + while (true) { + const pollResponse = await fetch(D1_URL, { + method: "POST", + headers, + body: JSON.stringify(payload), + }); - const result = await pollResponse.json(); - console.log("Poll Response:", result.result); + const result = await pollResponse.json(); + console.log("Poll Response:", result.result); - const { success, error } = result.result; + const { success, error } = result.result; - if ( - success || - (!success && error === "Not currently importing anything.") - ) { - break; - } + if ( + success || + (!success && error === "Not currently importing anything.") + ) { + break; + } - await new Promise((resolve) => setTimeout(resolve, 1000)); - } - } - ``` + await new Promise((resolve) => setTimeout(resolve, 1000)); + } + } + ``` - The code above does the following: + The code above does the following: - - Sends a `poll` action to the D1 API. - - Polls the import process until it completes. + - Sends a `poll` action to the D1 API. + - Polls the import process until it completes. 3. Finally, add the `runImport` function to the `index.js` file to run the import process. - ```js title="index.js" - async function runImport() { - const result = await uploadToD1(); - console.log(result); - } + ```js title="index.js" + async function runImport() { + const result = await uploadToD1(); + console.log(result); + } + + runImport(); + ``` - runImport(); - ``` ## 7. Write the final code @@ -293,157 +301,161 @@ The import process consists of four steps: In the previous steps, you have created functions to execute various processes involved in importing data into D1. The final code executes those functions to import the example data into the target D1 table. + 1. Copy the final code of your `index.js` file as shown below, with your variables defined at the top of the code. 
- ```js - import crypto from "crypto"; - import { faker } from "@faker-js/faker"; - - const TARGET_TABLE = ""; - const ACCOUNT_ID = ""; - const DATABASE_ID = ""; - const D1_API_KEY = ""; - const D1_URL = `https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/import`; - const uploadSize = 500; - const headers = { - "Content-Type": "application/json", - Authorization: `Bearer ${D1_API_KEY}`, - }; - - // Generate Fake data - const data = Array.from({ length: uploadSize }, () => ({ - id: Math.floor(Math.random() * 1000000), - text: faker.lorem.paragraph(), - date_added: new Date().toISOString().slice(0, 19).replace("T", " "), - })); - - // Make SQL insert statements - function makeSqlInsert(data, tableName, skipCols = []) { - const columns = Object.keys(data[0]).join(","); - const values = data - .map((row) => { - return ( - "(" + - Object.values(row) - .map((val) => { - if (skipCols.includes(val) || val === null || val === "") { - return "NULL"; - } - return `'${String(val).replace(/'/g, "").replace(/"/g, "'")}'`; - }) - .join(",") + - ")" - ); - }) - .join(","); - - return `INSERT INTO ${tableName} (${columns}) VALUES ${values};`; - } - - const sqlInsert = makeSqlInsert(data, TARGET_TABLE); - - async function pollImport(bookmark) { - const payload = { - action: "poll", - current_bookmark: bookmark, - }; - - while (true) { - const pollResponse = await fetch(D1_URL, { - method: "POST", - headers, - body: JSON.stringify(payload), - }); - - const result = await pollResponse.json(); - console.log("Poll Response:", result.result); - - const { success, error } = result.result; - - if ( - success || - (!success && error === "Not currently importing anything.") - ) { - break; - } - - await new Promise((resolve) => setTimeout(resolve, 1000)); - } - } - - // Upload to D1 - async function uploadToD1() { - // 1. Init upload - const hashStr = crypto.createHash("md5").update(sqlInsert).digest("hex"); - - try { - const initResponse = await fetch(D1_URL, { - method: "POST", - headers, - body: JSON.stringify({ - action: "init", - etag: hashStr, - }), - }); - - const uploadData = await initResponse.json(); - const uploadUrl = uploadData.result.upload_url; - const filename = uploadData.result.filename; - - // 2. Upload to R2 - const r2Response = await fetch(uploadUrl, { - method: "PUT", - body: sqlInsert, - }); - - const r2Etag = r2Response.headers.get("ETag").replace(/"/g, ""); - - // Verify etag - if (r2Etag !== hashStr) { - throw new Error("ETag mismatch"); - } - - // 3. Start ingestion - const ingestResponse = await fetch(D1_URL, { - method: "POST", - headers, - body: JSON.stringify({ - action: "ingest", - etag: hashStr, - filename, - }), - }); - - const ingestData = await ingestResponse.json(); - console.log("Ingestion Response:", ingestData); - - // 4. 
Polling - await pollImport(ingestData.result.at_bookmark); - - return "Import completed successfully"; - } catch (e) { - console.error("Error:", e); - return "Import failed"; - } - } - - async function runImport() { - const result = await uploadToD1(); - console.log(result); - } - - runImport(); - ``` + ```js + import crypto from "crypto"; + import { faker } from "@faker-js/faker"; + + const TARGET_TABLE = ""; + const ACCOUNT_ID = ""; + const DATABASE_ID = ""; + const D1_API_KEY = ""; + const D1_URL = `https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/import`; + const uploadSize = 500; + const headers = { + "Content-Type": "application/json", + Authorization: `Bearer ${D1_API_KEY}`, + }; + + // Generate Fake data + const data = Array.from({ length: uploadSize }, () => ({ + id: Math.floor(Math.random() * 1000000), + text: faker.lorem.paragraph(), + date_added: new Date().toISOString().slice(0, 19).replace("T", " "), + })); + + // Make SQL insert statements + function makeSqlInsert(data, tableName, skipCols = []) { + const columns = Object.keys(data[0]).join(","); + const values = data + .map((row) => { + return ( + "(" + + Object.values(row) + .map((val) => { + if (skipCols.includes(val) || val === null || val === "") { + return "NULL"; + } + return `'${String(val).replace(/'/g, "").replace(/"/g, "'")}'`; + }) + .join(",") + + ")" + ); + }) + .join(","); + + return `INSERT INTO ${tableName} (${columns}) VALUES ${values};`; + } + + const sqlInsert = makeSqlInsert(data, TARGET_TABLE); + + async function pollImport(bookmark) { + const payload = { + action: "poll", + current_bookmark: bookmark, + }; + + while (true) { + const pollResponse = await fetch(D1_URL, { + method: "POST", + headers, + body: JSON.stringify(payload), + }); + + const result = await pollResponse.json(); + console.log("Poll Response:", result.result); + + const { success, error } = result.result; + + if ( + success || + (!success && error === "Not currently importing anything.") + ) { + break; + } + + await new Promise((resolve) => setTimeout(resolve, 1000)); + } + } + + // Upload to D1 + async function uploadToD1() { + // 1. Init upload + const hashStr = crypto.createHash("md5").update(sqlInsert).digest("hex"); + + try { + const initResponse = await fetch(D1_URL, { + method: "POST", + headers, + body: JSON.stringify({ + action: "init", + etag: hashStr, + }), + }); + + const uploadData = await initResponse.json(); + const uploadUrl = uploadData.result.upload_url; + const filename = uploadData.result.filename; + + // 2. Upload to R2 + const r2Response = await fetch(uploadUrl, { + method: "PUT", + body: sqlInsert, + }); + + const r2Etag = r2Response.headers.get("ETag").replace(/"/g, ""); + + // Verify etag + if (r2Etag !== hashStr) { + throw new Error("ETag mismatch"); + } + + // 3. Start ingestion + const ingestResponse = await fetch(D1_URL, { + method: "POST", + headers, + body: JSON.stringify({ + action: "ingest", + etag: hashStr, + filename, + }), + }); + + const ingestData = await ingestResponse.json(); + console.log("Ingestion Response:", ingestData); + + // 4. Polling + await pollImport(ingestData.result.at_bookmark); + + return "Import completed successfully"; + } catch (e) { + console.error("Error:", e); + return "Import failed"; + } + } + + async function runImport() { + const result = await uploadToD1(); + console.log(result); + } + + runImport(); + ``` + ## 8. Run the code + 1. Run your code. 
- ```sh - node index.js - ``` + ```sh + node index.js + ``` + You will now see your target D1 table populated with the example data. diff --git a/src/content/docs/d1/tutorials/using-read-replication-for-e-com/index.mdx b/src/content/docs/d1/tutorials/using-read-replication-for-e-com/index.mdx index fbb16123ec040b..df765c46b0e1d3 100644 --- a/src/content/docs/d1/tutorials/using-read-replication-for-e-com/index.mdx +++ b/src/content/docs/d1/tutorials/using-read-replication-for-e-com/index.mdx @@ -42,23 +42,21 @@ You can then visit the deployed application. Create a new Workers project by running the following command: - - - + + + For creating the API routes, you will use [Hono](https://hono.dev/). You need to install Hono by running the following command: -```sh -npm install hono -``` + ## Step 2: Update the frontend @@ -270,6 +268,7 @@ Update the `public/index.html` file to list the products. Use the below code as Create a new `public/product-details.html` file to display a single product.
+ ```html @@ -285,186 +284,192 @@ Create a new `public/product-details.html` file to display a single product. font-family: Arial, sans-serif; } - body { - background-color: #f9fafb; - min-height: 100vh; - display: flex; - flex-direction: column; - } - - header { - background-color: white; - padding: 1rem 2rem; - display: flex; - justify-content: space-between; - align-items: center; - border-bottom: 1px solid #e5e7eb; - } - - .store-title { - font-weight: bold; - font-size: 1.25rem; - text-decoration: none; - color: black; - } - - .cart-button { - padding: 0.5rem 1rem; - cursor: pointer; - background: none; - border: none; - } - - .product-container { - max-width: 800px; - margin: 2rem auto; - background-color: white; - border-radius: 0.5rem; - box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); - padding: 2rem; - } - - .product-title { - font-size: 1.875rem; - font-weight: bold; - margin-bottom: 0.5rem; - } - - .product-description { - color: #4b5563; - margin-bottom: 1.5rem; - } - - .product-price { - font-size: 1.875rem; - font-weight: bold; - margin-bottom: 0.5rem; - } + body { + background-color: #f9fafb; + min-height: 100vh; + display: flex; + flex-direction: column; + } - .product-stock { - font-size: 0.875rem; - color: #4b5563; - text-align: right; - } + header { + background-color: white; + padding: 1rem 2rem; + display: flex; + justify-content: space-between; + align-items: center; + border-bottom: 1px solid #e5e7eb; + } - .add-to-cart-btn { - display: block; - width: 100%; - padding: 0.75rem; - background-color: #2563eb; - color: white; - border: none; - border-radius: 0.375rem; - cursor: pointer; - text-align: center; - font-size: 1rem; - margin-top: 1.5rem; - } + .store-title { + font-weight: bold; + font-size: 1.25rem; + text-decoration: none; + color: black; + } - .add-to-cart-btn:hover { - background-color: #1d4ed8; - } + .cart-button { + padding: 0.5rem 1rem; + cursor: pointer; + background: none; + border: none; + } - .price-stock-container { - display: flex; - justify-content: space-between; - align-items: center; - margin-bottom: 1rem; - } + .product-container { + max-width: 800px; + margin: 2rem auto; + background-color: white; + border-radius: 0.5rem; + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1); + padding: 2rem; + } - footer { - background-color: white; - padding: 1rem 2rem; - text-align: center; - border-top: 1px solid #e5e7eb; - color: #4b5563; - font-size: 0.875rem; - margin-top: auto; - } + .product-title { + font-size: 1.875rem; + font-weight: bold; + margin-bottom: 0.5rem; + } - /* Back button */ - .back-button { - display: inline-block; - margin-bottom: 1.5rem; - color: #2563eb; - text-decoration: none; - font-size: 0.875rem; - } + .product-description { + color: #4b5563; + margin-bottom: 1.5rem; + } - .back-button:hover { - text-decoration: underline; - } + .product-price { + font-size: 1.875rem; + font-weight: bold; + margin-bottom: 0.5rem; + } - /* Notification */ - .notification { - position: fixed; - top: 1rem; - right: 1rem; - background-color: #10b981; - color: white; - padding: 0.75rem 1rem; - border-radius: 0.375rem; - box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); - transform: translateX(150%); - transition: transform 0.3s ease; - } + .product-stock { + font-size: 0.875rem; + color: #4b5563; + text-align: right; + } - .notification.show { - transform: translateX(0); - } - - - -
- E-commerce Store - -
+ .add-to-cart-btn { + display: block; + width: 100%; + padding: 0.75rem; + background-color: #2563eb; + color: white; + border: none; + border-radius: 0.375rem; + cursor: pointer; + text-align: center; + font-size: 1rem; + margin-top: 1.5rem; + } -
- ← Back to products -

Product Name

-

Product description goes here.

+ .add-to-cart-btn:hover { + background-color: #1d4ed8; + } -
-

$0.00

-

0 in stock

-
+ .price-stock-container { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 1rem; + } - -
+ footer { + background-color: white; + padding: 1rem 2rem; + text-align: center; + border-top: 1px solid #e5e7eb; + color: #4b5563; + font-size: 0.875rem; + margin-top: auto; + } -
Added to cart!
+ /* Back button */ + .back-button { + display: inline-block; + margin-bottom: 1.5rem; + color: #2563eb; + text-decoration: none; + font-size: 0.875rem; + } -
-

© 2025 E-commerce Store. All rights reserved.

-
+ .back-button:hover { + text-decoration: underline; + } - - + /* Notification */ + .notification { + position: fixed; + top: 1rem; + right: 1rem; + background-color: #10b981; + color: white; + padding: 0.75rem 1rem; + border-radius: 0.375rem; + box-shadow: 0 2px 5px rgba(0, 0, 0, 0.2); + transform: translateX(150%); + transition: transform 0.3s ease; + } + .notification.show { + transform: translateX(0); + } + + + +
+ E-commerce Store + +
+ +
+ ← Back to products +

Product Name

+

+ Product description goes here. +

+ +
+

$0.00

+

0 in stock

+
+ + +
+ +
Added to cart!
+ +
+

© 2025 E-commerce Store. All rights reserved.

+
+ + + - ``` +
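A small client-side script is what brings this page to life. As a rough sketch only: the logic below assumes the product ID arrives as an `id` query parameter and uses the `GET /api/products/:id` route built later in this tutorial; the product field names are assumptions, not taken from the tutorial's schema.

```ts
// Sketch only: field names (name, description, price, stock) and the `id`
// query parameter are illustrative assumptions.
async function loadProduct(): Promise<void> {
	const id = new URLSearchParams(window.location.search).get("id");
	if (!id) return;

	// The Worker route defined later in this tutorial returns an array of rows.
	const res = await fetch(`/api/products/${id}`);
	const [product] = await res.json();
	if (!product) return;

	// These selectors match the classes styled in the CSS above.
	document.querySelector(".product-title")!.textContent = product.name;
	document.querySelector(".product-description")!.textContent =
		product.description;
	document.querySelector(".product-price")!.textContent = `$${product.price}`;
	document.querySelector(".product-stock")!.textContent =
		`${product.stock} in stock`;
}

loadProduct();
```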
You now have a frontend that lists products and displays a single product. However, the frontend is not yet connected to the D1 database. If you start the development server now, you will see no products. In the next steps, you will create a D1 database and create APIs to fetch products and display them on the frontend.

@@ -480,13 +485,14 @@
npx wrangler d1 create fast-commerce
```

Add the D1 bindings returned in the terminal to the `wrangler` file:

-
-	```toml
-	[[d1_databases]]
-	binding = "DB"
-	database_name = "fast-commerce"
-	database_id = "YOUR_DATABASE_ID"
-	```
+
+```toml
+[[d1_databases]]
+binding = "DB"
+database_name = "fast-commerce"
+database_id = "YOUR_DATABASE_ID"
+```
+

Run the following command to update the `Env` interface in the `worker-configuration.d.ts` file.

@@ -560,35 +566,35 @@ To make the application more resilient, you can add retry logic to the API route

```ts
export interface RetryConfig {
-	maxRetries: number;
-	initialDelay: number;
-	maxDelay: number;
-	backoffFactor: number;
+	maxRetries: number;
+	initialDelay: number;
+	maxDelay: number;
+	backoffFactor: number;
}

const shouldRetry = (error: unknown): boolean => {
-	const errMsg = error instanceof Error ? error.message : String(error);
-	return (
-		errMsg.includes("Network connection lost") ||
-		errMsg.includes("storage caused object to be reset") ||
-		errMsg.includes("reset because its code was updated")
-	);
+	const errMsg = error instanceof Error ? error.message : String(error);
+	return (
+		errMsg.includes("Network connection lost") ||
+		errMsg.includes("storage caused object to be reset") ||
+		errMsg.includes("reset because its code was updated")
+	);
};

// Helper function for sleeping
const sleep = (ms: number): Promise<void> => {
-	return new Promise((resolve) => setTimeout(resolve, ms));
+	return new Promise((resolve) => setTimeout(resolve, ms));
};

export const defaultRetryConfig: RetryConfig = {
-	maxRetries: 3,
-	initialDelay: 100,
-	maxDelay: 1000,
-	backoffFactor: 2,
+	maxRetries: 3,
+	initialDelay: 100,
+	maxDelay: 1000,
+	backoffFactor: 2,
};

export async function withRetry<T>(
-	operation: () => Promise<T>,
+	operation: () => Promise<T>,
	config: Partial<RetryConfig> = defaultRetryConfig,
): Promise<T> {
	const maxRetries = config.maxRetries ?? defaultRetryConfig.maxRetries;
@@ -597,30 +603,30 @@
	const backoffFactor = config.backoffFactor ??
defaultRetryConfig.backoffFactor; - let lastError: Error | unknown; - let delay = initialDelay; + let lastError: Error | unknown; + let delay = initialDelay; - for (let attempt = 0; attempt <= maxRetries; attempt++) { - try { - const result = await operation(); - return result; - } catch (error) { - lastError = error; + for (let attempt = 0; attempt <= maxRetries; attempt++) { + try { + const result = await operation(); + return result; + } catch (error) { + lastError = error; - if (!shouldRetry(error) || attempt === maxRetries) { - throw error; - } + if (!shouldRetry(error) || attempt === maxRetries) { + throw error; + } - // Add randomness to avoid synchronizing retries - // Wait for a random delay between delay and delay*2 - await sleep(delay * (1 + Math.random())); + // Add randomness to avoid synchronizing retries + // Wait for a random delay between delay and delay*2 + await sleep(delay * (1 + Math.random())); - // Calculate next delay with exponential backoff - delay = Math.min(delay * backoffFactor, maxDelay); - } - } + // Calculate next delay with exponential backoff + delay = Math.min(delay * backoffFactor, maxDelay); + } + } - throw lastError; + throw lastError; } ``` @@ -648,71 +654,71 @@ Update the API routes to connect to the D1 database. ```ts app.post("/api/product", async (c) => { - const product = await c.req.json(); - - if (!product) { - return c.json({ message: "No data passed" }, 400); - } - - const db = c.env.DB; - const session = db.withSession("first-primary"); - - const { id } = product; - - try { - return await withRetry(async () => { - // Check if the product exists - const { results } = await session - .prepare("SELECT * FROM products where id = ?") - .bind(id) - .run(); - if (results.length === 0) { - const fields = [...Object.keys(product)]; - const values = [...Object.values(product)]; - // Insert the product - await session - .prepare( - `INSERT INTO products (${fields.join(", ")}) VALUES (${fields.map(() => "?").join(", ")})` - ) - .bind(...values) - .run(); - const latestBookmark = session.getBookmark(); - latestBookmark && - setCookie(c, "product_bookmark", latestBookmark, { - maxAge: 60 * 60, // 1 hour - }); - return c.json({ message: "Product inserted" }); - } - - // Update the product - const updates = Object.entries(product) - .filter(([_, value]) => value !== undefined) - .map(([key, _]) => `${key} = ?`) - .join(", "); - - if (!updates) { - throw new Error("No valid fields to update"); - } - - const values = Object.entries(product) - .filter(([_, value]) => value !== undefined) - .map(([_, value]) => value); - - await session - .prepare(`UPDATE products SET ${updates} WHERE id = ?`) - .bind(...[...values, id]) - .run(); - const latestBookmark = session.getBookmark(); - latestBookmark && - setCookie(c, "product_bookmark", latestBookmark, { - maxAge: 60 * 60, // 1 hour - }); - return c.json({ message: "Product updated" }); - }); - } catch (e) { - console.error(e); - return c.json({ message: "Error upserting product" }, 500); - } + const product = await c.req.json(); + + if (!product) { + return c.json({ message: "No data passed" }, 400); + } + + const db = c.env.DB; + const session = db.withSession("first-primary"); + + const { id } = product; + + try { + return await withRetry(async () => { + // Check if the product exists + const { results } = await session + .prepare("SELECT * FROM products where id = ?") + .bind(id) + .run(); + if (results.length === 0) { + const fields = [...Object.keys(product)]; + const values = [...Object.values(product)]; + // 
Insert the product + await session + .prepare( + `INSERT INTO products (${fields.join(", ")}) VALUES (${fields.map(() => "?").join(", ")})`, + ) + .bind(...values) + .run(); + const latestBookmark = session.getBookmark(); + latestBookmark && + setCookie(c, "product_bookmark", latestBookmark, { + maxAge: 60 * 60, // 1 hour + }); + return c.json({ message: "Product inserted" }); + } + + // Update the product + const updates = Object.entries(product) + .filter(([_, value]) => value !== undefined) + .map(([key, _]) => `${key} = ?`) + .join(", "); + + if (!updates) { + throw new Error("No valid fields to update"); + } + + const values = Object.entries(product) + .filter(([_, value]) => value !== undefined) + .map(([_, value]) => value); + + await session + .prepare(`UPDATE products SET ${updates} WHERE id = ?`) + .bind(...[...values, id]) + .run(); + const latestBookmark = session.getBookmark(); + latestBookmark && + setCookie(c, "product_bookmark", latestBookmark, { + maxAge: 60 * 60, // 1 hour + }); + return c.json({ message: "Product updated" }); + }); + } catch (e) { + console.error(e); + return c.json({ message: "Error upserting product" }, 500); + } }); ``` @@ -735,31 +741,31 @@ If you are using an external platform to manage your products, you can connect t ```ts app.get("/api/products", async (c) => { - const db = c.env.DB; + const db = c.env.DB; - // Get bookmark from the cookie - const bookmark = getCookie(c, "product_bookmark") || "first-unconstrained"; + // Get bookmark from the cookie + const bookmark = getCookie(c, "product_bookmark") || "first-unconstrained"; - const session = db.withSession(bookmark); + const session = db.withSession(bookmark); - try { - return await withRetry(async () => { - const { results } = await session.prepare("SELECT * FROM products").all(); + try { + return await withRetry(async () => { + const { results } = await session.prepare("SELECT * FROM products").all(); - const latestBookmark = session.getBookmark(); + const latestBookmark = session.getBookmark(); - // Set the bookmark in the cookie - latestBookmark && - setCookie(c, "product_bookmark", latestBookmark, { - maxAge: 60 * 60, // 1 hour - }); + // Set the bookmark in the cookie + latestBookmark && + setCookie(c, "product_bookmark", latestBookmark, { + maxAge: 60 * 60, // 1 hour + }); - return c.json(results); - }); - } catch (e) { - console.error(e); - return c.json([]); - } + return c.json(results); + }); + } catch (e) { + console.error(e); + return c.json([]); + } }); ``` @@ -776,42 +782,42 @@ In the above code: ```ts app.get("/api/products/:id", async (c) => { - const id = c.req.param("id"); + const id = c.req.param("id"); - if (!id) { - return c.json({ message: "Invalid id" }, 400); - } + if (!id) { + return c.json({ message: "Invalid id" }, 400); + } - const db = c.env.DB; + const db = c.env.DB; - // Get bookmark from the cookie - const bookmark = getCookie(c, "product_bookmark") || "first-unconstrained"; + // Get bookmark from the cookie + const bookmark = getCookie(c, "product_bookmark") || "first-unconstrained"; - const session = db.withSession(bookmark); + const session = db.withSession(bookmark); - try { - return await withRetry(async () => { - const { results } = await session - .prepare("SELECT * FROM products where id = ?") - .bind(id) - .run(); + try { + return await withRetry(async () => { + const { results } = await session + .prepare("SELECT * FROM products where id = ?") + .bind(id) + .run(); - const latestBookmark = session.getBookmark(); + const latestBookmark = 
session.getBookmark(); - // Set the bookmark in the cookie - latestBookmark && - setCookie(c, "product_bookmark", latestBookmark, { - maxAge: 60 * 60, // 1 hour - }); + // Set the bookmark in the cookie + latestBookmark && + setCookie(c, "product_bookmark", latestBookmark, { + maxAge: 60 * 60, // 1 hour + }); - console.log(results); + console.log(results); - return c.json(results); - }); - } catch (e) { - console.error(e); - return c.json([]); - } + return c.json(results); + }); + } catch (e) { + console.error(e); + return c.json([]); + } }); ``` diff --git a/src/content/docs/developer-spotlight/tutorials/create-sitemap-from-sanity-cms.mdx b/src/content/docs/developer-spotlight/tutorials/create-sitemap-from-sanity-cms.mdx index 583d06bfefdbea..b761f635d213cc 100644 --- a/src/content/docs/developer-spotlight/tutorials/create-sitemap-from-sanity-cms.mdx +++ b/src/content/docs/developer-spotlight/tutorials/create-sitemap-from-sanity-cms.mdx @@ -51,25 +51,7 @@ Select the options in the command-line interface (CLI) that work best for you, s Next, require the `@sanity/client` package. - - -```sh -pnpm install @sanity/client -``` - - - -```sh -npm install @sanity/client -``` - - - -```sh -yarn add @sanity/client -``` - - + ## Configure Wrangler diff --git a/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx b/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx index dfbb5b7b1e0dfb..4ed59a28b729a0 100644 --- a/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx +++ b/src/content/docs/hyperdrive/tutorials/serverless-timeseries-api-with-timescale/index.mdx @@ -158,9 +158,7 @@ id = "your-id-here" Install the Postgres driver into your Worker project: -```sh -npm install pg -``` + Now copy the below Worker code, and replace the current code in `./src/index.ts`. The code below: diff --git a/src/content/docs/pages/framework-guides/deploy-a-vitepress-site.mdx b/src/content/docs/pages/framework-guides/deploy-a-vitepress-site.mdx index 4510b656ba68e7..b8ac0e4be0b078 100644 --- a/src/content/docs/pages/framework-guides/deploy-a-vitepress-site.mdx +++ b/src/content/docs/pages/framework-guides/deploy-a-vitepress-site.mdx @@ -8,7 +8,13 @@ banner: id: pages-migrate-to-workers --- -import { PagesBuildPreset, Render, TabItem, Tabs } from "~/components"; +import { + PagesBuildPreset, + Render, + TabItem, + Tabs, + PackageManagers, +} from "~/components"; [VitePress](https://vitepress.dev/) is a [static site generator](https://en.wikipedia.org/wiki/Static_site_generator) (SSG) designed for building fast, content-centric websites. VitePress takes your source content written in [Markdown](https://en.wikipedia.org/wiki/Markdown), applies a theme to it, and generates static HTML pages that can be easily deployed anywhere. 
@@ -20,60 +26,12 @@ VitePress ships with a command line setup wizard that will help you scaffold a b Run the following command in your terminal to create a new VitePress project: - - -```sh -npx vitepress@latest init -``` - - - -```sh -pnpm dlx vitepress@latest init -``` - - - -```sh -yarn dlx vitepress@latest init -``` - - - -```sh -bunx vitepress@latest init -``` - - + Amongst other questions, the setup wizard will ask you in which directory to save your new project, make sure to be in the project's directory and then install the `vitepress` dependency with the following command: - - -```sh -npm add -D vitepress -``` - - - -```sh -pnpm add -D vitepress -``` - - - -```sh -yarn add -D vitepress -``` - - - -```sh -bun add -D vitepress -``` - - + :::note diff --git a/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx b/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx index 9d532c92dfe1d2..88f28e4d724088 100644 --- a/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx +++ b/src/content/docs/pages/framework-guides/nextjs/ssr/get-started.mdx @@ -44,9 +44,7 @@ For more guidance on developing your app, refer to [Bindings](/pages/framework-g First, install [@cloudflare/next-on-pages](https://github.com/cloudflare/next-on-pages): -```sh -npm install --save-dev @cloudflare/next-on-pages -``` + ### 2. Add Wrangler file @@ -112,9 +110,7 @@ Add the following to the scripts field of your `package.json` file: Either deploy via the command line: -```sh -npm run deploy -``` + Or [connect a Github or Gitlab repository](/pages/get-started/git-integration/), and Cloudflare will automatically build and deploy each pull request you merge to your production branch. @@ -122,9 +118,7 @@ Or [connect a Github or Gitlab repository](/pages/get-started/git-integration/), Optionally, you might want to add `eslint-plugin-next-on-pages`, which lints your Next.js app to ensure it is configured correctly to run on Cloudflare Pages. -```sh -npm install --save-dev eslint-plugin-next-on-pages -``` + Once it is installed, add the following to `.eslintrc.json`: diff --git a/src/content/docs/pages/functions/plugins/cloudflare-access.mdx b/src/content/docs/pages/functions/plugins/cloudflare-access.mdx index 3bd366aee485d8..6ba36620753507 100644 --- a/src/content/docs/pages/functions/plugins/cloudflare-access.mdx +++ b/src/content/docs/pages/functions/plugins/cloudflare-access.mdx @@ -10,13 +10,13 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The Cloudflare Access Pages Plugin is a middleware to validate Cloudflare Access JWT assertions. It also includes an API to lookup additional information about a given user's JWT. ## Installation -```sh -npm install @cloudflare/pages-plugin-cloudflare-access -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/google-chat.mdx b/src/content/docs/pages/functions/plugins/google-chat.mdx index a560acdf2cb2a8..5e70a6570ba1e3 100644 --- a/src/content/docs/pages/functions/plugins/google-chat.mdx +++ b/src/content/docs/pages/functions/plugins/google-chat.mdx @@ -10,13 +10,13 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The Google Chat Pages Plugin creates a Google Chat bot which can respond to messages. It also includes an API for interacting with Google Chat (for example, for creating messages) without the need for user input. This API is useful for situations such as alerts. 
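As a preview of the shape this takes once installed, a bot handler can be mounted as Pages Functions middleware. This is a hedged sketch: the message-handler signature below is an assumption based on the plugin's description, not something confirmed by this diff.

```ts
// functions/_middleware.ts: an illustrative sketch only.
import googleChatPlugin from "@cloudflare/pages-plugin-google-chat";

export const onRequest: PagesFunction = googleChatPlugin(async (message) => {
	// Reply to incoming Google Chat messages.
	if (message.text === "ping") {
		return { text: "pong" };
	}
	return { text: "Sorry, I did not understand that." };
});
```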
## Installation -```sh -npm install @cloudflare/pages-plugin-google-chat -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/graphql.mdx b/src/content/docs/pages/functions/plugins/graphql.mdx index 91e77b0b23d7ce..f09dbebc1fa377 100644 --- a/src/content/docs/pages/functions/plugins/graphql.mdx +++ b/src/content/docs/pages/functions/plugins/graphql.mdx @@ -10,13 +10,13 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The GraphQL Pages Plugin creates a GraphQL server which can respond to `application/json` and `application/graphql` `POST` requests. It responds with [the GraphQL Playground](https://github.com/graphql/graphql-playground) for `GET` requests. ## Installation -```sh -npm install @cloudflare/pages-plugin-graphql -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/hcaptcha.mdx b/src/content/docs/pages/functions/plugins/hcaptcha.mdx index 03e9916b77a57f..7e467fc07ad857 100644 --- a/src/content/docs/pages/functions/plugins/hcaptcha.mdx +++ b/src/content/docs/pages/functions/plugins/hcaptcha.mdx @@ -10,15 +10,13 @@ banner: id: pages-migrate-to-workers --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; The hCaptcha Pages Plugin validates hCaptcha tokens. ## Installation -```sh -npm install @cloudflare/pages-plugin-hcaptcha -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/honeycomb.mdx b/src/content/docs/pages/functions/plugins/honeycomb.mdx index 720418d635cafb..cfeab51e5473fa 100644 --- a/src/content/docs/pages/functions/plugins/honeycomb.mdx +++ b/src/content/docs/pages/functions/plugins/honeycomb.mdx @@ -10,13 +10,13 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The Honeycomb Pages Plugin automatically sends traces to Honeycomb for analysis and observability. ## Installation -```sh -npm install @cloudflare/pages-plugin-honeycomb -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/sentry.mdx b/src/content/docs/pages/functions/plugins/sentry.mdx index 1cdd68b3feb3a5..92725f6ec217c5 100644 --- a/src/content/docs/pages/functions/plugins/sentry.mdx +++ b/src/content/docs/pages/functions/plugins/sentry.mdx @@ -10,6 +10,8 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + :::note Sentry now provides official support for Cloudflare Workers and Pages. Refer to the [Sentry documentation](https://docs.sentry.io/platforms/javascript/guides/cloudflare/) for more details. @@ -20,9 +22,7 @@ The Sentry Pages Plugin captures and logs all exceptions which occur below it in ## Installation -```sh -npm install @cloudflare/pages-plugin-sentry -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/static-forms.mdx b/src/content/docs/pages/functions/plugins/static-forms.mdx index 6f1be5c3ecfa5c..ed33ed1f604b63 100644 --- a/src/content/docs/pages/functions/plugins/static-forms.mdx +++ b/src/content/docs/pages/functions/plugins/static-forms.mdx @@ -10,13 +10,13 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The Static Forms Pages Plugin intercepts all form submissions made which have the `data-static-form-name` attribute set. This allows you to take action on these form submissions by, for example, saving the submission to KV. 
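To make the plugin's role concrete before installing it, here is a minimal sketch of mounting it as Pages Functions middleware. The `respondWith` option and its `{ formData, name }` argument follow the plugin's documented shape; the `email` form field is illustrative.

```ts
// functions/_middleware.ts: a sketch, assuming the documented respondWith option.
import staticFormsPlugin from "@cloudflare/pages-plugin-static-forms";

export const onRequest: PagesFunction = staticFormsPlugin({
	respondWith: ({ formData, name }) => {
		// `name` is the value of the intercepted form's
		// data-static-form-name attribute.
		const email = formData.get("email");
		return new Response(`Thank you for your "${name}" submission, ${email}!`);
	},
});
```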
## Installation -```sh -npm install @cloudflare/pages-plugin-static-forms -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/stytch.mdx b/src/content/docs/pages/functions/plugins/stytch.mdx index 42f68537b6a0ce..94d23055e6fd2f 100644 --- a/src/content/docs/pages/functions/plugins/stytch.mdx +++ b/src/content/docs/pages/functions/plugins/stytch.mdx @@ -10,13 +10,13 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The Stytch Pages Plugin is a middleware which validates all requests and their `session_token`. ## Installation -```sh -npm install @cloudflare/pages-plugin-stytch -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/turnstile.mdx b/src/content/docs/pages/functions/plugins/turnstile.mdx index 1a19aabdda7f70..83e7727ee9728e 100644 --- a/src/content/docs/pages/functions/plugins/turnstile.mdx +++ b/src/content/docs/pages/functions/plugins/turnstile.mdx @@ -10,7 +10,7 @@ banner: id: pages-migrate-to-workers --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; [Turnstile](/turnstile/) is Cloudflare's smart CAPTCHA alternative. @@ -18,9 +18,7 @@ The Turnstile Pages Plugin validates Cloudflare Turnstile tokens. ## Installation -```sh -npm install @cloudflare/pages-plugin-turnstile -``` + ## Usage diff --git a/src/content/docs/pages/functions/plugins/vercel-og.mdx b/src/content/docs/pages/functions/plugins/vercel-og.mdx index 6fed3463cfb87a..62fa23c40d1571 100644 --- a/src/content/docs/pages/functions/plugins/vercel-og.mdx +++ b/src/content/docs/pages/functions/plugins/vercel-og.mdx @@ -8,6 +8,8 @@ banner: id: pages-migrate-to-workers --- +import { PackageManagers } from "~/components"; + The `@vercel/og` Pages Plugin is a middleware which renders social images for webpages. It also includes an API to create arbitrary images. As the name suggests, it is powered by [`@vercel/og`](https://vercel.com/docs/concepts/functions/edge-functions/og-image-generation). This plugin and its underlying [Satori](https://github.com/vercel/satori) library was created by the Vercel team. @@ -16,9 +18,7 @@ As the name suggests, it is powered by [`@vercel/og`](https://vercel.com/docs/co To install the `@vercel/og` Pages Plugin, run: -```sh -npm install @cloudflare/pages-plugin-vercel-og -``` + ## Use diff --git a/src/content/docs/pages/tutorials/add-a-react-form-with-formspree/index.mdx b/src/content/docs/pages/tutorials/add-a-react-form-with-formspree/index.mdx index 09fc44ac23eb68..f6cc47097865dd 100644 --- a/src/content/docs/pages/tutorials/add-a-react-form-with-formspree/index.mdx +++ b/src/content/docs/pages/tutorials/add-a-react-form-with-formspree/index.mdx @@ -9,6 +9,8 @@ languages: - JavaScript --- +import { PackageManagers } from "~/components"; + Almost every React website needs a form to collect user data. [Formspree](https://formspree.io/) is a back-end service that handles form processing and storage, allowing developers to include forms on their website without writing server-side code or functions. In this tutorial, you will create a `
` component using React and add it to a single page application built with `create-react-app`. Though you are using `create-react-app` (CRA), the concepts will apply to any React framework including Next.js, Gatsby, and more. You will use Formspree to collect the submitted data and send out email notifications when new submissions arrive, without requiring any server-side coding. @@ -51,9 +53,7 @@ Next, you will build the form component using a helper library from Formspree, [ Install it with: -```sh -npm install --save @formspree/react -``` + Then paste the following code snippet into the `ContactForm.js` file: diff --git a/src/content/docs/pages/tutorials/build-a-blog-using-nuxt-and-sanity/index.mdx b/src/content/docs/pages/tutorials/build-a-blog-using-nuxt-and-sanity/index.mdx index 7f7e38f5e9debb..590407ffe98f3c 100644 --- a/src/content/docs/pages/tutorials/build-a-blog-using-nuxt-and-sanity/index.mdx +++ b/src/content/docs/pages/tutorials/build-a-blog-using-nuxt-and-sanity/index.mdx @@ -10,7 +10,7 @@ languages: - JavaScript --- -import { Stream } from "~/components"; +import { Stream, PackageManagers } from "~/components"; In this tutorial, you will build a blog application using Nuxt.js and Sanity.io and deploy it on Cloudflare Pages. Nuxt.js is a powerful static site generator built on the front-end framework Vue.js. Sanity.io is a headless CMS tool built for managing your application's data without needing to maintain a database. @@ -27,9 +27,9 @@ To begin, create a new Sanity project, using one of Sanity's templates, the blog Create your new Sanity project by installing the `@sanity/cli` client from npm, and running `sanity init` in your terminal: -```sh title="Installing the Sanity client and creating a new project" -npm install -g @sanity/cli && sanity init -``` + + + When you create a Sanity project, you can choose to use one of their pre-defined schemas. Schemas describe the shape of your data in your Sanity dataset -- if you were to start a brand new project, you may choose to initialize the schema from scratch, but for now, select the **Blog** schema. @@ -37,11 +37,12 @@ When you create a Sanity project, you can choose to use one of their pre-defined With your project created, you can navigate into the folder and start up the studio locally: -```sh title="Starting the Sanity studio" +```sh cd my-sanity-project -sanity start ``` + + The Sanity studio is where you can create new records for your dataset. By default, running the studio locally makes it available at `localhost:3333`– go there now and create your author record. You can also create blog posts here. ![Creating a blog post in the Sanity Project dashboard](~/assets/images/pages/tutorials/sanity-studio.png) @@ -50,9 +51,7 @@ The Sanity studio is where you can create new records for your dataset. By defau When you are ready to deploy your studio, run `sanity deploy` to choose a unique URL for your studio. This means that you (or anyone else you invite to manage your blog) can access the studio at a `yoururl.sanity.studio` domain. -```sh title="Deploying the studio" -sanity deploy -``` + Once you have deployed your Sanity studio: @@ -69,26 +68,23 @@ This means that requests that come to your Sanity dataset from your Nuxt applica Next, create a Nuxt.js project. 
In a new terminal, use `create-nuxt-app` to set up a new Nuxt project:

-```sh title="Creating a new Nuxt.js project"
-npx create-nuxt-app blog
-```
+

While going through the setup process, ensure that you select a rendering mode of **Universal (SSR / SSG)** and a deployment target of **Static (Static/JAMStack hosting)**.

After you have completed your project, `cd` into your new project, and start a local development server by running `yarn dev` (or, if you chose npm as your package manager, `npm run dev`):

-```sh title="Starting a Nuxt.js development server"
+```sh
cd blog
-yarn dev
```
+
+

### Integrating Sanity.io

After your Nuxt.js application is set up, add Sanity's `@sanity/nuxt` plugin to your Nuxt project:

-```sh title="Adding @nuxt/sanity"
-yarn add @nuxtjs/sanity @sanity/client
-```
+

To configure the plugin in your Nuxt.js application, you will need to provide some configuration details. The easiest way to do this is to copy the `sanity.json` folder from your studio into your application directory (though there are other methods, too: refer to the [`@nuxt/sanity` documentation](https://sanity.nuxtjs.org/getting-started/quick-start/)).

@@ -236,9 +232,7 @@ You have rendered the `post` title for our blog, but you are still missing the c

First, install the npm package:

-```sh title="Add sanity-blocks-vue-component package"
-yarn add sanity-blocks-vue-component
-```
+

After the package is installed, create `plugins/sanity-blocks.js`, which will import the component and register it as the Vue component `block-content`:

@@ -353,10 +347,7 @@ In `pages/index.vue`, you can use the `block-content` component to render a summ

```

-
+

There are many other things inside of your blog schema that you can add to your project. As an exercise, consider one of the following to continue developing your understanding of how to build with a headless CMS:

diff --git a/src/content/docs/pages/tutorials/build-an-api-with-pages-functions/index.mdx b/src/content/docs/pages/tutorials/build-an-api-with-pages-functions/index.mdx
index 8cdbbd39b61f0c..80b8f386ce0339 100644
--- a/src/content/docs/pages/tutorials/build-an-api-with-pages-functions/index.mdx
+++ b/src/content/docs/pages/tutorials/build-an-api-with-pages-functions/index.mdx
@@ -7,7 +7,7 @@ languages:
 - JavaScript
---

-import { Stream } from "~/components";
+import { Stream, PackageManagers } from "~/components";

In this tutorial, you will build a full-stack Pages application. Your application will contain:

@@ -45,17 +45,7 @@ To set up your React project:

1. Install the [React Router](https://reactrouter.com/en/main/start/tutorial) in the root of your `blog-frontend` directory.

-With `npm`:
-
-```sh
-npm install react-router-dom@6
-```
-
-With `yarn`:
-
-```sh
-yarn add react-router-dom@6
-```
+

2. Clear the contents of `src/App.js`. Copy and paste the following code to import the React Router into `App.js`, and set up a new router with two routes:

diff --git a/src/content/docs/pub-sub/examples/connect-javascript.mdx b/src/content/docs/pub-sub/examples/connect-javascript.mdx
index 25dd6c0180266b..edf48e7e59ae85 100644
--- a/src/content/docs/pub-sub/examples/connect-javascript.mdx
+++ b/src/content/docs/pub-sub/examples/connect-javascript.mdx
@@ -6,6 +6,8 @@ summary: Use MQTT.js with the token authentication mode configured on a broker.
description: Use MQTT.js with the token authentication mode configured on a broker.
--- +import { PackageManagers } from "~/components"; + Below is an example using [MQTT.js](https://github.com/mqttjs/MQTT.js#mqttclientstreambuilder-options) with the TOKEN authentication mode configured on a broker. The example assumes you have [Node.js](https://nodejs.org/en/) v16 or higher installed on your system. Make sure to set the following environmental variables before running the example: @@ -16,10 +18,7 @@ Make sure to set the following environmental variables before running the exampl Before running the example, make sure to install the MQTT library: -```sh -# Pre-requisite: install MQTT.js -npm install mqtt --save -``` + Copy the following example as `example.js` and run it with `node example.js`. diff --git a/src/content/docs/pub-sub/guide.mdx b/src/content/docs/pub-sub/guide.mdx index 35f2f24a4e9891..283fd06fb8456a 100644 --- a/src/content/docs/pub-sub/guide.mdx +++ b/src/content/docs/pub-sub/guide.mdx @@ -5,7 +5,7 @@ sidebar: order: 1 --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; :::note @@ -43,7 +43,7 @@ Installing `wrangler`, the Workers command-line interface (CLI), allows you to [ To install [`wrangler`](https://github.com/cloudflare/workers-sdk/tree/main/packages/wrangler), ensure you have [`npm` installed](https://docs.npmjs.com/getting-started), preferably using a Node version manager like [Volta](https://volta.sh/) or [nvm](https://github.com/nvm-sh/nvm). Using a version manager helps avoid permission issues and allows you to easily change Node.js versions. Then run: - + Validate that you have a version of `wrangler` that supports Pub/Sub: diff --git a/src/content/docs/pub-sub/learning/websockets-browsers.mdx b/src/content/docs/pub-sub/learning/websockets-browsers.mdx index 33f4ceb01ce4fc..dfa209aadc126d 100644 --- a/src/content/docs/pub-sub/learning/websockets-browsers.mdx +++ b/src/content/docs/pub-sub/learning/websockets-browsers.mdx @@ -4,16 +4,17 @@ pcx_content_type: reference type: concept summary: Connect to Pub/Sub with WebSockets description: Connect to Pub/Sub with WebSockets - --- +import { PackageManagers } from "~/components"; + Pub/Sub allows you to both publish and subscribe from within a web browser or other WebSocket capable client. Every Pub/Sub Broker supports MQTT over WebSockets natively, without further configuration. With Pub/Sub’s WebSocket support, you can: -* Subscribe to a topic in the browser and push near real-time updates (such as notifications or chat messages) to those clients from your backend services. -* Publish telemetry directly from WebSocket clients and then aggregate those messages in a centralized service or by using [Workers Analytics Engine](https://blog.cloudflare.com/workers-analytics-engine/) to consume them on your behalf. -* Publish and subscribe directly between a browser client and your MQTT-capable IoT devices in the field. +- Subscribe to a topic in the browser and push near real-time updates (such as notifications or chat messages) to those clients from your backend services. +- Publish telemetry directly from WebSocket clients and then aggregate those messages in a centralized service or by using [Workers Analytics Engine](https://blog.cloudflare.com/workers-analytics-engine/) to consume them on your behalf. +- Publish and subscribe directly between a browser client and your MQTT-capable IoT devices in the field. 
When clients are connecting from a browser, you should use [`token` authentication](/pub-sub/platform/authentication-authorization/) and ensure you are issuing clients unique credentials. @@ -21,11 +22,11 @@ When clients are connecting from a browser, you should use [`token` authenticati WebSocket support in Pub/Sub works by encapsulating MQTT packets (Pub/Sub’s underlying native protocol) within WebSocket [frames](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API/Writing_WebSocket_servers#exchanging_data_frames). -* In many MQTT libraries, you can replace the `mqtts://` scheme with `wss://`, and your code will use a WebSocket transport instead of the native “raw” TCP transport. -* The WebSocket listener is available on both TCP ports `443` and `8884` versus `8883` for native MQTT. -* WebSocket clients need to speak MQTT over WebSockets. Pub/Sub does not support other message serialization methods over WebSockets at present. -* **Clients should include a `sec-websocket-protocol: mqtt` request header in the initial HTTP GET request** to distinguish an "MQTT over WebSocket" request from future, non-MQTT protocol support over WebSockets. -* Authentication is performed within the WebSocket connection itself. An MQTT `CONNECT` packet inside the WebSocket tunnel includes the required username and password. The WebSocket connection itself does not need to be authenticated at the HTTP level. +- In many MQTT libraries, you can replace the `mqtts://` scheme with `wss://`, and your code will use a WebSocket transport instead of the native “raw” TCP transport. +- The WebSocket listener is available on both TCP ports `443` and `8884` versus `8883` for native MQTT. +- WebSocket clients need to speak MQTT over WebSockets. Pub/Sub does not support other message serialization methods over WebSockets at present. +- **Clients should include a `sec-websocket-protocol: mqtt` request header in the initial HTTP GET request** to distinguish an "MQTT over WebSocket" request from future, non-MQTT protocol support over WebSockets. +- Authentication is performed within the WebSocket connection itself. An MQTT `CONNECT` packet inside the WebSocket tunnel includes the required username and password. The WebSocket connection itself does not need to be authenticated at the HTTP level. We recommend using [MQTT.js](https://github.com/mqttjs/MQTT.js), one of the most popular JavaScript libraries, for client-side JavaScript support. It can be used in both the browser via Webpack or Browserify and in a Node.js environment. @@ -40,9 +41,9 @@ You can view a live demo available at [demo.mqtt.dev](http://demo.mqtt.dev) that In a real-world deployment, our publisher could be another client, a native MQTT client, or a WebSocket client running on a remote server elsewhere. 
+ + ```js -// Ensure MQTT.js is installed first -// > npm install mqtt import * as mqtt from "mqtt" // Where 'url' is "mqtts://BROKER.NAMESPACE.cloudflarepubsub.com:8884" @@ -54,7 +55,7 @@ function example(url) { password: jwt, // pass this from a form field in your app clientId: '', }) - + client.on('connect', function () { client.subscribe(topic, function (err) { if (err) { @@ -63,7 +64,7 @@ function example(url) { console.log(`subscribed to ${topic}`) } }) - + client.on('message', function (topic, message) { let line = (new Date()).toLocaleString('en-US') + ": " + message.toString() + "\n"; console.log(line) diff --git a/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx b/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx index c3a3be035377f5..48ad806108c9e3 100644 --- a/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx +++ b/src/content/docs/queues/tutorials/handle-rate-limits/index.mdx @@ -235,9 +235,7 @@ interface Env { Lastly, install the [`resend` package](https://www.npmjs.com/package/resend) using the following command: -```sh title="Install Resend" -npm install resend -``` + You can now use the `RESEND_API_KEY` variable in your code. diff --git a/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx b/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx index 9e8711dead7701..450464b6e47d85 100644 --- a/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx +++ b/src/content/docs/queues/tutorials/web-crawler-with-browser-rendering/index.mdx @@ -60,10 +60,17 @@ cd queues-web-crawler We need to create a KV store. This can be done through the Cloudflare dashboard or the Wrangler CLI. For this tutorial, we will use the Wrangler CLI. -```sh -npx wrangler kv namespace create crawler_links -npx wrangler kv namespace create crawler_screenshots -``` + + + ```sh output 🌀 Creating namespace with title "web-crawler-crawler-links" @@ -102,10 +109,9 @@ Now, you need to set up your Worker for Browser Rendering. In your current directory, install Cloudflare’s [fork of Puppeteer](/browser-rendering/platform/puppeteer/) and also [robots-parser](https://www.npmjs.com/package/robots-parser): -```sh -npm install @cloudflare/puppeteer --save-dev -npm install robots-parser -``` + + + Then, add a Browser Rendering binding. Adding a Browser Rendering binding gives the Worker access to a headless Chromium instance you will control with Puppeteer. @@ -121,9 +127,11 @@ browser = { binding = "CRAWLER_BROWSER" } Now, we need to set up the Queue. -```sh -npx wrangler queues create queues-web-crawler -``` + ```txt title="Output" Creating queue queues-web-crawler. @@ -536,9 +544,7 @@ export default { To deploy your Worker, run the following command: -```sh -npx wrangler deploy -``` + You have successfully created a Worker which can submit URLs to a queue for crawling and save results to Workers KV. diff --git a/src/content/docs/r2/get-started.mdx b/src/content/docs/r2/get-started.mdx index 1dfd25c522ca9a..67f7cf0e210867 100644 --- a/src/content/docs/r2/get-started.mdx +++ b/src/content/docs/r2/get-started.mdx @@ -6,14 +6,20 @@ sidebar: head: - tag: title content: Getting started guide - --- -import { Render } from "~/components" +import { Render, PackageManagers } from "~/components"; Cloudflare R2 Storage allows developers to store large amounts of unstructured data without the costly egress bandwidth fees associated with typical cloud storage services. -
+
+ +
## 1. Install and authenticate Wrangler @@ -23,7 +29,7 @@ Before you create your first bucket, you must purchase R2 from the Cloudflare da 1. [Install Wrangler](/workers/wrangler/install-and-update/) within your project using npm and Node.js or Yarn. - + 2. [Authenticate Wrangler](/workers/wrangler/commands/#login) to enable deployments to Cloudflare. When Wrangler automatically opens your browser to display Cloudflare's consent screen, select **Allow** to send the API Token to Wrangler. @@ -51,6 +57,6 @@ You will receive a confirmation message after a successful upload. Cloudflare provides multiple ways for developers to access their R2 buckets: -* [Workers Runtime API](/r2/api/workers/workers-api-usage/) -* [S3 API compatibility](/r2/api/s3/api/) -* [Public buckets](/r2/buckets/public-buckets/) +- [Workers Runtime API](/r2/api/workers/workers-api-usage/) +- [S3 API compatibility](/r2/api/s3/api/) +- [Public buckets](/r2/buckets/public-buckets/) diff --git a/src/content/docs/r2/tutorials/summarize-pdf.mdx b/src/content/docs/r2/tutorials/summarize-pdf.mdx index c3c86ff01493cc..db29a1fe540efc 100644 --- a/src/content/docs/r2/tutorials/summarize-pdf.mdx +++ b/src/content/docs/r2/tutorials/summarize-pdf.mdx @@ -346,9 +346,7 @@ To extract the textual content from the PDF, the Worker will use the [unpdf](htt Install the `unpdf` library by running the following command: -```sh -npm install unpdf -``` + Update the `src/index.ts` file to import the required modules from the `unpdf` library: diff --git a/src/content/docs/stream/uploading-videos/resumable-uploads.mdx b/src/content/docs/stream/uploading-videos/resumable-uploads.mdx index 693bcea080b493..025b81ad9aecb5 100644 --- a/src/content/docs/stream/uploading-videos/resumable-uploads.mdx +++ b/src/content/docs/stream/uploading-videos/resumable-uploads.mdx @@ -5,6 +5,8 @@ sidebar: order: 3 --- +import { PackageManagers } from "~/components"; + If you have a video over 200 MB, we recommend using the [tus protocol](https://tus.io/) for resumable file uploads. A resumable upload ensures that the upload can be interrupted and resumed without uploading the previous data again. ## Requirements @@ -13,7 +15,6 @@ If you have a video over 200 MB, we recommend using the [tus protocol](https://t - Maximum chunk size is 209,715,200 bytes. - Chunk size must be divisible by 256 KiB (256x1024 bytes). Round your chunk size to the nearest multiple of 256 KiB. Note that the final chunk of an upload that fits within a single chunk is exempt from this requirement. - ## Prerequisites Before you can upload a video using tus, you will need to download a tus client. @@ -103,9 +104,7 @@ Refer to [go-tus](https://github.com/eventials/go-tus) for functionality such as Before you begin, install the tus-js-client. 
-```sh title="Install tus-js-client" -npm install tus-js-client -``` + Create an `index.js` file and configure: diff --git a/src/content/docs/turnstile/tutorials/protecting-your-payment-form-from-attackers-bots-using-turnstile.mdx b/src/content/docs/turnstile/tutorials/protecting-your-payment-form-from-attackers-bots-using-turnstile.mdx index 011bd7d152cbd5..0061c0567c3cd3 100644 --- a/src/content/docs/turnstile/tutorials/protecting-your-payment-form-from-attackers-bots-using-turnstile.mdx +++ b/src/content/docs/turnstile/tutorials/protecting-your-payment-form-from-attackers-bots-using-turnstile.mdx @@ -289,25 +289,7 @@ This implementation blocks order operations for any requests that Cloudflare ide To integrate Turnstile with a Stripe payment form, first you will need to install the Stripe SDK: - - -```sh -npm install stripe -``` - - - -```sh -yarn add stripe -``` - - - -```sh -pnpm add stripe -``` - - + Next, implement the code to create a payment form in `src/index.tsx`. The following code creates a [Payment Intent](https://docs.stripe.com/api/payment_intents) on the server side: diff --git a/src/content/docs/workers-ai/configuration/ai-sdk.mdx b/src/content/docs/workers-ai/configuration/ai-sdk.mdx index c5b8d38b84c629..d71f97bd20fcfb 100644 --- a/src/content/docs/workers-ai/configuration/ai-sdk.mdx +++ b/src/content/docs/workers-ai/configuration/ai-sdk.mdx @@ -3,18 +3,17 @@ pcx_content_type: configuration title: Vercel AI SDK sidebar: order: 3 - --- +import { PackageManagers } from "~/components"; + Workers AI can be used with the [Vercel AI SDK](https://sdk.vercel.ai/) for JavaScript and TypeScript codebases. ## Setup Install the [`workers-ai-provider` provider](https://sdk.vercel.ai/providers/community-providers/cloudflare-workers-ai): -```bash -npm install workers-ai-provider -``` + Then, add an AI binding in your Workers project Wrangler file: @@ -28,12 +27,12 @@ binding = "AI" The AI SDK can be configured to work with [any AI model](/workers-ai/models/). ```js -import { createWorkersAI } from 'workers-ai-provider'; +import { createWorkersAI } from "workers-ai-provider"; const workersai = createWorkersAI({ binding: env.AI }); // Choose any model: https://developers.cloudflare.com/workers-ai/models/ -const model = workersai('@cf/meta/llama-3.1-8b-instruct', {}); +const model = workersai("@cf/meta/llama-3.1-8b-instruct", {}); ``` ## Generate Text diff --git a/src/content/docs/workers-ai/features/function-calling/embedded/get-started.mdx b/src/content/docs/workers-ai/features/function-calling/embedded/get-started.mdx index 69af4181e06ffc..5329e9a31511ce 100644 --- a/src/content/docs/workers-ai/features/function-calling/embedded/get-started.mdx +++ b/src/content/docs/workers-ai/features/function-calling/embedded/get-started.mdx @@ -8,7 +8,7 @@ head: content: Get Started --- -import { TypeScriptExample } from "~/components"; +import { TypeScriptExample, PackageManagers } from "~/components"; This guide will instruct you through setting up and deploying your first Workers AI project with embedded function calling. You will use Workers, a Workers AI binding, the [`ai-utils package`](https://github.com/cloudflare/ai-utils), and a large language model (LLM) to deploy your first AI-powered application on the Cloudflare global network with embedded function calling. @@ -20,9 +20,7 @@ Follow the [Workers AI Get Started Guide](/workers-ai/get-started/workers-wrangl Next, run the following command in your project repository to install the Worker AI utilities package. 
-```sh
-npm install @cloudflare/ai-utils --save
-```
+

## 3. Add Workers AI Embedded function calling

diff --git a/src/content/docs/workers-ai/guides/tutorials/build-a-retrieval-augmented-generation-ai.mdx b/src/content/docs/workers-ai/guides/tutorials/build-a-retrieval-augmented-generation-ai.mdx
index 56573d7ec07990..bda9637d2ef146 100644
--- a/src/content/docs/workers-ai/guides/tutorials/build-a-retrieval-augmented-generation-ai.mdx
+++ b/src/content/docs/workers-ai/guides/tutorials/build-a-retrieval-augmented-generation-ai.mdx
@@ -243,9 +243,7 @@ env.RAG_WORKFLOW.create({ params: { text } });

To expand on your Workers function in order to handle multiple routes, we will add `hono`, a routing library for Workers. This will allow us to create a new route for adding notes to our database. Install `hono` using `npm`:

-```sh
-npm install hono
-```
+

Then, import `hono` into your `src/index.js` file. You should also update the `fetch` handler to use `hono`:

@@ -408,9 +406,7 @@ If you are working with larger documents, you have the option to use Anthropic's

To begin, install the `@anthropic-ai/sdk` package:

-```sh
-npm install @anthropic-ai/sdk
-```
+

In `src/index.js`, you can update the `GET /` route to check for the `ANTHROPIC_API_KEY` environment variable. If it's set, we can generate text using the Anthropic SDK. If it isn't set, we'll fall back to the existing Workers AI code:

@@ -498,9 +494,7 @@ For large pieces of text, it is recommended to split the text into smaller chunk

To implement this, we'll add a new NPM package to our project, `@langchain/textsplitters`:

-```sh
-npm install @langchain/textsplitters
-```
+

The `RecursiveCharacterTextSplitter` class provided by this package will split the text into smaller chunks. It can be customized to your liking, but the default config works in most cases:

diff --git a/src/content/docs/workers/databases/native-integrations/neon.mdx b/src/content/docs/workers/databases/native-integrations/neon.mdx
index 70c00478e253ea..a114185bb51105 100644
--- a/src/content/docs/workers/databases/native-integrations/neon.mdx
+++ b/src/content/docs/workers/databases/native-integrations/neon.mdx
@@ -3,7 +3,7 @@ pcx_content_type: configuration
title: Neon
---

-import { Render } from "~/components";
+import { Render, PackageManagers } from "~/components";

[Neon](https://neon.tech/) is a fully managed serverless PostgreSQL. It separates storage and compute to offer modern developer features, such as serverless, branching, and bottomless storage.

@@ -53,9 +53,7 @@ To set up an integration with Neon:

5. In your Worker, install the `@neondatabase/serverless` driver to connect to your database and start manipulating data:

-   ```sh
-   npm install @neondatabase/serverless
-   ```
+

6. The following example shows how to make a query to your Neon database in a Worker. The credentials needed to connect to Neon have been automatically added as secrets to your Worker through the integration.
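That query example sits in lines unchanged by this diff. For orientation, here is a minimal sketch of such a Worker, assuming the integration injected its default `DATABASE_URL` secret; the `books` table is purely hypothetical.

```ts
// Sketch only: DATABASE_URL is assumed to be the integration's default
// secret name, and the `books` table is hypothetical.
import { Client } from "@neondatabase/serverless";

export interface Env {
	DATABASE_URL: string;
}

export default {
	async fetch(
		request: Request,
		env: Env,
		ctx: ExecutionContext,
	): Promise<Response> {
		const client = new Client(env.DATABASE_URL);
		await client.connect();

		const { rows } = await client.query("SELECT * FROM books LIMIT 10;");

		// Close the connection after the response has been returned.
		ctx.waitUntil(client.end());

		return Response.json(rows);
	},
};
```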
diff --git a/src/content/docs/workers/databases/native-integrations/planetscale.mdx b/src/content/docs/workers/databases/native-integrations/planetscale.mdx
index fa406d18ce1870..9e91aa10f0266b 100644
--- a/src/content/docs/workers/databases/native-integrations/planetscale.mdx
+++ b/src/content/docs/workers/databases/native-integrations/planetscale.mdx
@@ -3,7 +3,7 @@ pcx_content_type: configuration
title: PlanetScale
---

-import { Render } from "~/components";
+import { Render, PackageManagers } from "~/components";

[PlanetScale](https://planetscale.com/) is a MySQL-compatible platform that makes databases infinitely scalable, easier and safer to manage.

@@ -44,9 +44,7 @@ To set up an integration with PlanetScale:

5. In your Worker, install the `@planetscale/database` driver to connect to your PlanetScale database and start manipulating data:

-   ```sh
-   npm install @planetscale/database
-   ```
+

6. The following example shows how to make a query to your PlanetScale database in a Worker. The credentials needed to connect to PlanetScale have been automatically added as secrets to your Worker through the integration.

diff --git a/src/content/docs/workers/databases/native-integrations/supabase.mdx b/src/content/docs/workers/databases/native-integrations/supabase.mdx
index 1bae560615c0c8..4c473cb33fc1e3 100644
--- a/src/content/docs/workers/databases/native-integrations/supabase.mdx
+++ b/src/content/docs/workers/databases/native-integrations/supabase.mdx
@@ -3,7 +3,7 @@ pcx_content_type: configuration
title: Supabase
---

-import { Render } from "~/components";
+import { Render, PackageManagers } from "~/components";

[Supabase](https://supabase.com/) is an open source Firebase alternative and a PostgreSQL database service that offers real-time functionality, database backups, and extensions. With Supabase, developers can quickly set up a PostgreSQL database and build applications.

@@ -45,9 +45,7 @@ To set up an integration with Supabase:

5. In your Worker, install the `@supabase/supabase-js` driver to connect to your database and start manipulating data:

-   ```sh
-   npm install @supabase/supabase-js
-   ```
+

6. The following example shows how to make a query to your Supabase database in a Worker. The credentials needed to connect to Supabase have been automatically added as secrets to your Worker through the integration.

diff --git a/src/content/docs/workers/databases/native-integrations/turso.mdx b/src/content/docs/workers/databases/native-integrations/turso.mdx
index 0bb84b8f9e7672..14f19357526b10 100644
--- a/src/content/docs/workers/databases/native-integrations/turso.mdx
+++ b/src/content/docs/workers/databases/native-integrations/turso.mdx
@@ -3,7 +3,7 @@ pcx_content_type: configuration
title: Turso
---

-import { Render } from "~/components";
+import { Render, PackageManagers } from "~/components";

[Turso](https://turso.tech/) is an edge-hosted, distributed database based on [libSQL](https://libsql.org/), an open-source fork of SQLite. Turso was designed to minimize query latency for applications where queries come from anywhere in the world.

@@ -15,51 +15,51 @@ To set up an integration with Turso:

1. You need to install the Turso CLI to create and populate a database.
Use one of the following two commands in your terminal to install the Turso CLI: -```sh -# On macOS and linux with homebrew -brew install tursodatabase/tap/turso + ```sh + # On macOS and linux with homebrew + brew install tursodatabase/tap/turso -# Manual scripted installation -curl -sSfL https://get.tur.so/install.sh | bash -``` + # Manual scripted installation + curl -sSfL https://get.tur.so/install.sh | bash + ``` -Next, run the following command to make sure the Turso CLI is installed: + Next, run the following command to make sure the Turso CLI is installed: -```sh -turso --version -``` + ```sh + turso --version + ``` 2. Before you create your first Turso database, you have to authenticate with your GitHub account by running: -```sh -turso auth login -``` + ```sh + turso auth login + ``` -```sh output -Waiting for authentication... -✔ Success! Logged in as -``` + ```sh output + Waiting for authentication... + ✔ Success! Logged in as + ``` -After you have authenticated, you can create a database using the command `turso db create `. Turso will create a database and automatically choose a location closest to you. + After you have authenticated, you can create a database using the command `turso db create `. Turso will create a database and automatically choose a location closest to you. -```sh -turso db create my-db -``` + ```sh + turso db create my-db + ``` -```sh output + ```sh output -# Example: -Creating database my-db in Amsterdam, Netherlands (ams) + # Example: + Creating database my-db in Amsterdam, Netherlands (ams) -# Once succeeded: -Created database my-db in Amsterdam, Netherlands (ams) in 13 seconds. -``` + # Once succeeded: + Created database my-db in Amsterdam, Netherlands (ams) in 13 seconds. + ``` -With the first database created, you can now connect to it directly and execute SQL queries against it. + With the first database created, you can now connect to it directly and execute SQL queries against it. -```sh -turso db shell my-db -``` + ```sh + turso db shell my-db + ``` 3. Copy the following SQL query into the shell you just opened: @@ -94,58 +94,59 @@ turso db shell my-db 5. In your Worker, install the Turso client library: -```sh -npm install @libsql/client -``` + 6. The following example shows how to make a query to your Turso database in a Worker. The credentials needed to connect to Turso have been automatically added as [secrets](/workers/configuration/secrets/) to your Worker through the integration. 
-```ts -import { Client as LibsqlClient, createClient } from "@libsql/client/web"; - -export interface Env { - TURSO_URL?: string; - TURSO_AUTH_TOKEN?: string; -} - -export default { - async fetch(request, env, ctx): Promise { - const client = buildLibsqlClient(env); - - try { - const res = await client.execute("SELECT * FROM elements"); - return new Response(JSON.stringify(res), { - status: 200, - headers: { "Content-Type": "application/json" }, - }); - } catch (error) { - console.error("Error executing SQL query:", error); - return new Response(JSON.stringify({ error: "Internal Server Error" }), { - status: 500, - }); - } - }, -} satisfies ExportedHandler; - -function buildLibsqlClient(env: Env): LibsqlClient { - const url = env.TURSO_URL?.trim(); - if (url === undefined) { - throw new Error("TURSO_URL env var is not defined"); - } - - const authToken = env.TURSO_AUTH_TOKEN?.trim(); - if (authToken == undefined) { - throw new Error("TURSO_AUTH_TOKEN env var is not defined"); - } - - return createClient({ url, authToken }); -} -``` - -- The libSQL client library import `@libsql/client/web` must be imported exactly as shown when working with Cloudflare Workers. The non-web import will not work in the Workers environment. -- The `Env` interface contains the [environment variable](/workers/configuration/environment-variables/) and [secret](/workers/configuration/secrets/) defined when you added the Turso integration in step 4. -- The `Env` interface also caches the libSQL client object and router, which was created on the first request to the Worker. -- The Worker uses `buildLibsqlClient` to query the `elements` database and returns the response as a JSON object. + ```ts + import { Client as LibsqlClient, createClient } from "@libsql/client/web"; + + export interface Env { + TURSO_URL?: string; + TURSO_AUTH_TOKEN?: string; + } + + export default { + async fetch(request, env, ctx): Promise { + const client = buildLibsqlClient(env); + + try { + const res = await client.execute("SELECT * FROM elements"); + return new Response(JSON.stringify(res), { + status: 200, + headers: { "Content-Type": "application/json" }, + }); + } catch (error) { + console.error("Error executing SQL query:", error); + return new Response( + JSON.stringify({ error: "Internal Server Error" }), + { + status: 500, + }, + ); + } + }, + } satisfies ExportedHandler; + + function buildLibsqlClient(env: Env): LibsqlClient { + const url = env.TURSO_URL?.trim(); + if (url === undefined) { + throw new Error("TURSO_URL env var is not defined"); + } + + const authToken = env.TURSO_AUTH_TOKEN?.trim(); + if (authToken == undefined) { + throw new Error("TURSO_AUTH_TOKEN env var is not defined"); + } + + return createClient({ url, authToken }); + } + ``` + + - The libSQL client library import `@libsql/client/web` must be imported exactly as shown when working with Cloudflare Workers. The non-web import will not work in the Workers environment. + - The `Env` interface contains the [environment variable](/workers/configuration/environment-variables/) and [secret](/workers/configuration/secrets/) defined when you added the Turso integration in step 4. + - The `Env` interface also caches the libSQL client object and router, which was created on the first request to the Worker. + - The Worker uses `buildLibsqlClient` to query the `elements` database and returns the response as a JSON object. With your environment configured and your code ready, you can now test your Worker locally before you deploy. 
diff --git a/src/content/docs/workers/databases/native-integrations/upstash.mdx b/src/content/docs/workers/databases/native-integrations/upstash.mdx index 1d6380a240719e..4c50221794ee7a 100644 --- a/src/content/docs/workers/databases/native-integrations/upstash.mdx +++ b/src/content/docs/workers/databases/native-integrations/upstash.mdx @@ -3,7 +3,7 @@ pcx_content_type: configuration title: Upstash --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; [Upstash](https://upstash.com/) is a serverless database with Redis\* and Kafka API. Upstash also offers QStash, a task queue/scheduler designed for the serverless. @@ -54,39 +54,37 @@ To set up an integration with Upstash: 4. In your Worker, install the `@upstash/redis`, a HTTP client to connect to your database and start manipulating data: - ```sh - npm install @upstash/redis - ``` + 5. The following example shows how to make a query to your Upstash database in a Worker. The credentials needed to connect to Upstash have been automatically added as secrets to your Worker through the integration. -```js -import { Redis } from "@upstash/redis/cloudflare"; + ```js + import { Redis } from "@upstash/redis/cloudflare"; -export default { - async fetch(request, env) { - const redis = Redis.fromEnv(env); + export default { + async fetch(request, env) { + const redis = Redis.fromEnv(env); - const country = request.headers.get("cf-ipcountry"); - if (country) { - const greeting = await redis.get(country); - if (greeting) { - return new Response(greeting); - } - } + const country = request.headers.get("cf-ipcountry"); + if (country) { + const greeting = await redis.get(country); + if (greeting) { + return new Response(greeting); + } + } - return new Response("Hello What's up!"); - }, -}; -``` + return new Response("Hello What's up!"); + }, + }; + ``` -:::note + :::note -`Redis.fromEnv(env)` automatically picks up the default `url` and `token` names created in the integration. + `Redis.fromEnv(env)` automatically picks up the default `url` and `token` names created in the integration. -If you have renamed the secrets, you must declare them explicitly like in the [Upstash basic example](https://docs.upstash.com/redis/sdks/redis-ts/getstarted#basic-usage). + If you have renamed the secrets, you must declare them explicitly like in the [Upstash basic example](https://docs.upstash.com/redis/sdks/redis-ts/getstarted#basic-usage). -::: + ::: To learn more about Upstash, refer to the [Upstash documentation](https://docs.upstash.com/redis). @@ -106,9 +104,7 @@ To set up an integration with Upstash Kafka: 3. In your Worker, install `@upstash/kafka`, a HTTP/REST based Kafka client: - ```sh - npm install @upstash/kafka - ``` + 4. Use the [upstash-kafka](https://github.com/upstash/upstash-kafka/blob/main/README.md) JavaScript SDK to send data to Kafka. @@ -130,9 +126,7 @@ To set up an integration with Upstash QStash: 3. In your Worker, install the `@upstash/qstash`, a HTTP client to connect to your database QStash endpoint: - ```sh - npm install @upstash/qstash - ``` + 4. Refer to the [Upstash documentation on how to receive webhooks from QStash in your Cloudflare Worker](https://docs.upstash.com/qstash/quickstarts/cloudflare-workers#3-use-qstash-in-your-handler). 
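Since the QStash step ends at a link, a brief sketch of the receiving side may help: verifying QStash's signature before trusting a webhook body. This is a non-authoritative sketch that assumes the signing keys are stored as Worker secrets named `QSTASH_CURRENT_SIGNING_KEY` and `QSTASH_NEXT_SIGNING_KEY` (illustrative names); refer to the Upstash documentation linked above for the canonical handler.

```ts
import { Receiver } from "@upstash/qstash";

interface Env {
	QSTASH_CURRENT_SIGNING_KEY: string;
	QSTASH_NEXT_SIGNING_KEY: string;
}

export default {
	async fetch(request: Request, env: Env): Promise<Response> {
		const receiver = new Receiver({
			currentSigningKey: env.QSTASH_CURRENT_SIGNING_KEY,
			nextSigningKey: env.QSTASH_NEXT_SIGNING_KEY,
		});

		const body = await request.text();
		// verify() rejects on an invalid signature in some versions,
		// so treat a rejection the same as a `false` result.
		const isValid = await receiver
			.verify({
				signature: request.headers.get("Upstash-Signature") ?? "",
				body,
			})
			.catch(() => false);

		if (!isValid) {
			return new Response("Invalid signature", { status: 401 });
		}

		// Safe to act on the message now.
		return new Response("OK");
	},
};
```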
diff --git a/src/content/docs/workers/frameworks/framework-guides/tanstack.mdx b/src/content/docs/workers/frameworks/framework-guides/tanstack.mdx index 7635486081605f..413a78cb20f011 100644 --- a/src/content/docs/workers/frameworks/framework-guides/tanstack.mdx +++ b/src/content/docs/workers/frameworks/framework-guides/tanstack.mdx @@ -49,67 +49,68 @@ Whether you created a new TanStack Start project or are using an existing projec [`nitro-cloudflare-dev`](https://github.com/nitrojs/nitro-cloudflare-dev) enables access to the Cloudflare runtime bindings like R2, D1, and other Cloudflare services in the development server. - + 2. **Modify the `app.config.ts` file** - To configure your application for Cloudflare Workers deployment, add the following lines to your `app.config.ts` file: + To configure your application for Cloudflare Workers deployment, add the following lines to your `app.config.ts` file: - ```ts - // Required imports - import { cloudflare } from 'unenv' - import nitroCloudflareBindings from "nitro-cloudflare-dev"; + ```ts + // Required imports + import { cloudflare } from 'unenv' + import nitroCloudflareBindings from "nitro-cloudflare-dev"; - // Add this new server section to the defineConfig object - server: { - preset: "cloudflare-module", - unenv: cloudflare, - modules: [nitroCloudflareBindings], - }, - ``` + // Add this new server section to the defineConfig object + server: { + preset: "cloudflare-module", + unenv: cloudflare, + modules: [nitroCloudflareBindings], + }, + ``` - This will set the correct build format and runtime environment for Cloudflare. + This will set the correct build format and runtime environment for Cloudflare. 3. **Add a Wrangler file** - Create a `wrangler.jsonc` or `wrangler.toml` file in the root of your project, `wrangler.jsonc` is the recommended approach. This file is used to configure the Cloudflare Workers deployment. - - - ```json - { - "$schema": "node_modules/wrangler/config-schema.json", - "name": "start-basic", - "main": "./.output/server/index.mjs", - "compatibility_date": "2025-04-14", - "observability": { - "enabled": true, - }, - "assets": { - "directory": "./.output/public/", - }, - "compatibility_flags": ["nodejs_compat"], - } - ``` - - - Note that the `directory` key is set to `.output/public/`, which is the folder that will be filled with the build output. Additionally, the `main` key is set to `.output/server/index.mjs`, indicating to Cloudflare Workers where to locate the entry point for your application. + Create a `wrangler.jsonc` or `wrangler.toml` file in the root of your project, `wrangler.jsonc` is the recommended approach. This file is used to configure the Cloudflare Workers deployment. -4. **Build the application** + + + ```json + { + "$schema": "node_modules/wrangler/config-schema.json", + "name": "start-basic", + "main": "./.output/server/index.mjs", + "compatibility_date": "2025-04-14", + "observability": { + "enabled": true + }, + "assets": { + "directory": "./.output/public/" + }, + "compatibility_flags": ["nodejs_compat"] + } + ``` + + - You must build your application before deploying it to Cloudflare Workers. + Note that the `directory` key is set to `.output/public/`, which is the folder that will be filled with the build output. Additionally, the `main` key is set to `.output/server/index.mjs`, indicating to Cloudflare Workers where to locate the entry point for your application. - +4. **Build the application** + + You must build your application before deploying it to Cloudflare Workers. + 5. 
**Deploy the application** - The command below will deploy your application to Cloudflare Workers and provide a deployment URL. Make sure to rebuild your application after making any changes to see those changes reflected in the deployment. + The command below will deploy your application to Cloudflare Workers and provide a deployment URL. Make sure to rebuild your application after making any changes to see those changes reflected in the deployment. - ```sh - npx wrangler deploy - ``` + ```sh + npx wrangler deploy + ``` - When making changes in the future ensure you rebuild your application. The deploy will deploy what is in your `.output/public` folder and that only gets updated when you run the build command. + When making changes in the future ensure you rebuild your application. The deploy will deploy what is in your `.output/public` folder and that only gets updated when you run the build command. @@ -119,47 +120,50 @@ Whether you created a new TanStack Start project or are using an existing projec 1. **Create a helper function to get access to Cloudflare bindings** - Create a helper function named `cloudflareBindings.ts` in the `src/utils` folder, and paste in the below code. You can create a `utils` folder in your project if you don't already have one. The example assumes you have a KV namespace with a binding name of `CACHE` already created in your account and added to the wrangler file. - - ```ts - import type { KVNamespace } from "@cloudflare/workers-types"; - - interface CloudflareBindings { - CACHE: KVNamespace; - } - /** - * Will only work when being accessed on the server. Obviously, CF bindings are not available in the browser. - * @returns - */ - export async function getBindings() { - if (import.meta.env.DEV) { - const { getPlatformProxy } = await import("wrangler"); - const { env } = await getPlatformProxy(); - return env as unknown as CloudflareBindings; - } - - return process.env as unknown as CloudflareBindings; - } - ``` -
- To ensure your bindings work locally with vinxi, the helper function uses [getPlatformProxy](https://developers.cloudflare.com/workers/wrangler/api/#getplatformproxy) method from wrangler. This logic is placed under a check if import.meta.env.DEV is true. -
+   Create a helper function named `cloudflareBindings.ts` in the `src/utils` folder, and paste in the code below. You can create a `utils` folder in your project if you don't already have one. The example assumes you have a KV namespace with a binding name of `CACHE` already created in your account and added to the wrangler file.
+
+   ```ts
+   import type { KVNamespace } from "@cloudflare/workers-types";
+
+   interface CloudflareBindings {
+   	CACHE: KVNamespace;
+   }
+   /**
+    * Returns the Cloudflare bindings. This only works on the server;
+    * bindings are not available in the browser.
+    */
+   export async function getBindings() {
+   	if (import.meta.env.DEV) {
+   		const { getPlatformProxy } = await import("wrangler");
+   		const { env } = await getPlatformProxy();
+   		return env as unknown as CloudflareBindings;
+   	}
+
+   	return process.env as unknown as CloudflareBindings;
+   }
+   ```
+
+   To ensure your bindings work locally with vinxi, the helper function uses
+   the [getPlatformProxy](https://developers.cloudflare.com/workers/wrangler/api/#getplatformproxy)
+   method from Wrangler. This logic only runs when
+   `import.meta.env.DEV` is true.
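Because `getPlatformProxy()` boots a local proxy each time it is called, you may also want to memoize the result during development. A sketch, assuming you additionally export the `CloudflareBindings` interface from the helper above (the original keeps it module-private):

```ts
import { getBindings } from "./cloudflareBindings";
import type { CloudflareBindings } from "./cloudflareBindings";

// Cache the pending promise so the dev-time platform proxy is created
// once per process instead of on every call.
let cached: Promise<CloudflareBindings> | undefined;

export function getBindingsCached(): Promise<CloudflareBindings> {
	cached ??= getBindings();
	return cached;
}
```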
2. **Example using a Cloudflare Binding** - Now that you have a helper function to get access to your Cloudflare bindings, you can use them in your application. - - Remember bindings are only available on the server. + Now that you have a helper function to get access to your Cloudflare bindings, you can use them in your application. - ```ts - const bindings = await getBindings(); - const cache = bindings.CACHE; - const queryCount = (await cache.get("queryCount")) || "0"; - await cache.put("queryCount", String(Number(queryCount) + 1)); - ``` + Remember bindings are only available on the server. - A special thanks to GitHub user [backpine](https://github.com/backpine) for the code that supports Cloudflare Bindings in TanStack, which is demonstrated in their [TanStack Start on Workers example](https://github.com/backpine/tanstack-start-on-cloudflare-workers-v0). + ```ts + const bindings = await getBindings(); + const cache = bindings.CACHE; + const queryCount = (await cache.get("queryCount")) || "0"; + await cache.put("queryCount", String(Number(queryCount) + 1)); + ``` + A special thanks to GitHub user [backpine](https://github.com/backpine) for the code that supports Cloudflare Bindings in TanStack, which is demonstrated in their [TanStack Start on Workers example](https://github.com/backpine/tanstack-start-on-cloudflare-workers-v0). diff --git a/src/content/docs/workers/testing/miniflare/get-started.mdx b/src/content/docs/workers/testing/miniflare/get-started.mdx index 4deafa6d854f8a..ca369857074e14 100644 --- a/src/content/docs/workers/testing/miniflare/get-started.mdx +++ b/src/content/docs/workers/testing/miniflare/get-started.mdx @@ -4,15 +4,15 @@ sidebar: title: Get Started --- +import { PackageManagers } from "~/components"; + The Miniflare API allows you to dispatch events to workers without making actual HTTP requests, simulate connections between Workers, and interact with local emulations of storage products like [KV](/workers/testing/miniflare/storage/kv), [R2](/workers/testing/miniflare/storage/r2), and [Durable Objects](/workers/testing/miniflare/storage/durable-objects). This makes it great for writing tests, or other advanced use cases where you need finer-grained control. ## Installation Miniflare is installed using `npm` as a dev dependency: -```sh -$ npm install -D miniflare -``` + ## Usage diff --git a/src/content/docs/workers/testing/miniflare/writing-tests.mdx b/src/content/docs/workers/testing/miniflare/writing-tests.mdx index 814e916b030622..f38a650b856de2 100644 --- a/src/content/docs/workers/testing/miniflare/writing-tests.mdx +++ b/src/content/docs/workers/testing/miniflare/writing-tests.mdx @@ -7,7 +7,7 @@ head: [] description: Write integration tests against Workers using Miniflare. --- -import { TabItem, Tabs, Details } from "~/components"; +import { TabItem, Tabs, Details, PackageManagers } from "~/components"; import { FileTree } from "@astrojs/starlight/components"; @@ -19,25 +19,7 @@ This guide will show you how to set up [Miniflare](/workers/testing/miniflare) t To use Miniflare, make sure you've installed the latest version of Miniflare v3: - - -```sh -npm install -D miniflare -``` - - - -```sh -yarn add -D miniflare -``` - - - -```sh -pnpm add -D miniflare -``` - - + The rest of this guide demonstrates concepts with the [`node:test`](https://nodejs.org/api/test.html) testing framework, but any testing framework can be used. @@ -98,13 +80,13 @@ The highlighted lines of the test file above demonstrate how to set up Miniflare
When using the [Vitest integration](/workers/testing/vitest-integration/), your entire test suite runs in - [`workerd`](https://github.com/cloudflare/workerd), which is why it is possible - to unit test individual functions. By contrast, when using a different testing - framework to run tests via Miniflare, only your Worker itself is running in - [`workerd`](https://github.com/cloudflare/workerd) — your test files run in - Node.js. This means that importing functions from your Worker into your test - files might exhibit different behaviour than you'd see at runtime if the - functions rely on `workerd`-specific behaviour. + [`workerd`](https://github.com/cloudflare/workerd), which is why it is + possible to unit test individual functions. By contrast, when using a + different testing framework to run tests via Miniflare, only your Worker + itself is running in [`workerd`](https://github.com/cloudflare/workerd) — your + test files run in Node.js. This means that importing functions from your + Worker into your test files might exhibit different behaviour than you'd see + at runtime if the functions rely on `workerd`-specific behaviour.
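To make the Node.js-vs-`workerd` split above concrete, here is a condensed sketch of such a test with `node:test`: the test file runs in Node.js, while the inline Worker script runs in `workerd`.

```ts
import assert from "node:assert";
import test from "node:test";
import { Miniflare } from "miniflare";

test("worker returns a greeting", async () => {
	// This file executes in Node.js; the script below executes in workerd.
	const mf = new Miniflare({
		modules: true,
		script: `export default {
			async fetch() {
				return new Response("Hello, Miniflare!");
			}
		}`,
	});

	const response = await mf.dispatchFetch("http://localhost/");
	assert.strictEqual(await response.text(), "Hello, Miniflare!");

	await mf.dispose();
});
```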
## Interacting with Bindings diff --git a/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx b/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx index c8b058d3c8f0fc..5ff8dcf71fc5c3 100644 --- a/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx +++ b/src/content/docs/workers/tutorials/automated-analytics-reporting/index.mdx @@ -15,7 +15,13 @@ languages: - JavaScript --- -import { Render, PackageManagers, TabItem, Tabs, WranglerConfig } from "~/components"; +import { + Render, + PackageManagers, + TabItem, + Tabs, + WranglerConfig, +} from "~/components"; In this tutorial, you will create a [Cloudflare Worker](https://workers.cloudflare.com/) that fetches analytics data about your account from Cloudflare's [GraphQL Analytics API](https://developers.cloudflare.com/analytics/graphql-api/). You will be able to view the account analytics data in your browser and receive a scheduled email report. @@ -71,31 +77,14 @@ cd account-analytics To continue with this tutorial, install the [`mimetext`](https://www.npmjs.com/package/mimetext) package: - - -```sh -pnpm install mimetext -``` - - - -```sh -npm install mimetext -``` - - - -```sh -yarn add mimetext -``` - - + ## 2. Update Wrangler configuration file [`wrangler.jsonc`](/workers/wrangler/configuration/) contains the configuration for your Worker. It was created when you ran `c3` CLI. Open `wrangler.jsonc` in your code editor and update it with the following configuration: + ```toml name = "account-analytics" main = "src/index.js" @@ -105,7 +94,7 @@ compatibility_flags = ["nodejs_compat"] # Set destination_address to the email address where you want to receive the report send_email = [ - {name = "ANALYTICS_EMAIL", destination_address = "<>"} + { name = "ANALYTICS_EMAIL", destination_address = "<>" } ] # Schedule the Worker to run every day at 10:00 AM @@ -129,6 +118,7 @@ RECIPIENT_EMAIL = "<>" # This value will be used as the subject of the email EMAIL_SUBJECT = "Cloudflare Analytics Report" ``` + Before you continue, update the following: diff --git a/src/content/docs/workers/tutorials/build-a-qr-code-generator/index.mdx b/src/content/docs/workers/tutorials/build-a-qr-code-generator/index.mdx index 65ff1340a0200a..d26f53084feafd 100644 --- a/src/content/docs/workers/tutorials/build-a-qr-code-generator/index.mdx +++ b/src/content/docs/workers/tutorials/build-a-qr-code-generator/index.mdx @@ -123,9 +123,7 @@ export default { All projects deployed to Cloudflare Workers support npm packages. This support makes it easy to rapidly build out functionality in your Workers. The ['qrcode-svg'](https://github.com/papnkukn/qrcode-svg) package is a great way to take text and encode it into a QR code. In the command line, install and save 'qrcode-svg' to your project’s 'package.json': -```sh title="Installing the qr-image package" -npm install --save qrcode-svg -``` + In `index.js`, import the `qrcode-svg` package as the variable `QRCode`. In the `generateQRCode` function, parse the incoming request as JSON using `request.json`, and generate a new QR code using the `qrcode-svg` package. The QR code is generated as an SVG. Construct a new instance of `Response`, passing in the SVG data as the body, and a `Content-Type` header of `image/svg+xml`. 
This will allow browsers to properly parse the data coming back from your Worker as an image: @@ -135,7 +133,9 @@ import QRCode from "qrcode-svg"; async function generateQRCode(request) { const { text } = await request.json(); const qr = new QRCode({ content: text || "https://workers.dev" }); - return new Response(qr.svg(), { headers: { "Content-Type": "image/svg+xml" } }); + return new Response(qr.svg(), { + headers: { "Content-Type": "image/svg+xml" }, + }); } ``` @@ -161,7 +161,9 @@ export default { async function generateQRCode(request) { const { text } = await request.json(); const qr = new QRCode({ content: text || "https://workers.dev" }); - return new Response(qr.svg(), { headers: { "Content-Type": "image/svg+xml" } }); + return new Response(qr.svg(), { + headers: { "Content-Type": "image/svg+xml" }, + }); } const landing = ` @@ -215,7 +217,9 @@ export default { async function generateQRCode(request) { const { text } = await request.json(); const qr = new QRCode({ content: text || "https://workers.dev" }); - return new Response(qr.svg(), { headers: { "Content-Type": "image/svg+xml" } }); + return new Response(qr.svg(), { + headers: { "Content-Type": "image/svg+xml" }, + }); } const landing = ` @@ -259,4 +263,4 @@ npx wrangler deploy In this tutorial, you built and deployed a Worker application for generating QR codes. If you would like to see the full source code for this application, you can find it [on GitHub](https://github.com/kristianfreeman/workers-qr-code-generator). -If you want to get started building your own projects, review the existing list of [Quickstart templates](/workers/get-started/quickstarts/). \ No newline at end of file +If you want to get started building your own projects, review the existing list of [Quickstart templates](/workers/get-started/quickstarts/). diff --git a/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx b/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx index 75815820d318c4..5d11b31df2f89b 100644 --- a/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx +++ b/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx @@ -131,19 +131,7 @@ This is a minimal application using Hono. If a GET access comes in on the path ` To run the application on your local machine, execute the following command. - - -```sh title="Run your application locally" -npm run dev -``` - - - -```sh title="Run your application locally" -yarn dev -``` - - + Access to `http://localhost:8787` in your browser after the server has been started, and you can see the message. @@ -738,19 +726,7 @@ By completing the preceding steps, you have finished writing the code for your S Wrangler has built-in support for bundling, uploading, and releasing your Cloudflare Workers application. To do this, run the following command which will build and deploy your code. 
- - -```sh title="Deploy your application" -npm run deploy -``` - - - -```sh title="Deploy your application" -yarn deploy -``` - - + Deploying your Workers application should now cause issue updates to start appearing in your Slack channel, as the GitHub webhook can now successfully reach your Workers webhook route: diff --git a/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx b/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx index 8e6de4c1b8342c..8a4157772ee38d 100644 --- a/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx +++ b/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx @@ -202,9 +202,7 @@ Select `` on your keyboard to save the token as a secret. Both `LIBSQL_DB Install the Turso client library and a router: -```sh -npm install @libsql/client itty-router -``` + The `@libsql/client` library allows you to query a Turso database. The `itty-router` library is a lightweight router you will use to help handle incoming requests to the worker. diff --git a/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx b/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx index 2217a060a72e32..a437e3eb5bce48 100644 --- a/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx +++ b/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx @@ -106,15 +106,11 @@ bucket_name = '' You will use [Hono](https://hono.dev/), a lightweight framework for building Cloudflare Workers applications. Hono provides an interface for defining routes and middleware functions. Inside your project directory, run the following command to install Hono: -```sh -npm install hono -``` + You also need to install the [OpenAI Node API library](https://www.npmjs.com/package/openai). This library provides convenient access to the OpenAI REST API in a Node.js project. To install the library, execute the following command: -```sh -npm install openai -``` + Next, open the `src/index.ts` file and replace the default code with the below code. Replace `` with the binding name you set in Wrangler file. diff --git a/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx b/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx index d48bbb782fb041..b034ec3e0696b2 100644 --- a/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx +++ b/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx @@ -61,9 +61,7 @@ You will also need an OpenAI account and API key for this tutorial. If you do no With your Worker project created, make your first request to OpenAI. You will use the OpenAI node library to interact with the OpenAI API. In this project, you will also use the Cheerio library to handle processing the HTML content of websites -```sh -npm install openai cheerio -``` + Now, define the structure of your Worker in `index.js`: diff --git a/src/content/docs/workers/tutorials/postgres/index.mdx b/src/content/docs/workers/tutorials/postgres/index.mdx index 2b4edaccd9f324..4e913a87dbe70d 100644 --- a/src/content/docs/workers/tutorials/postgres/index.mdx +++ b/src/content/docs/workers/tutorials/postgres/index.mdx @@ -65,9 +65,7 @@ cd postgres-tutorial To connect to a PostgreSQL database, you will need the `postgres` library. 
In your Worker application directory, run the following command to install the library: -```sh -npm install postgres -``` + Make sure you are using `postgres` (`Postgres.js`) version `3.4.4` or higher. `Postgres.js` is compatible with both Pages and Workers. diff --git a/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx b/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx index f2f1ad2f148233..7aaa7430ccc362 100644 --- a/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx +++ b/src/content/docs/workers/tutorials/using-prisma-postgres-with-workers/index.mdx @@ -10,6 +10,8 @@ languages: - PostgreSQL --- +import { PackageManagers } from "~/components"; + [Prisma Postgres](https://www.prisma.io/postgres) is a managed, serverless PostgreSQL database. It supports features like connection pooling, caching, real-time subscriptions, and query optimization recommendations. In this tutorial, you will learn how to: @@ -60,29 +62,21 @@ In this step, you will set up Prisma ORM with a Prisma Postgres database using t Install Prisma CLI as a dev dependency: -```sh -npm install prisma --save-dev -``` + Install the [Prisma Accelerate client extension](https://www.npmjs.com/package/@prisma/extension-accelerate) as it is required for Prisma Postgres: -```sh -npm install @prisma/extension-accelerate -``` + Install the [`dotenv-cli` package](https://www.npmjs.com/package/dotenv-cli) to load environment variables from `.dev.vars`: -```sh -npm install dotenv-cli --save-dev -``` + ### 2.2. Create a Prisma Postgres database and initialize Prisma Initialize Prisma in your application: -```sh -npx prisma@latest init --db -``` + If you do not have a [Prisma Data Platform](https://console.prisma.io/) account yet, or if you are not logged in, the command will prompt you to log in using one of the available authentication providers. A browser window will open so you can log in or create an account. Return to the CLI after you have completed this step. diff --git a/src/content/docs/workers/wrangler/commands.mdx b/src/content/docs/workers/wrangler/commands.mdx index 025cb0ec17c695..99f2d9d73eabed 100644 --- a/src/content/docs/workers/wrangler/commands.mdx +++ b/src/content/docs/workers/wrangler/commands.mdx @@ -14,6 +14,7 @@ import { Type, MetaInfo, WranglerConfig, + PackageManagers, } from "~/components"; Wrangler offers a number of commands to manage your Cloudflare Workers. @@ -71,25 +72,11 @@ wrangler [PARAMETERS] [OPTIONS] Since Cloudflare recommends [installing Wrangler locally](/workers/wrangler/install-and-update/) in your project(rather than globally), the way to run Wrangler will depend on your specific setup and package manager. 
- - -```sh -npx wrangler [PARAMETERS] [OPTIONS] -``` - - - -```sh -yarn wrangler [PARAMETERS] [OPTIONS] -``` - - - -```sh -pnpm wrangler [PARAMETERS] [OPTIONS] -``` - - + You can add Wrangler commands that you use often as scripts in your project's `package.json` file: @@ -106,25 +93,7 @@ You can add Wrangler commands that you use often as scripts in your project's `p You can then run them using your package manager of choice: - - -```sh -npm run deploy -``` - - - -```sh -yarn run deploy -``` - - - -```sh -pnpm run deploy -``` - - + --- diff --git a/src/content/docs/workers/wrangler/install-and-update.mdx b/src/content/docs/workers/wrangler/install-and-update.mdx index 46461d4eb673eb..14370980a22eb5 100644 --- a/src/content/docs/workers/wrangler/install-and-update.mdx +++ b/src/content/docs/workers/wrangler/install-and-update.mdx @@ -8,7 +8,7 @@ description: Get started by installing Wrangler, and update to newer versions by following this guide. --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; Wrangler is a command-line tool for building with Cloudflare developer products. @@ -29,7 +29,7 @@ Wrangler is installed locally into each of your projects. This allows you and yo To install Wrangler within your Worker project, run: - + Since Cloudflare recommends installing Wrangler locally in your project (rather than globally), the way to run Wrangler will depend on your specific setup and package manager. Refer to [How to run Wrangler commands](/workers/wrangler/commands/#how-to-run-wrangler-commands) for more information. @@ -55,9 +55,7 @@ npx wrangler -v To update the version of Wrangler used in your project, run: -```sh -npm install wrangler@latest -``` + ## Related resources diff --git a/src/content/docs/workers/wrangler/migration/v1-to-v2/eject-webpack.mdx b/src/content/docs/workers/wrangler/migration/v1-to-v2/eject-webpack.mdx index 889fb3827194bd..5d5546e434ed3d 100644 --- a/src/content/docs/workers/wrangler/migration/v1-to-v2/eject-webpack.mdx +++ b/src/content/docs/workers/wrangler/migration/v1-to-v2/eject-webpack.mdx @@ -3,10 +3,9 @@ pcx_content_type: how-to title: 1. Migrate webpack projects sidebar: order: 2 - --- -import { WranglerConfig } from "~/components"; +import { WranglerConfig, PackageManagers } from "~/components"; This guide describes the steps to migrate a webpack project from Wrangler v1 to Wrangler v2. After completing this guide, [update your Wrangler version](/workers/wrangler/migration/v1-to-v2/update-v1-to-v2/). @@ -50,25 +49,24 @@ Wrangler v2 drops support for project types, including `type = webpack` and conf To do that, you will need to add it as a dependency: -``` -npm install --save-dev webpack@^4.46.0 webpack-cli wranglerjs-compat-webpack-plugin -# or -yarn add --dev webpack@4.46.0 webpack-cli wranglerjs-compat-webpack-plugin -``` + You should see this reflected in your `package.json` file: ```json { - "name": "my-worker", - "version": "x.y.z", - // ... - "devDependencies": { - // ... - "wranglerjs-compat-webpack-plugin": "^x.y.z", - "webpack": "^4.46.0", - "webpack-cli": "^x.y.z" - } + "name": "my-worker", + "version": "x.y.z", + // ... + "devDependencies": { + // ... + "wranglerjs-compat-webpack-plugin": "^x.y.z", + "webpack": "^4.46.0", + "webpack-cli": "^x.y.z" + } } ``` @@ -78,12 +76,12 @@ Modify your `webpack.config.js` file to include the plugin you just installed. 
```js const { - WranglerJsCompatWebpackPlugin, + WranglerJsCompatWebpackPlugin, } = require("wranglerjs-compat-webpack-plugin"); module.exports = { - // ... - plugins: [new WranglerJsCompatWebpackPlugin()], + // ... + plugins: [new WranglerJsCompatWebpackPlugin()], }; ``` @@ -91,13 +89,13 @@ module.exports = { ```json { - "name": "my-worker", - "version": "2.0.0", - // ... - "scripts": { - "build": "webpack" // <-- Add this line! - // ... - } + "name": "my-worker", + "version": "2.0.0", + // ... + "scripts": { + "build": "webpack" // <-- Add this line! + // ... + } } ``` @@ -119,8 +117,6 @@ webpack_config = "webpack.config.js" Wrangler no longer has any knowledge of how to build your Worker. You will need to tell it how to call webpack and where to look for webpack's output. This translates into two fields: - - ```toml diff --git a/src/content/partials/hyperdrive/use-mysql-to-make-query.mdx b/src/content/partials/hyperdrive/use-mysql-to-make-query.mdx index 5b59fd54b3a420..1a696537f6de6d 100644 --- a/src/content/partials/hyperdrive/use-mysql-to-make-query.mdx +++ b/src/content/partials/hyperdrive/use-mysql-to-make-query.mdx @@ -2,17 +2,15 @@ {} --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; Install the [mysql](https://github.com/mysqljs/mysql) driver: -```sh -npm install mysql -``` + Add the required Node.js compatibility flags and Hyperdrive binding to your `wrangler.jsonc` file: - + Create a new connection and pass the Hyperdrive parameters: diff --git a/src/content/partials/hyperdrive/use-mysql2-to-make-query.mdx b/src/content/partials/hyperdrive/use-mysql2-to-make-query.mdx index 5860e8d38ce76c..24aa5dcf1ab21a 100644 --- a/src/content/partials/hyperdrive/use-mysql2-to-make-query.mdx +++ b/src/content/partials/hyperdrive/use-mysql2-to-make-query.mdx @@ -2,18 +2,19 @@ {} --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; Install the [mysql2](https://github.com/sidorares/node-mysql2) driver: -```sh -# mysql2 v3.13.0 or later is required -npm install mysql2 -``` + + +:::note +`mysql2` v3.13.0 or later is required +::: Add the required Node.js compatibility flags and Hyperdrive binding to your `wrangler.jsonc` file: - + Create a new `connection` instance and pass the Hyperdrive parameters: diff --git a/src/content/partials/hyperdrive/use-node-postgres-to-make-query.mdx b/src/content/partials/hyperdrive/use-node-postgres-to-make-query.mdx index c3f471fd1c92d1..a9d7eb51fbbce5 100644 --- a/src/content/partials/hyperdrive/use-node-postgres-to-make-query.mdx +++ b/src/content/partials/hyperdrive/use-node-postgres-to-make-query.mdx @@ -2,19 +2,25 @@ {} --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; Install the `node-postgres` driver: -```sh -npm install pg -# If using TypeScript -npm i --save-dev @types/pg -``` + + +:::note + +The minimum version of `node-postgres` required for Hyperdrive is `8.13.0`. + +::: + +If using TypeScript, install the types package: + + Add the required Node.js compatibility flags and Hyperdrive binding to your `wrangler.jsonc` file: - + Create a new `Client` instance and pass the Hyperdrive `connectionString`: @@ -56,9 +62,3 @@ export default { }, }; ``` - -:::note - -The minimum version of `node-postgres` required for Hyperdrive is `8.13.0`. 
- -::: diff --git a/src/content/partials/hyperdrive/use-postgres-js-to-make-query.mdx b/src/content/partials/hyperdrive/use-postgres-js-to-make-query.mdx index 600d78d51bc682..1bf2d91b4b919e 100644 --- a/src/content/partials/hyperdrive/use-postgres-js-to-make-query.mdx +++ b/src/content/partials/hyperdrive/use-postgres-js-to-make-query.mdx @@ -2,18 +2,19 @@ {} --- -import { Render } from "~/components"; +import { Render, PackageManagers } from "~/components"; Install [Postgres.js](https://github.com/porsager/postgres): -```sh -# Postgres.js 3.4.5 or later is recommended -npm install postgres -``` + + +:::note +The minimum version of `postgres-js` required for Hyperdrive is `3.4.5`. +::: Add the required Node.js compatibility flags and Hyperdrive binding to your `wrangler.jsonc` file: - + Create a Worker that connects to your PostgreSQL database via Hyperdrive: @@ -54,9 +55,3 @@ export default { }, } satisfies ExportedHandler; ``` - -:::note - -The minimum version of `postgres-js` required for Hyperdrive is `3.4.5`. - -::: diff --git a/src/content/partials/workers/dash-creation-next-steps.mdx b/src/content/partials/workers/dash-creation-next-steps.mdx index 1ea666b81e7ef9..b4a8eda54a87a2 100644 --- a/src/content/partials/workers/dash-creation-next-steps.mdx +++ b/src/content/partials/workers/dash-creation-next-steps.mdx @@ -2,7 +2,7 @@ {} --- -import { TabItem, Tabs } from "~/components"; +import { TabItem, Tabs, PackageManagers } from "~/components"; ### Dashboard @@ -31,24 +31,10 @@ Use a Node version manager like [Volta](https://volta.sh/) or [nvm](https://gith 3. Run the following command, replacing the value of `[]` which the location you want to put your Worker Script. - - -```sh -npm create cloudflare@latest [] -- --type=pre-existing -``` - - - -```sh -yarn create cloudflare [] --type=pre-existing -``` - - - -```sh -pnpm create cloudflare@latest [] --type=pre-existing -``` - - + After you run this command - and work through the prompts - your local changes will not automatically sync with dashboard. So, once you download your script, continue using the CLI. diff --git a/src/content/partials/workers/install_wrangler.mdx b/src/content/partials/workers/install_wrangler.mdx deleted file mode 100644 index dcfa03917c17bc..00000000000000 --- a/src/content/partials/workers/install_wrangler.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -{} ---- - -import { TabItem, Tabs } from "~/components"; - - - -```sh -npm install wrangler --save-dev -``` - - - -```sh -yarn add --dev wrangler -``` - -
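A closing note for reviewers tracing the pattern: every removed `npm`/`yarn`/`pnpm` tab block above collapses into a single component call in MDX. A representative sketch is below; the `pkg` and `dev` prop names follow `starlight-package-managers`, and the exact props used in each file should be checked against that package's documentation.

```mdx
import { PackageManagers } from "~/components";

{/* Renders npm, yarn, and pnpm tabs for `npm install wrangler --save-dev` */}
<PackageManagers pkg="wrangler" dev />
```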